prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from django.db import models
class TastingCategory(models.Model):
    """A category of tastings (e.g. whisky, coffee, wine)."""
    title = models.CharField(max_length=128)          # plural display title
    singularTitle = models.CharField(max_length=128)  # singular display title
    slug = models.SlugField(max_length=128)           # URL-friendly identifier

    def __str__(self):
        # __unicode__ is ignored on Python 3 / Django >= 2.0 (the on_delete
        # keywords used elsewhere in this file show that is the target stack),
        # so define __str__ like the sibling models do.
        return self.title
class Tasting(models.Model):
    """Base model for a tasting entry; specialised by Whisky/Coffee/Wine."""
    category = models.ForeignKey('TastingCategory', on_delete=models.CASCADE)
    slug = models.SlugField(max_length=128)
    name = models.CharField(max_length=128)
    flair = models.TextField()
    mouth = models.TextField()
    color = models.TextField()
    note = models.IntegerField()
    # auto_now_add stamps the row once at creation; auto_now=False is the
    # default, kept explicit as in the original. (Line repaired: a copy
    # artifact had split "models".)
    date = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name="Date d'ajout")

    def __str__(self):
        # __str__ instead of the Python-2-only __unicode__, for consistency
        # with the subclasses below.
        return self.category.title + " " + self.name
class WhiskyType(models.Model):
    """Lookup table of whisky types."""
    type = models.CharField(max_length=128)

    def __str__(self):
        # Repaired: the def line was split by a copy artifact; renamed from
        # __unicode__ to __str__ for Python 3 / consistency with siblings.
        return self.type
class CoffeeCountry(models.Model):
    """Lookup table of coffee-producing countries."""
    country = models.CharField(max_length=128)

    def __str__(self):
        # __str__ instead of the Python-2-only __unicode__.
        return self.country
class Whisky(Tasting):
    """Whisky tasting entry."""
    old = models.IntegerField()  # age in years (rendered as "X ans" below)
    type = models.ForeignKey('WhiskyType', on_delete=models.CASCADE)
    degAlcool = models.IntegerField()  # presumably alcohol strength (degré d'alcool) — confirm units

    def __str__(self):
        return self.type.type + " " + self.name + " " + str(self.old) + " ans"
class Coffee(Tasting):
    """Coffee tasting entry."""
    country = models.ForeignKey('CoffeeCountry', on_delete=models.CASCADE)
    altitude = models.IntegerField()
    strength = models.IntegerField()

    def __str__(self):
        return self.category.title + " " + self.country.country + " " + self.name
class Wine(Tasting):
    """Wine tasting entry. (Repaired: stray column-separator artifact removed
    from the end of __str__.)"""
    year = models.IntegerField()
    degAlcool = models.IntegerField()

    def __str__(self):
        return self.category.title + " " + self.name
from django.core.exceptio | ns import ImproperlyConfigured
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
import warnings
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
class EmailUserManager(BaseUserManager):
    """Manager for EmailUser: accounts are keyed by e-mail address."""

    def create_user(self, email, password=None, **extra_fields):
        """
        Creates and saves a User with the given email and password.

        Raises ValueError when no email is supplied.
        """
        now = timezone.now()
        if not email:
            raise ValueError('The given email must be set')
        # Normalize the domain part so lookups are not case-sensitive there
        # (restores the intent of the commented-out normalize_email call).
        email = self.normalize_email(email)
        user = EmailUser(email=email,
                         is_staff=False, is_active=True, is_superuser=False,
                         last_login=now, date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Creates a staff + superuser account with the given credentials."""
        u = self.create_user(email, password, **extra_fields)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save(using=self._db)
        return u
class EmailUser(AbstractBaseUser, PermissionsMixin):
    """
    A fully featured User model with admin-compliant permissions, keyed
    by e-mail address instead of a username.

    Password and email are required; other fields are optional.
    """
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), unique=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = EmailUserManager()

    USERNAME_FIELD = 'email'

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_absolute_url(self):
        # Fixed: this model has no 'username' attribute (USERNAME_FIELD is
        # 'email'), so the original self.username raised AttributeError.
        return "/users/%s/" % urlquote(self.email)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email])

    def get_profile(self):
        """
        Deprecated stub kept for API compatibility: only emits a
        DeprecationWarning and returns nothing.
        """
        warnings.warn(
            "The use of AUTH_PROFILE_MODULE to define user profiles has been deprecated.",
            DeprecationWarning)
class PasswordReset(models.Model):
    """Password-reset token attached to a single user."""
    # NOTE(review): related_name="profile" reads oddly for a password-reset
    # relation — confirm it is not a copy/paste leftover. Also no on_delete
    # is given, which only works on Django < 2.0.
    user = models.OneToOneField(EmailUser, related_name="profile")
    key = models.CharField(max_length=100)   # the secret reset token
    used = models.BooleanField(default=False)  # set once the token is consumed
|
import requests
class Status(object):
    """Summarises translation-completion data fetched from a JSON endpoint.

    The endpoint returns a list of daily snapshots (newest last); each
    snapshot maps locale -> stats, with per-app stats nested under 'apps'.
    """

    SKIP_LOCALES = ['en_US']

    def __init__(self, url, app=None, highlight=None):
        self.url = url
        self.app = app                     # restrict stats to one app, or None for all
        self.highlight = highlight or []   # locales to report separately
        self.data = []
        self.created = None

    def get_data(self):
        """Fetch and cache the JSON payload; no-op if already loaded."""
        if self.data:
            return
        resp = requests.get(self.url)
        if resp.status_code != 200:
            resp.raise_for_status()
        self.data = resp.json()
        self.created = self.data[-1]['created']

    def _locale_stats(self, items, get_item):
        """Build per-locale stat dicts sorted by descending percent,
        skipping SKIP_LOCALES. Shared by the two summary() passes."""
        stats = []
        for loc, loc_data in sorted(items, key=lambda x: -x[1]['percent']):
            if loc in self.SKIP_LOCALES:
                continue
            item = get_item(loc_data)
            total = item.get('total', -1)
            translated = item.get('translated', -1)
            stats.append({
                'locale': loc,
                'percent': item.get('percent', -1),
                'total': total,
                'translated': translated,
                'untranslated': total - translated,
                'untranslated_words': item.get('untranslated_words', -1),
            })
        return stats

    def summary(self):
        """Generates summary data of today's state"""
        self.get_data()
        highlight = self.highlight
        last_item = self.data[-1]

        output = {}
        output['app'] = self.app or 'ALL'

        data = last_item['locales']
        if self.app:
            get_item = lambda x: x['apps'][self.app]
        else:
            get_item = lambda x: x

        # Python 3 fix: dict views are not indexable, so take an arbitrary
        # locale entry to list the known apps (original used .items()[0][1]
        # and list.sort(), which are Python-2-only here).
        first_locale = next(iter(data.values()))
        output['apps'] = sorted(first_locale['apps'].keys())

        items = [item for item in data.items() if item[0] not in highlight]
        hitems = [item for item in data.items() if item[0] in highlight]

        output['highlighted'] = self._locale_stats(hitems, get_item)
        output['locales'] = self._locale_stats(items, get_item)
        output['created'] = self.created
        return output

    def _mark_movement(self, data):
        """For each item, converts to a tuple of (movement, item) where
        movement is '' for the first item, else 'up'/'down'/'equal'
        relative to the previous item."""
        ret = []
        prev_day = None
        for i, day in enumerate(data):
            if i == 0:
                ret.append(('', day))
                prev_day = day
                continue
            if prev_day > day:
                item = ('down', day)
            elif prev_day < day:
                item = ('up', day)
            else:
                item = ('equal', day)
            prev_day = day
            ret.append(item)
        return ret

    def history(self):
        """Per-locale completion history over the last 14 snapshots."""
        self.get_data()
        data = self.data
        highlight = self.highlight
        app = self.app

        # Get a list of the locales we'll iterate through
        locales = sorted(data[-1]['locales'].keys())

        num_days = 14
        # Truncate the data to what we want to look at
        data = data[-num_days:]

        if app:
            get_data = lambda x: x['apps'][app]['percent']
        else:
            get_data = lambda x: x['percent']

        hlocales = [loc for loc in locales if loc in highlight]
        locales = [loc for loc in locales if loc not in highlight]

        output = {}
        output['app'] = self.app or 'All'
        output['headers'] = [item['created'] for item in data]
        output['highlighted'] = sorted(
            (loc, self._mark_movement(get_data(day['locales'][loc]) for day in data))
            for loc in hlocales
        )
        # Locales missing from an older snapshot default to 0 percent.
        output['locales'] = sorted(
            (loc, self._mark_movement(get_data(day['locales'].get(loc, {'percent': 0.0})) for day in data))
            for loc in locales
        )
        output['created'] = self.created
        return output
|
can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xlwt
from xlwt.Style import default_style
import cStringIO
from datetime import datetime
from openerp.osv.fields import datetime as datetime_field
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import inspect
from types import CodeType
from openerp.report.report_sxw import *
from openerp import pooler
from openerp.tools.translate import translate, _
import logging
_logger = logging.getLogger(__name__)
class AttrDict(dict):
    """A dict whose entries are also reachable as attributes (d.key)."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the attribute namespace to the mapping itself so item
        # access and attribute access share the same storage.
        self.__dict__ = self
class report_xls(report_sxw):
xls_types = {
'bool': xlwt.Row.set_cell_boolean,
'date': xlwt.Row.set_cell_date,
'text': xlwt.Row.set_cell_text,
'number': xlwt.Row.set_cell_number,
}
xls_types_default = {
'bool': False,
'date': None,
'text': '',
'number': 0,
}
# TO DO: move parameters infra to configurable data
# header/footer
hf_params = {
'font_size': 8,
'font_style': 'I', # B: Bold, I: Italic, U: Underline
}
# styles
_pfc = '26' # default pattern fore_color
_bc = '22' # borders color
decimal_format = '#,##0.00'
date_format = 'YYYY-MM-DD'
xls_styles = {
'xls_title': 'font: bold true, height 240;',
'bold': 'font: bold true;',
'underline': 'font: underline true;',
'italic': 'font: italic true;',
'fill': 'pattern: pattern solid, fore_color %s;' % _pfc,
'fill_blue': 'pattern: pattern solid, fore_color 27;',
'fill_grey': 'pattern: pattern solid, fore_color 22;',
'borders_all': 'borders: left thin, right thin, top thin, bottom thin, '
'left_colour %s, right_colour %s, top_colour %s, bottom_colour %s;' % (_bc, _bc, _bc, _bc),
'left': 'align: horz left;',
'center': 'align: horz center;',
'right': 'align: horz right;',
'wrap': 'align: wrap true;',
'top': 'align: vert top;',
'bottom': 'align: vert bottom;',
}
# TO DO: move parameters supra to configurable data
    def create(self, cr, uid, ids, data, context=None):
        """Entry point called by the OpenERP report engine.

        Dispatches to XLS rendering when the matching ir.actions.report.xml
        entry has report_type 'xls', or when the caller forces XLS via the
        'xls_export' context key; otherwise defers to report_sxw.
        """
        self.pool = pooler.get_pool(cr.dbname)
        self.cr = cr
        self.uid = uid
        report_obj = self.pool.get('ir.actions.report.xml')
        # self.name[7:] strips a fixed prefix from the service name —
        # presumably 'report.'; confirm against report_sxw conventions.
        report_ids = report_obj.search(cr, uid,
            [('report_name', '=', self.name[7:])], context=context)
        if report_ids:
            report_xml = report_obj.browse(cr, uid, report_ids[0], context=context)
            self.title = report_xml.name
            if report_xml.report_type == 'xls':
                return self.create_source_xls(cr, uid, ids, data, context)
        elif context.get('xls_export'):
            # use model from 'data' when no ir.actions.report.xml entry
            self.table = data.get('model') or self.table
            return self.create_source_xls(cr, uid, ids, data, context)
        return super(report_xls, self).create(cr, uid, ids, data, context)
    def create_source_xls(self, cr, uid, ids, data, context=None):
        """Render the report as an in-memory XLS workbook.

        Returns a (binary_content, 'xls') tuple as the report framework
        expects.
        """
        if not context:
            context = {}
        parser_instance = self.parser(cr, uid, self.name2, context)
        self.parser_instance = parser_instance
        objs = self.getObjects(cr, uid, ids, context)
        parser_instance.set_context(objs, data, ids, 'xls')
        objs = parser_instance.localcontext['objects']
        n = cStringIO.StringIO()
        wb = xlwt.Workbook(encoding='utf-8')
        # Expose the parser's localcontext with attribute access for the
        # subclass's generate_xls_report implementation.
        _p = AttrDict(parser_instance.localcontext)
        _xs = self.xls_styles
        self.xls_headers = {
            'standard': '',
        }
        # Footer: report date on the left, "page / total pages" on the right
        # (xlwt header/footer escape codes &L/&R/&P/&N).
        report_date = datetime_field.context_timestamp(cr, uid, datetime.now(), context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        self.xls_footers = {
            'standard': ('&L&%(font_size)s&%(font_style)s' + report_date +
                         '&R&%(font_size)s&%(font_style)s&P / &N') % self.hf_params,
        }
        self.generate_xls_report(_p, _xs, data, objs, wb)
        wb.save(n)
        n.seek(0)
        return (n.read(), 'xls')
    def render(self, wanted, col_specs, rowtype, render_space='empty'):
        """
        returns 'evaluated' col_specs

        Input:
        - wanted: element from the wanted_list
        - col_specs : cf. specs[1:] documented in xls_row_template method
        - rowtype : 'header' or 'data'
        - render_space : type dict, (caller_space + localcontext) if not specified
        """
        if render_space == 'empty':
            render_space = {}
        # Reach two frames up so that the locals of the method that invoked
        # the caller are visible to the evaluated cell expressions.
        caller_space = inspect.currentframe().f_back.f_back.f_locals
        localcontext = self.parser_instance.localcontext
        render_space.update(caller_space)
        render_space.update(localcontext)
        row = col_specs[wanted][rowtype][:]
        for i in range(len(row)):
            if isinstance(row[i], CodeType):
                # NOTE(review): eval of pre-compiled code objects from the
                # specs — these must only ever come from trusted module code.
                row[i] = eval(row[i], render_space)
        row.insert(0, wanted)
        #_logger.warn('row O = %s', row)
        return row
    def generate_xls_report(self, parser, xls_styles, data, objects, wb):
        """ override this method to create your excel file """
        # Subclass hook: populate the xlwt Workbook 'wb'; this base class
        # never provides an implementation.
        raise NotImplementedError()
def xls_row_template(self, specs, wanted_list):
"""
Returns a row template.
Input :
- 'wanted_list': list of Columns that will be returned in the row_template
- 'specs': list with Column Characteristics
0: Column Name (from wanted_list)
1: Column Colspan
2: Column Size (unit = the width of the character ’0′ as it appears in the sheet’s default font)
3: Column Type
4: Column Data
5: Column Formula (or 'None' for Data)
6: Column Style
"""
r = []
col = 0
for w in wanted_list:
found = False
for s in specs:
if s[0] == w:
found = True
s_len = len(s)
c = list(s[:5])
# set write_cell_func or formula
if s_len > 5 and s[5] is not None:
c.append({'formula': s[5]})
| else:
c.append({'write_cell_func': report_ | xls.xls_types[c[3]]})
# Set custom cell style
if s_len > 6 and s[6] is not None:
c.append(s[6])
else:
c.append(None)
# Set cell formula
if s_len > 7 and s[7] is not None:
c.append(s[7])
else:
c.append(None)
r.append((col, c[1], c))
col += c[1]
break
if not found:
_logger.warn("report_xls.xls_row_template, column '%s' not found in specs", w)
return r
def xls_write_row(self, ws, row_pos, row_data, row_style=default_style, set_column_size=False):
r = ws.row(row_pos)
for col, size, spec in row_data:
data = spec[4]
formula = spec[5].get('formula') and xlwt.Formula(spec[5]['formula']) or None
style = spec[6] and spec[6] or row_style
if not data:
# if no data, use default values
data = report_xls.xls_types_default[spec[3]]
|
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = self.index(indx)
e = 1
limits = []
for i, limit in enumerate(self.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Sum(e * self.function, *limits)
def summation(f, *symbols, **kwargs):
    r"""
    Compute the summation of f with respect to symbols.

    The notation for symbols is similar to the notation used in Integral.
    summation(f, (i, a, b)) computes the sum of f with respect to i from a to b,
    i.e.,

    ::

                                     b
                                   ____
                                   \   `
        summation(f, (i, a, b)) =   )    f
                                   /___,
                                   i = a

    If it cannot compute the sum, it returns an unevaluated Sum object.
    Repeated sums can be computed by introducing additional symbols tuples::

    >>> from sympy import summation, oo, symbols, log
    >>> i, n, m = symbols('i n m', integer=True)

    >>> summation(2*i - 1, (i, 1, n))
    n**2
    >>> summation(1/2**i, (i, 0, oo))
    2
    >>> summation(1/log(n)**n, (n, 2, oo))
    Sum(log(n)**(-n), (n, 2, oo))
    >>> summation(i, (i, 0, n), (n, 0, m))
    m**3/6 + m**2/2 + m/3

    >>> from sympy.abc import x
    >>> from sympy import factorial
    >>> summation(x**n/factorial(n), (n, 0, oo))
    exp(x)

    See Also
    ========

    Sum
    Product, product

    """
    # deep=False: the limits tuples were already supplied explicitly, so
    # avoid re-evaluating nested arguments during doit().
    return Sum(f, *symbols, **kwargs).doit(deep=False)
def telescopic_direct(L, R, n, limits):
    """Returns the direct summation of the terms of a telescopic sum

    L is the term with lower index
    R is the term with higher index
    n difference between the indexes of L and R

    For example:

    >>> from sympy.concrete.summations import telescopic_direct
    >>> from sympy.abc import k, a, b
    >>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
    -1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a
    """
    index, lower, upper = limits
    # Only the n boundary terms at each end survive the cancellation.
    total = 0
    for shift in range(n):
        total += L.subs(index, lower + shift) + R.subs(index, upper - shift)
    return total
def telescopic(L, R, limits):
    '''Tries to perform the summation using the telescopic property

    return None if not possible
    '''
    (i, a, b) = limits
    if L.is_Add or R.is_Add:
        return None

    # We want to solve(L.subs(i, i + m) + R, m)
    # First we try a simple match since this does things that
    # solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails
    k = Wild("k")
    sol = (-R).match(L.subs(i, i + k))
    s = None
    if sol and k in sol:
        s = sol[k]
        if not (s.is_Integer and L.subs(i, i + s) == -R):
            # sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x}))
            s = None

    # But there are things that match doesn't do that solve
    # can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1
    if s is None:
        m = Dummy('m')
        try:
            sol = solve(L.subs(i, i + m) + R, m) or []
        except NotImplementedError:
            return None
        # Only an integer shift with exact cancellation makes a telescope.
        sol = [si for si in sol if si.is_Integer and
               (L.subs(i, i + si) + R).expand().is_zero]
        if len(sol) != 1:
            return None
        s = sol[0]

    # With shift s found, the sum collapses to |s| boundary terms; the sign
    # of s decides which term plays the "lower" role.
    if s < 0:
        return telescopic_direct(R, L, abs(s), (i, a, b))
    elif s > 0:
        return telescopic_direct(L, R, s, (i, a, b))
def eval_sum(f, limits):
    """Evaluate Sum(f, (i, a, b)) to a closed form, or return None.

    Tries, in order: trivial cases, folding Piecewise summands,
    KroneckerDelta summation, direct expansion for small definite ranges,
    and the symbolic machinery in eval_sum_symbolic.
    """
    from sympy.concrete.delta import deltasummation, _has_simple_delta
    from sympy.functions import KroneckerDelta

    (i, a, b) = limits
    if f is S.Zero:
        return S.Zero
    if i not in f.free_symbols:
        # Summand does not depend on the index: it is simply repeated.
        return f*(b - a + 1)
    if a == b:
        return f.subs(i, a)

    if isinstance(f, Piecewise):
        if not any(i in arg.args[1].free_symbols for arg in f.args):
            # Piecewise conditions do not depend on the dummy summation variable,
            # therefore we can fold: Sum(Piecewise((e, c), ...), limits)
            # --> Piecewise((Sum(e, limits), c), ...)
            newargs = []
            for arg in f.args:
                newexpr = eval_sum(arg.expr, limits)
                if newexpr is None:
                    return None
                newargs.append((newexpr, arg.cond))
            return f.func(*newargs)

    if f.has(KroneckerDelta) and _has_simple_delta(f, limits[0]):
        return deltasummation(f, limits)

    dif = b - a
    definite = dif.is_Integer
    # Doing it directly may be faster if there are very few terms.
    if definite and (dif < 100):
        return eval_sum_direct(f, (i, a, b))
    if isinstance(f, Piecewise):
        return None
    # Try to do it symbolically. Even when the number of terms is known,
    # this can save time when b-a is big.
    # We should try to transform to partial fractions
    value = eval_sum_symbolic(f.expand(), (i, a, b))
    if value is not None:
        return value
    # Do it directly
    if definite:
        return eval_sum_direct(f, (i, a, b))
def eval_sum_direct(expr, limits):
    """Sum expr over the index by direct term-by-term substitution.

    Assumes the range upper - lower is a (non-negative) concrete integer.
    """
    from sympy.core import Add
    sym, lower, upper = limits
    span = upper - lower
    terms = [expr.subs(sym, lower + k) for k in range(span + 1)]
    return Add(*terms)
def eval_sum_symbolic(f, limits):
    """Closed-form summation of f over (i, a, b) via linearity, telescoping,
    Faulhaber/harmonic formulas, geometric series, Gosper's algorithm and
    finally hypergeometric summation; returns None when nothing applies."""
    from sympy.functions import harmonic, bernoulli

    f_orig = f
    (i, a, b) = limits
    if not f.has(i):
        return f*(b - a + 1)

    # Linearity
    if f.is_Mul:
        L, R = f.as_two_terms()

        if not L.has(i):
            sR = eval_sum_symbolic(R, (i, a, b))
            if sR:
                return L*sR

        if not R.has(i):
            sL = eval_sum_symbolic(L, (i, a, b))
            if sL:
                return R*sL

    try:
        f = apart(f, i)  # see if it becomes an Add
    except PolynomialError:
        pass

    if f.is_Add:
        L, R = f.as_two_terms()
        lrsum = telescopic(L, R, (i, a, b))

        if lrsum:
            return lrsum

        lsum = eval_sum_symbolic(L, (i, a, b))
        rsum = eval_sum_symbolic(R, (i, a, b))

        if None not in (lsum, rsum):
            r = lsum + rsum
            if not r is S.NaN:
                return r

    # Polynomial terms with Faulhaber's formula
    n = Wild('n')
    result = f.match(i**n)

    if result is not None:
        n = result[n]

        if n.is_Integer:
            if n >= 0:
                # Either infinite bound (but not both) makes the sum diverge.
                if (b is S.Infinity and not a is S.NegativeInfinity) or \
                   (a is S.NegativeInfinity and not b is S.Infinity):
                    return S.Infinity
                return ((bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)).expand()
            elif a.is_Integer and a >= 1:
                if n == -1:
                    return harmonic(b) - harmonic(a - 1)
                else:
                    return harmonic(b, abs(n)) - harmonic(a - 1, abs(n))

    if not (a.has(S.Infinity, S.NegativeInfinity) or
            b.has(S.Infinity, S.NegativeInfinity)):
        # Geometric terms
        c1 = Wild('c1', exclude=[i])
        c2 = Wild('c2', exclude=[i])
        c3 = Wild('c3', exclude=[i])

        e = f.match(c1**(c2*i + c3))

        if e is not None:
            p = (c1**c3).subs(e)
            q = (c1**c2).subs(e)

            r = p*(q**a - q**(b + 1))/(1 - q)
            l = p*(b - a + 1)

            # q == 1 degenerates to a constant sum; Piecewise keeps both.
            return Piecewise((l, Eq(q, S.One)), (r, True))

    r = gosper_sum(f, (i, a, b))

    if not r in (None, S.NaN):
        return r

    return eval_sum_hyper(f_orig, (i, a, b))
def _eval_sum_hyper(f, i, a):
""" Returns (res, cond). Sums from a to oo. """
from sympy.functions import hyper
from sympy.simplify import hyperexpand, hypersimp, fraction, simplify
from sympy.polys.polytools import Poly, factor
if a != 0:
return _eval_sum_hyper(f.subs(i, i + a), i, 0)
if f.subs(i, 0) == 0:
if simplify(f.subs(i, Dummy('i' |
# Minimal Tkinter (Python 2) demo: one button that pops an info dialog.
import Tkinter
import tkMessageBox

top = Tkinter.Tk()

def helloCallBack():
    # Show a modal info dialog when the button is pressed.
    tkMessageBox.showinfo("Hello Python", "Hello World")

B = Tkinter.Button(top, text="Hello", command=helloCallBack)
B.pack()
# Repaired: a copy artifact had split 'mainloop'. Blocks until the
# window is closed.
top.mainloop()
| |
#! /usr/bin/env python
# coding=utf8
#
# Copyright (c) 2008 James Molloy, Jörg Pfähler, Matthew Iselin
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import tempfile
import shutil
import os
def doLibc(builddir, inputLibcA, glue_name, pedigree_c_name, ar, cc, libgcc):
| print "Building libc..."
tmpdir = tempfile.mkdtemp()
buildOut = builddir + "/libc"
olddir = os.getcwd()
os.chdir(tmpdir)
shutil.copy(inputLibcA, tmpdir + "/libc.a")
os.system(ar + " x libc.a")
glue = glue_name
shutil.copy(glue, tmpdir + "/" + os.path.ba | sename(glue_name))
shutil.copy(pedigree_c_name, tmpdir + "/" + os.path.basename(pedigree_c_name))
objs_to_remove = ["init", "getpwent", "signal", "fseek", "getcwd", "rename", "rewinddir", "opendir", "readdir", "closedir", "_isatty", "basename", "setjmp"]
for i in objs_to_remove:
try:
os.remove("lib_a-" + i + ".o")
except:
continue
res = os.system(ar + " x " + os.path.basename(glue_name))
if res != 0:
print " (failed)"
exit(res)
res = os.system(cc + " -nostdlib -shared -Wl,-shared -Wl,-soname,libc.so -o " + buildOut + ".so *.obj *.o -L. -lpedigree-c -lgcc")
if res != 0:
print " (failed)"
exit(res)
res = os.system(ar + " cru " + buildOut + ".a *.o *.obj")
if res != 0:
print " (failed)"
os.unlink("%s.so" % (buildOut,))
exit(res)
for i in os.listdir("."):
os.remove(i)
os.chdir(olddir)
os.rmdir(tmpdir)
import sys
# Script entry: argv = <builddir> <libc.a> <glue> <pedigree-c> <ar> <cc>;
# the trailing libgcc argument is unused by doLibc and passed empty.
doLibc(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], "")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, |
# WITHOUT WARRANTIES O | R CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is still in proof of concept, and subject to change.
#
from datetime import datetime
# IkaLog Output Plugin: Write 'Alive Squids' CSV data
#
class AliveSquidsCSV(object):
    """IkaLog output plugin: write 'Alive Squids' tracking data as CSV."""

    ##
    # Write a line to text file.
    # @param self The Object Pointer.
    # @param file Destination file path.
    # @param record Record (text)
    #
    def write_record(self, file, record):
        try:
            # with-block guarantees the handle is closed (the original
            # called csv_file.close without parentheses, never closing it).
            with open(file, "a") as csv_file:
                csv_file.write(record)
        except (IOError, OSError):
            # Best-effort logging: a failed write must not abort the session.
            print("CSV: Failed to write CSV File")

    def write_alive_squids_csv(self, context, basename="ikabattle_log", debug=False):
        """Write one CSV per team with (tick, squid index) rows for each
        alive squid sample in context['game']['livesTrack']."""
        csv = ["tick,y\n", "tick,y\n"]
        for sample in context['game']['livesTrack']:
            if debug:
                print('lives sample = %s' % (sample,))
            # sample layout: [tick, team0_alive_flags, team1_alive_flags].
            # Slice instead of `del sample[0]` so the shared context data
            # is not mutated.
            time = sample[0]
            teams = sample[1:]
            num_team = 0
            for team in teams:
                num_squid = 0
                for alive in team:
                    num_squid = num_squid + 1
                    if alive:
                        csv[num_team] = "%s%d, %d\n" % (
                            csv[num_team], time, num_squid)
                num_team = num_team + 1

        num_team = 0
        for f in csv:
            self.write_record('%s/%s_team%d.csv' %
                              (self.dest_dir, basename, num_team), f)
            num_team = num_team + 1

    def write_flags_csv(self, context, basename="ikabattle_log", debug=False):
        """Write tower tracking samples to a single CSV file."""
        # Don't write anything when there is no data.
        if len(context['game']['towerTrack']) == 0:
            return
        csv = "tick,pos,max,min\n"
        for sample in context['game']['towerTrack']:
            if debug:
                print('tower sample = %s' % (sample,))
            time = sample[0]
            sample = sample[1]
            csv = "%s%d, %d, %d, %d\n" % (
                csv, time, sample['pos'], sample['max'], sample['min'])
        self.write_record('%s/%s_tower.csv' % (self.dest_dir, basename), csv)

    ##
    # on_game_individual_result Hook
    # @param self The Object Pointer
    # @param context IkaLog context
    #
    def on_game_individual_result(self, context):
        t = datetime.now()
        basename = t.strftime("ikabattle_log_%Y%m%d_%H%M")
        self.write_alive_squids_csv(context, basename=basename, debug=self.debug)
        self.write_flags_csv(context, basename=basename, debug=self.debug)

    ##
    # Constructor
    # @param self The Object Pointer.
    # @param dir Destination directory (relative path, or absolute path)
    # @param debug Print every processed sample when True.
    def __init__(self, dir='./log/', debug=False):
        self.dest_dir = dir
        self.debug = debug
|
import pytest
from thefuck.types import Command
from thefuck.rules.git_push_without_commits import (
fix,
get_new_command,
match,
)
# Fixture command and the stderr `git push -u` emits when the branch has
# no commits yet; shared by both parametrized tests below.
command = 'git push -u origin master'
expected_error = '''
error: src refspec master does not match any.
error: failed to push some refs to 'git@github.com:User/repo.git'
'''
# (Repaired: a copy artifact had split 'command' in the parametrize id.)
@pytest.mark.parametrize('command', [Command(command, expected_error)])
def test_match(command):
    """The rule must recognise the 'src refspec ... does not match any' error."""
    assert match(command)
# (Repaired: a copy artifact had split 'fix.format' below.)
@pytest.mark.parametrize('command, result', [(
    Command(command, expected_error),
    fix.format(command=command),
)])
def test_get_new_command(command, result):
    """The suggestion is the rule's `fix` template filled with the command."""
    assert get_new_command(command) == result
|
# -*- coding: utf-8 -*-
from bpy.types import PropertyGroup
from bpy.props import StringProperty, IntProperty, BoolProperty, FloatProperty, FloatVectorProperty
from mmd_tools.core.bone import FnBone
def _updateMMDBoneAdditionalTransform(prop, context):
    """Property-update callback: mark the bone's additional transform dirty
    and reapply it immediately when the active pose bone is this one."""
    prop['is_additional_transform_dirty'] = True
    p_bone = context.active_pose_bone
    # (Repaired: a copy artifact had split 'as_pointer'.)
    if p_bone and p_bone.mmd_bone.as_pointer() == prop.as_pointer():
        FnBone.apply_additional_transformation(prop.id_data)
def _getAdditionalTransformBone(prop):
    """Property getter: resolve the stored bone id back to a pose-bone name.

    Returns '' when no bone is set or the id is stale.
    """
    arm = prop.id_data
    # (Repaired: a copy artifact had split the -1 default.)
    bone_id = prop.get('additional_transform_bone_id', -1)
    if bone_id < 0:
        return ''
    fnBone = FnBone.from_bone_id(arm, bone_id)
    if not fnBone:
        return ''
    return fnBone.pose_bone.name
def _setAdditionalTransformBone(prop, value):
    """Property setter: store the bone id for the named pose bone.

    Stores -1 when the name is not a pose bone of this armature.
    """
    armature = prop.id_data
    prop['is_additional_transform_dirty'] = True
    if value in armature.pose.bones.keys():
        target = FnBone(armature.pose.bones[value])
        prop['additional_transform_bone_id'] = target.bone_id
    else:
        prop['additional_transform_bone_id'] = -1
class MMDBone(PropertyGroup):
    """Per-bone MMD (MikuMikuDance) settings stored on Blender pose bones."""

    # -- identification --------------------------------------------------
    name_j = StringProperty(
        name='Name',
        description='Japanese Name',
        default='',
        )
    name_e = StringProperty(
        name='Name(Eng)',
        description='English Name',
        default='',
        )
    bone_id = IntProperty(
        name='Bone ID',
        default=-1,
        )

    # -- display / control flags -----------------------------------------
    transform_order = IntProperty(
        name='Transform Order',
        description='Deformation tier',
        min=0,
        max=100,
        )
    is_visible = BoolProperty(
        name='Visible',
        description='Is visible',
        default=True,
        )
    is_controllable = BoolProperty(
        name='Controllable',
        description='Is controllable',
        default=True,
        )
    transform_after_dynamics = BoolProperty(
        name='After Dynamics',
        description='After physics',
        default=False,
        )

    # -- axis constraints --------------------------------------------------
    enabled_fixed_axis = BoolProperty(
        name='Fixed Axis',
        description='Use fixed axis',
        default=False,
        )
    fixed_axis = FloatVectorProperty(
        name='Fixed Axis',
        description='Fixed axis',
        subtype='XYZ',
        size=3,
        default=[0, 0, 0],
        )
    enabled_local_axes = BoolProperty(
        name='Local Axes',
        description='Use local axes',
        default=False,
        )
    local_axis_x = FloatVectorProperty(
        name='Local X-Axis',
        description='Local x-axis',
        subtype='XYZ',
        size=3,
        default=[1, 0, 0],
        )
    local_axis_z = FloatVectorProperty(
        name='Local Z-Axis',
        description='Local z-axis',
        subtype='XYZ',
        size=3,
        default=[0, 0, 1],
        )
    is_tip = BoolProperty(
        name='Tip Bone',
        description='Is zero length bone',
        default=False,
        )
    ik_rotation_constraint = FloatProperty(
        name='IK Rotation Constraint',
        description='The unit angle of IK',
        subtype='ANGLE',
        soft_min=0,
        soft_max=4,
        default=1,
        )

    # -- additional (inherited) transform ---------------------------------
    # All of these share the _updateMMDBoneAdditionalTransform callback so
    # any change flags the bone dirty and reapplies the transform.
    has_additional_rotation = BoolProperty(
        name='Additional Rotation',
        description='Additional rotation',
        default=False,
        update=_updateMMDBoneAdditionalTransform,
        )
    has_additional_location = BoolProperty(
        name='Additional Location',
        description='Additional location',
        default=False,
        update=_updateMMDBoneAdditionalTransform,
        )
    # String view backed by additional_transform_bone_id via get/set below.
    additional_transform_bone = StringProperty(
        name='Additional Transform Bone',
        description='Additional transform bone',
        set=_setAdditionalTransformBone,
        get=_getAdditionalTransformBone,
        update=_updateMMDBoneAdditionalTransform,
        )
    additional_transform_bone_id = IntProperty(
        name='Additional Transform Bone ID',
        default=-1,
        update=_updateMMDBoneAdditionalTransform,
        )
    additional_transform_influence = FloatProperty(
        name='Additional Transform Influence',
        description='Additional transform influence',
        default=1,
        soft_min=-1,
        soft_max=1,
        update=_updateMMDBoneAdditionalTransform,
        )
    is_additional_transform_dirty = BoolProperty(
        name='',
        default=True
        )
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# | a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distribu | ted on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
LOG = log.getLogger(__name__)
class OpenvswitchMechanismDriver(mech_agent.AgentMechanismDriverBase):
    """Attach to networks using openvswitch L2 agent.

    The OpenvswitchMechanismDriver integrates the ml2 plugin with the
    openvswitch L2 agent. Port binding with this driver requires the
    openvswitch agent to be running on the port's host, and that agent
    to have connectivity to at least one segment of the port's
    network.
    """

    def __init__(self):
        super(OpenvswitchMechanismDriver, self).__init__(
            constants.AGENT_TYPE_OVS,
            portbindings.VIF_TYPE_OVS,
            True)

    def check_segment_for_agent(self, segment, agent):
        """Return whether the given OVS agent can wire up this segment."""
        agent_conf = agent['configurations']
        mappings = agent_conf.get('bridge_mappings', {})
        tunnel_types = agent_conf.get('tunnel_types', [])
        LOG.debug(_("Checking segment: %(segment)s "
                    "for mappings: %(mappings)s "
                    "with tunnel_types: %(tunnel_types)s"),
                  {'segment': segment, 'mappings': mappings,
                   'tunnel_types': tunnel_types})
        network_type = segment[api.NETWORK_TYPE]
        # 'local' always binds; tunnelled types need agent support;
        # flat/vlan need a bridge mapping for the physical network.
        if network_type == 'local' or network_type in tunnel_types:
            return True
        if network_type in ('flat', 'vlan'):
            return segment[api.PHYSICAL_NETWORK] in mappings
        return False
|
# -*- coding: utf-8 -*-
'''
Created on 2014-04-09

@author: Krzysztof Langner
'''
# Swap the import below for file-based logging instead of DynamoDB.
# import iclogger.file_logger as Logger
import iclogger.dynamodb_logger as Logger

# Run the logger's embedded web app when executed directly.
# (Repaired: copy artifacts had split the coding line and 'Logger'.)
if __name__ == "__main__":
    Logger.app.run(debug=True)
import unittest
class TestGenearate(unittest.TestCase):
def setUp(self):
self.seq = ra | nge(10)
def test_smoke(self):
"Basic smoke test that should pickup any silly errors"
import external_naginator
external_naginator.__name__ == "external_naginator"
if __name__ == '__main__':
unittest.ma | in()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example, given the array [−2,1,−3,4,−1,2,1,−5,4],
the contiguous subarray [4,−1,2,1] has the largest sum = 6.
More practic | e:
If you have figured out the O(n) solution,
try coding another solution using the divide and conquer approach, which is more subtle.
Tags: Array, Dynamic Programming, Divide and Conquer
'''
class Solution(object):
    # O(n) runtime; O(1) space — Kadane's algorithm: track the best sum of a
    # subarray ending at the current element, and the best sum seen overall.
    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        best = float("-inf")
        running = 0
        for value in nums:
            # Either extend the previous subarray or start fresh at value.
            running = max(value, running + value)
            best = max(best, running)
        return best
    # Divide and Conquer
|
d limitations
# under the License.
import tempfile
from cloudfiles.errors import ContainerNotEmpty
from django import http
from django import template
from django.contrib import messages
from django.core.urlresolvers import reverse
from mox import IgnoreArg, IsA
from horizon import api
from horizon import test
from .tables import ContainersTable, ObjectsTable
CONTAINER_INDEX_URL = reverse('horizon:nova:containers:index')
class ContainerViewTests(test.BaseViewTests):
    """View tests for the swift container index/create/delete pages."""

    def setUp(self):
        """Build a single fake container used as fixture data."""
        super(ContainerViewTests, self).setUp()
        self.container = api.Container(None)
        self.container.name = 'containerName'
        self.container.size_used = 128
        self.containers = (self.container,)

    def test_index(self):
        """Index view lists the containers returned by the swift API."""
        # Record the expected API call, then switch mox into replay mode.
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest), marker=None).AndReturn(
            ([self.container], False))
        self.mox.ReplayAll()
        res = self.client.get(CONTAINER_INDEX_URL)
        self.assertTemplateUsed(res, 'nova/containers/index.html')
        self.assertIn('table', res.context)
        containers = res.context['table'].data
        self.assertEqual(len(containers), 1)
        self.assertEqual(containers[0].name, 'containerName')

    def test_delete_container(self):
        """Table delete action deletes the container and redirects home."""
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        api.swift_delete_container(IsA(http.HttpRequest),
                                   'containerName')
        self.mox.ReplayAll()
        action_string = "containers__delete__%s" % self.container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_delete_container_nonempty(self):
        """Deleting a non-empty container still redirects to the index."""
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        exception = ContainerNotEmpty('containerNotEmpty')
        api.swift_delete_container(
            IsA(http.HttpRequest),
            'containerName').AndRaise(exception)
        self.mox.ReplayAll()
        action_string = "containers__delete__%s" % self.container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_create_container_get(self):
        """GET on the create view renders the creation form."""
        res = self.client.get(reverse('horizon:nova:containers:create'))
        self.assertTemplateUsed(res, 'nova/containers/create.html')

    def test_create_container_post(self):
        """POST on the create view creates the container and redirects."""
        formData = {'name': 'containerName',
                    'method': 'CreateContainer'}
        self.mox.StubOutWithMock(api, 'swift_create_container')
        api.swift_create_container(
            IsA(http.HttpRequest), u'containerName')
        self.mox.ReplayAll()
        res = self.client.post(reverse('horizon:nova:containers:create'),
                               formData)
        self.assertRedirectsNoFollow(res, CONTAINER_INDEX_URL)
class ObjectViewTests(test.BaseViewTests):
CONTAINER_NAME = 'containerName'
    def setUp(self):
        """Build one fake swift object backed by a stub cloudfiles file."""
        # Minimal stand-in for a cloudfiles object so no network is touched.
        class FakeCloudFile(object):
            def __init__(self):
                self.metadata = {}
            def sync_metadata(self):
                pass
        super(ObjectViewTests, self).setUp()
        swift_object = api.swift.SwiftObject(FakeCloudFile())
        swift_object.name = "test_object"
        swift_object.size = '128'
        swift_object.container = api.swift.Container(None)
        swift_object.container.name = 'container_name'
        self.swift_objects = [swift_object]
    def test_index(self):
        """Object index view lists the objects in the container."""
        self.mox.StubOutWithMock(api, 'swift_get_objects')
        api.swift_get_objects(
            IsA(http.HttpRequest),
            self.CONTAINER_NAME,
            marker=None).AndReturn((self.swift_objects, False))
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:nova:containers:object_index',
                                      args=[self.CONTAINER_NAME]))
        self.assertTemplateUsed(res, 'nova/objects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.swift_objects)
    def test_upload_index(self):
        """GET on the upload view renders the upload form template."""
        res = self.client.get(reverse('horizon:nova:containers:object_upload',
                                      args=[self.CONTAINER_NAME]))
        self.assertTemplateUsed(res, 'nova/objects/upload.html')
    def test_upload(self):
        """Posting a file uploads its data and redirects to the index."""
        OBJECT_DATA = 'objectData'
        OBJECT_FILE = tempfile.TemporaryFile()
        OBJECT_FILE.write(OBJECT_DATA)
        OBJECT_FILE.flush()
        # Rewind so the view reads the payload back from the start.
        OBJECT_FILE.seek(0)
        OBJECT_NAME = 'objectName'
        formData = {'method': 'UploadObject',
                    'container_name': self.CONTAINER_NAME,
                    'name': OBJECT_NAME,
                    'object_file': OBJECT_FILE}
        self.mox.StubOutWithMock(api, 'swift_upload_object')
        api.swift_upload_object(IsA(http.HttpRequest),
                                unicode(self.CONTAINER_NAME),
                                unicode(OBJECT_NAME),
                                OBJECT_DATA).AndReturn(self.swift_objects[0])
        self.mox.ReplayAll()
        # The form page itself must allow multipart file uploads.
        res = self.client.get(reverse('horizon:nova:containers:object_upload',
                                      args=[self.CONTAINER_NAME]))
        self.assertContains(res, 'enctype="multipart/form-data"')
        res = self.client.post(reverse('horizon:nova:containers:object_upload',
                                       args=[self.CONTAINER_NAME]),
                               formData)
        self.assertRedirectsNoFollow(res,
                            reverse('horizon:nova:containers:object_index',
                                    args=[self.CONTAINER_NAME]))
    def test_delete(self):
        """Table delete action removes the object and redirects to the index."""
        self.mox.StubOutWithMock(api, 'swift_delete_object')
        api.swift_delete_object(
            IsA(http.HttpRequest),
            self.CONTAINER_NAME, self.swift_objects[0].name)
        self.mox.ReplayAll()
        OBJECT_INDEX_URL = reverse('horizon:nova:containers:object_index',
                                   args=[self.CONTAINER_NAME])
        action_string = "objects__delete__%s" % self.swift_objects[0].name
        form_data = {"action": action_string}
        req = self.factory.post(OBJECT_INDEX_URL, form_data)
        kwargs = {"container_name": self.CONTAINER_NAME}
        table = ObjectsTable(req, self.swift_objects, **kwargs)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], OBJECT_INDEX_URL)
    def test_download(self):
        """Download view streams object data with a Content-Disposition."""
        OBJECT_DATA = 'objectData'
        OBJECT_NAME = 'objectName'
        self.mox.StubOutWithMock(api, 'swift_get_object_data')
        self.mox.StubOutWithMock(api.swift, 'swift_get_object')
        api.swift.swift_get_object(IsA(http.HttpRequest),
                                   unicode(self.CONTAINER_NAME),
                                   unicode(OBJECT_NAME)) \
            .AndReturn(self.swift_objects[0])
        api.swift_get_object_data(IsA(http.HttpRequest),
                                  unicode(self.CONTAINER_NAME),
                                  unicode(OBJECT_NAME)).AndReturn(OBJECT_DATA)
        self.mox.ReplayAll()
        res = self.client.get(reverse(
            'horizon:nova:containers:object_download',
            args=[self.CONTAINER_NAME, OBJECT_NAME]))
        self.assertEqual(res.content, OBJECT_DATA)
        self.assertTrue(res.has_header('Content-Disposition'))
def test_copy_index(self):
OBJECT_NAME = 'objectName'
container = self.mox.CreateMock(api.Container)
container.name = self.CONTAINER_NAME
self.mox.StubOutWithMock(api, 'swift_get_containers')
api.swift_get_containers(
IsA(http.HttpRequest)).AndReturn(([conta |
ng it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
def _get_default_partners(self, cr, uid, ctx=None):
ret = [self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id]
active_id = ctx.get('active_id')
if ctx.get('active_model') == 'res.partner' and active_id:
if active_id not in ret:
ret.append(active_id)
return ret
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': _get_default_partners,
}
    def _check_closing_date(self, cr, uid, ids, context=None):
        """Constraint helper: every event must stop at or after its start,
        checked on both the datetime pair and the date-only pair."""
        for event in self.browse(cr, uid, ids, context=context):
            if event.start_datetime and event.stop_datetime and event.stop_datetime < event.start_datetime:
                return False
            if event.start_date and event.stop_date and event.stop_date < event.start_date:
                return False
        return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['start_datetime', 'stop_datetime', 'start_date', 'stop_date'])
]
    def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
        """Convert between date-only and datetime start/stop values when
        the 'All Day' checkbox changes.

        Switching to all-day derives the dates from the datetimes; switching
        back rebuilds the datetimes as 8:00-18:00 in the user's timezone,
        stored in UTC.
        """
        value = {}
        if not ((starttime and endtime) or (start and end)):  # On first initialize we have no datetime yet
            return value
        if checkallday:  # from datetime to date
            startdatetime = startdatetime or start
            if startdatetime:
                start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
                value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
            enddatetime = enddatetime or end
            if enddatetime:
                end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
                value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
        else:  # from date to datetime
            # Interpret the stored dates in the user's timezone (UTC if unset).
            user = self.pool['res.users'].browse(cr, uid, uid, context)
            tz = pytz.timezone(user.tz) if user.tz else pytz.utc
            if starttime:
                start = openerp.fields.Datetime.from_string(starttime)
                startdate = tz.localize(start)  # Add "+hh:mm" timezone
                startdate = startdate.replace(hour=8)  # Set 8 AM in localtime
                startdate = startdate.astimezone(pytz.utc)  # Convert to UTC
                value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
            elif start:
                value['start_datetime'] = start
            if endtime:
                # NOTE(review): unlike the start branch above, the end is
                # parsed from only the date part via strptime — presumably
                # equivalent for date-valued input; confirm before unifying.
                end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
                enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
                value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
            elif end:
                value['stop_datetime'] = end
        return {'value': value}
def onchange_duration(self, cr, uid, ids, start=False, duration=False, context=None):
value = {}
if not (start and duration):
return value
start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = (start + timedelta(hours=duration)).strftime(DEFAULT_SERVER_DATE_FORMAT)
value['stop'] = (start + timedelta(hours=duration)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_datetime'] = (start + timedelta(hours=duration)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = start.strftime(DEFAULT_SERVER_DATE_FORMAT)
value['start'] = start.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': value}
    def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
        """Returns duration and end date based on values passed

        Keeps the technical start/stop datetime fields in sync with the
        date-only values while the event is flagged all-day.

        @param ids: List of calendar event's IDs.
        """
        value = {}
        if checkallday != allday:
            return value
        value['allday'] = checkallday  # Force the flag to be rewritten client-side
        if allday:
            if fromtype == 'start' and start:
                start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
                value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
                value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
            if fromtype == 'stop' and end:
                end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
                value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
                value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
        return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
def create_attendees(self, cr, uid, ids, context=None):
if context is None:
context = {}
user_obj = self.pool['res.users']
current_user = user_obj.browse(cr, uid, uid, context=context)
res = {}
for event in self.browse(cr, uid, ids, context):
attendees = {}
for att in event.attendee_ids:
attendees[att.partner_id.id] = True
new_attendees = []
new_att_partner_ids = []
for partner in event.partner_ids:
if partner.id in attendees:
continue
access_token = self.new_invitation_token(cr, uid, event, partner.id)
values = {
'partner_id': partner.id,
'event_id': event.id,
'access_token': access_token,
'email': partner.email,
}
if partner.id == current_user.partner_id.id:
values['state'] = 'accepted'
att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
new_attendees.append(att_id)
new_att_partner_ids.append(partner.id)
if not current_user.email or current_user.email != partner.email:
mail_from = current_user.email or tools.config.get('email_from', False)
if not context.get('no_email'):
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
if new_attendees:
self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
if new_att_partner_ids:
self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
# We remove old attendees who are not in partner_ids now.
all_partner_ids = [part.id for part in event.partner_ids]
all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
all_attendee_ids = [att.id for att in event.attendee_ids]
partner_ids_to_remove = map(lambda x: x, set(all_part_attendee_ids + new_att_partner_ids) - set(al |
import sys, time
from django.conf import settings
from django.db import connection, transaction, backend
from django.core import management, mail
from django.dispatch import dispatcher
from django.test import signals
from django.template import Template
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
def instrumented_test_render(self, context):
    """Template.render replacement that announces each render through the
    template_rendered signal, so the test Client can record which templates
    were used and with which contexts.
    """
    dispatcher.send(signal=signals.template_rendered,
                    sender=self, template=self, context=context)
    rendered = self.nodelist.render(context)
    return rendered
class TestSMTPConnection(object):
    """Stand-in SMTP connection used during test sessions.

    Instead of sending mail over the wire, delivered messages are appended
    to the in-memory ``mail.outbox`` list for later inspection.
    """
    def __init__(*args, **kwargs):
        # Accept (and ignore) whatever signature the real connection takes.
        pass

    def open(self):
        """Mock the SMTPConnection open() interface."""
        pass

    def close(self):
        """Mock the SMTPConnection close() interface."""
        pass

    def send_messages(self, messages):
        """Divert outgoing messages to the dummy outbox."""
        mail.outbox.extend(messages)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:
    - Installing the instrumented test renderer
    - Diverting the email sending functions to a test buffer
    """
    # Keep a reference to the real renderer so teardown can restore it.
    Template.original_render = Template.render
    Template.render = instrumented_test_render
    # Swap the SMTP backend for the in-memory outbox.
    mail.original_SMTPConnection = mail.SMTPConnection
    mail.SMTPConnection = TestSMTPConnection
    mail.outbox = []
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:
    - Restoring the original test renderer
    - Restoring the email sending functions
    """
    Template.render = Template.original_render
    del Template.original_render
    mail.SMTPConnection = mail.original_SMTPConnection
    del mail.original_SMTPConnection
    # Drop the outbox entirely so stale messages cannot leak between runs.
    del mail.outbox
def _set_autocommit(connection):
"Make sure a connection is in autoc | ommit mode."
if hasattr(connection.connection, "autocommit"):
connection.connection.autocommit(True)
elif hasattr(connection.connection, "set_isolation_level"):
connection.connection.set_isolation_level(0)
def get_mysql_create_suffix():
    """Build the CHARACTER SET / COLLATE clause for MySQL CREATE DATABASE."""
    clauses = []
    if settings.TEST_DATABASE_CHARSET:
        clauses.append('CHARACTER SET %s' % settings.TEST_DATABASE_CHARSET)
    if settings.TEST_DATABASE_COLLATION:
        clauses.append('COLLATE %s' % settings.TEST_DATABASE_COLLATION)
    return ' '.join(clauses)
def get_postgresql_create_suffix():
    """Build the WITH ENCODING clause for PostgreSQL CREATE DATABASE."""
    assert settings.TEST_DATABASE_COLLATION is None, "PostgreSQL does not support collation setting at database creation time."
    if not settings.TEST_DATABASE_CHARSET:
        return ''
    return "WITH ENCODING '%s'" % settings.TEST_DATABASE_CHARSET
def create_test_db(verbosity=1, autoclobber=False):
    """Create the test database and point settings.DATABASE_NAME at it.

    SQLite uses an in-memory database; other engines get a real database
    named settings.TEST_DATABASE_NAME (or 'test_' + DATABASE_NAME), with
    an existing one optionally clobbered after confirmation.
    """
    if verbosity >= 1:
        print "Creating test database..."
    # If we're using SQLite, it's more convenient to test against an
    # in-memory database.
    if settings.DATABASE_ENGINE == "sqlite3":
        TEST_DATABASE_NAME = ":memory:"
    else:
        # Engine-specific CREATE DATABASE suffix (charset/collation/encoding).
        suffix = {
            'postgresql': get_postgresql_create_suffix,
            'postgresql_psycopg2': get_postgresql_create_suffix,
            'mysql': get_mysql_create_suffix,
            'mysql_old': get_mysql_create_suffix,
        }.get(settings.DATABASE_ENGINE, lambda: '')()
        if settings.TEST_DATABASE_NAME:
            TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
        else:
            TEST_DATABASE_NAME = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
        # Create the test database and connect to it. We need to autocommit
        # if the database supports it because PostgreSQL doesn't allow
        # CREATE/DROP DATABASE statements within transactions.
        cursor = connection.cursor()
        _set_autocommit(connection)
        try:
            cursor.execute("CREATE DATABASE %s %s" % (backend.quote_name(TEST_DATABASE_NAME), suffix))
        except Exception, e:
            # Creation failed — most likely the database already exists.
            sys.stderr.write("Got an error creating the test database: %s\n" % e)
            if not autoclobber:
                confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_DATABASE_NAME)
            if autoclobber or confirm == 'yes':
                try:
                    if verbosity >= 1:
                        print "Destroying old test database..."
                    cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
                    if verbosity >= 1:
                        print "Creating test database..."
                    cursor.execute("CREATE DATABASE %s %s" % (backend.quote_name(TEST_DATABASE_NAME), suffix))
                except Exception, e:
                    sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
            else:
                print "Tests cancelled."
                sys.exit(1)
        connection.close()
    settings.DATABASE_NAME = TEST_DATABASE_NAME
    management.syncdb(verbosity, interactive=False)
    # Get a cursor (even though we don't need one yet). This has
    # the side effect of initializing the test database.
    cursor = connection.cursor()
def destroy_test_db(old_database_name, verbosity=1):
    """Drop the test database and restore settings.DATABASE_NAME."""
    # Unless we're using SQLite, remove the test database to clean up after
    # ourselves. Connect to the previous database (not the test database)
    # to do so, because it's not allowed to delete a database while being
    # connected to it.
    if verbosity >= 1:
        print "Destroying test database..."
    connection.close()
    TEST_DATABASE_NAME = settings.DATABASE_NAME
    settings.DATABASE_NAME = old_database_name
    if settings.DATABASE_ENGINE != "sqlite3":
        cursor = connection.cursor()
        _set_autocommit(connection)
        time.sleep(1) # To avoid "database is being accessed by other users" errors.
        cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
        connection.close()
|
# ============================================================================
# FILE: size.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from defx.base.column import Base, Highlights
from defx.context import Context
from defx.util import Nvim, readable, Candidate
import typing
class Column(Base):
    """Defx column that renders a human-readable file size."""

    def __init__(self, vim: Nvim) -> None:
        super().__init__(vim)
        self.name = 'size'
        self.has_get_with_highlights = True
        # Fixed column width: 6 chars for the number plus 3 for the unit.
        self._length = 9

    def get_with_highlights(
            self, context: Context, candidate: Candidate
    ) -> typing.Tuple[str, Highlights]:
        path = candidate['action__path']
        # Directories and unreadable entries render as blank padding.
        if not readable(path) or path.is_dir():
            return (' ' * self._length, [])
        number, unit = self._get_size(path.stat().st_size)
        text = '{:>6s}{:>3s}'.format(number, unit)
        return (text, [(self.highlight_name, self.start, self._length)])

    def _get_size(self, size: float) -> typing.Tuple[str, str]:
        """Scale a byte count into a (number, unit) pair."""
        step = 1024
        if size < step:
            return (str(size), 'B')
        for unit in ['KB', 'MB', 'GB', 'TB']:
            size /= step
            if size < step:
                return ('{:.1f}'.format(size), unit)
        # Larger than the TB range — clamp to a sentinel.
        return ('INF', '')

    def length(self, context: Context) -> int:
        return self._length

    def highlight_commands(self) -> typing.List[str]:
        return [f'highlight default link {self.highlight_name} Constant']
|
import random
from Person import Person
class House(object):
    """A collection of rooms plus the actors living in them."""

    def __init__(self):
        self.rooms = []
        self.actors = []

    def __str__(self):
        # Rooms separated by a blank line, with no trailing separator.
        return "\n\n".join(str(room) for room in self.rooms)

    def __iter__(self):
        return iter(self.rooms)

    def getDictionary(self):
        """Nested dict of every room's state, keyed by room name."""
        return {room.name: room.getDictionary() for room in self.rooms}

    def getRooms(self):
        return self.rooms

    def placePersonInRoom(self, person):
        """Move a person out of their current room into a random enterable one."""
        # Pull the person out of whichever room currently holds them.
        for room in self.rooms:
            if person in room.actors_in_room:
                room.removeActor(person)
        # Keep drawing random rooms until an enterable one turns up.
        placed = False
        while not placed:
            pick = self.rooms[random.randint(0, len(self.rooms) - 1)]
            if pick.can_enter:
                pick.addActor(person)
                placed = True

    def addRooms(self, rooms):
        """Append the given rooms, skipping any already present."""
        for room in rooms:
            if room not in self.rooms:
                self.rooms.append(room)

    def hasRoomType(self, roomType):
        return any(isinstance(room, roomType) for room in self.rooms)

    def tick(self):
        """Advance every actor by one simulation step."""
        for actor in self.actors:
            actor.tick()

    def toString_people(self):
        """Tabular summary of every Person in the house."""
        lines = ["People in house\n[name,\t\tage,\thngr,\tbthrm,\tstatus]:\n"]
        for actor in self.actors:
            if not isinstance(actor, Person):
                continue
            # Short names get an extra tab so the columns still line up.
            name_sep = ",\t\t" if len(actor.name) < 6 else ",\t"
            lines.append("[" + actor.name + name_sep + str(actor.age) + ",\t" +
                         str(actor.hunger) + ",\t" + str(actor.bathroom_need) + ",\t" +
                         actor.status + "]\n")
        return "".join(lines)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes statistics used to validate sparse features.
Currently, this module generates the following statistics for each
sparse feature:
- missing_value: Number of examples missing the value_feature.
- missing_index: A RankHistogram from index_name to the number of examples
missing the corresponding index_feature.
- min_length_diff: A RankHistogram from index_name to the minimum of
len(index_feature) - len(value_feature).
- max_length_diff: A RankHistogram from index_name to the maximum of
len(index_feature) - len(value_feature).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Iterable, List, Text, Tuple, Union
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.statistics.generators.constituents import count_missing_generator
from tensorflow_data_validation.statistics.generators.constituents import length_diff_generator
from tensorflow_metadata. | proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# TODO(https://issues.apache.org/jira/ | browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple # pylint: disable=g-bad-import-order
# LINT.IfChange(custom_stat_names)
# Custom statistic names emitted per sparse feature; the LINT markers keep
# these strings in sync with the anomalies schema validation code.
_MAX_LENGTH_DIFF_NAME = 'max_length_diff'
_MIN_LENGTH_DIFF_NAME = 'min_length_diff'
_MISSING_INDEX_NAME = 'missing_index'
_MISSING_VALUE_NAME = 'missing_value'
# LINT.ThenChange(../../anomalies/schema.cc:sparse_feature_custom_stat_names)
# Named tuple containing the FeaturePaths for the value and index features
# that comprise a given sparse feature.
_SparseFeatureComponents = tfx_namedtuple.namedtuple(
    '_SparseFeatureComponents', ['value_feature', 'index_features'])
def _get_all_sparse_features(
    schema: schema_pb2.Schema
) -> List[Tuple[types.FeaturePath, schema_pb2.SparseFeature]]:
  """Returns all sparse features in a schema."""

  def _collect(
      parent_path: types.FeaturePath, container: Union[schema_pb2.Schema,
                                                       schema_pb2.StructDomain]
  ) -> List[Tuple[types.FeaturePath, schema_pb2.SparseFeature]]:
    """Recursively gathers sparse features under `container`."""
    # Sparse features have no struct_domain, so they cannot be parents:
    # each one found at this level is a leaf and is recorded directly.
    found = [(parent_path.child(sf.name), sf)
             for sf in container.sparse_feature]
    # Struct features may contain further sparse features; recurse into them.
    for feature in container.feature:
      if feature.type == schema_pb2.STRUCT:
        found.extend(
            _collect(parent_path.child(feature.name), feature.struct_domain))
    return found

  return _collect(types.FeaturePath([]), schema)
def _get_components(
    sparse_features: Iterable[Tuple[types.FeaturePath,
                                    schema_pb2.SparseFeature]]
) -> Dict[types.FeaturePath, _SparseFeatureComponents]:
  """Returns the index and value feature paths that comprise sparse features."""
  components = {}
  # The index and value features for a given sparse feature always share the
  # sparse feature's parent path.
  for path, feature in sparse_features:
    parent = path.parent()
    value_path = parent.child(feature.value_feature.name)
    index_paths = {parent.child(index.name)
                   for index in feature.index_feature}
    components[path] = _SparseFeatureComponents(value_path, index_paths)
  return components
class SparseFeatureStatsGenerator(stats_generator.CompositeStatsGenerator):
  """Generates statistics for sparse features."""

  def __init__(self,
               schema: schema_pb2.Schema,
               name: Text = 'SparseFeatureStatsGenerator') -> None:
    """Initializes a sparse feature statistics generator.

    Args:
      schema: A required schema for the dataset.
      name: An optional unique name associated with the statistics generator.
    """
    self._sparse_feature_components = _get_components(
        _get_all_sparse_features(schema))
    # Create length diff generators for each index / value pair and count
    # missing generator for all paths.
    constituents = []
    for _, (value, indices) in self._sparse_feature_components.items():
      required_paths = [value] + list(indices)
      constituents.append(
          count_missing_generator.CountMissingGenerator(value, required_paths))
      for index in indices:
        constituents.append(
            length_diff_generator.LengthDiffGenerator(index, value,
                                                      required_paths))
        constituents.append(
            count_missing_generator.CountMissingGenerator(
                index, required_paths))
    super(SparseFeatureStatsGenerator, self).__init__(name, constituents,
                                                      schema)

  def extract_composite_output(self, accumulator):
    """Builds per-sparse-feature custom stats from constituent accumulators.

    Emits missing_value as a scalar and missing_index / max_length_diff /
    min_length_diff as rank histograms keyed by index feature name.
    """
    stats = statistics_pb2.DatasetFeatureStatistics()
    for feature_path, (value,
                       indices) in self._sparse_feature_components.items():
      required_paths = [value] + list(indices)
      feature_stats = stats.features.add(path=feature_path.to_proto())
      # Number of examples missing the value feature.
      feature_stats.custom_stats.add(
          name=_MISSING_VALUE_NAME,
          num=accumulator[count_missing_generator.CountMissingGenerator.key(
              value, required_paths)])
      index_features_num_missing_histogram = statistics_pb2.RankHistogram()
      max_length_diff_histogram = statistics_pb2.RankHistogram()
      min_length_diff_histogram = statistics_pb2.RankHistogram()
      # One histogram bucket per index feature, labeled by its last step.
      for index in sorted(indices):
        index_label = index.steps()[-1]
        missing_bucket = index_features_num_missing_histogram.buckets.add()
        missing_bucket.label = index_label
        missing_bucket.sample_count = accumulator[
            count_missing_generator.CountMissingGenerator.key(
                index, required_paths)]
        min_diff, max_diff = accumulator[
            length_diff_generator.LengthDiffGenerator.key(
                index, value, required_paths)]
        max_length_bucket = max_length_diff_histogram.buckets.add()
        max_length_bucket.label = index_label
        max_length_bucket.sample_count = max_diff
        min_length_bucket = min_length_diff_histogram.buckets.add()
        min_length_bucket.label = index_label
        min_length_bucket.sample_count = min_diff
      feature_stats.custom_stats.add(
          name=_MISSING_INDEX_NAME,
          rank_histogram=index_features_num_missing_histogram)
      feature_stats.custom_stats.add(
          name=_MAX_LENGTH_DIFF_NAME, rank_histogram=max_length_diff_histogram)
      feature_stats.custom_stats.add(
          name=_MIN_LENGTH_DIFF_NAME, rank_histogram=min_length_diff_histogram)
    return stats
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class School(models.Model):
    """A school organization that royalties are tracked against."""
    _name = 'royalty.school'
    name = fields.Char('Name', size=255, required=True)
    # Mailing address fields.
    address_line1 = fields.Char( 'Address 1', size=255 )
    address_line2 = fields.Char( 'Address 2', size=255 )
    city = fields.Char( 'City', size=30 )
    state = fields.Char( 'State', size=2 )
    zip_code = fields.Char( 'ZipCode', size=10 )
    abbreviation = fields.Char( 'Organization Abbreviation', size=75 )
    active = fields.Boolean( 'Organization Active', default=True )
    # ID carried over from the legacy system for data migration.
    old_id = fields.Integer( 'Legacy ID' )
    products = fields.One2many( 'product.product', 'school_id', 'Products' )
    contacts = fields.One2many( 'royalty.contact', 'school_id', 'Contacts' )
|
from unittest import mock
import pytest
from mitmproxy.test import tflow
from mitmproxy.net.http import http1
from mitmproxy.net.tcp import TCPClient
from mitmproxy.test.tutils import treq
from ... import tservers
class TestHTTPFlow:

    def test_repr(self):
        """A flow carrying both a response and an error has a usable repr."""
        flow = tflow.tflow(resp=True, err=True)
        assert repr(flow)
class TestInvalidRequests(tservers.HTTPProxyTest):
    """The proxy must reject malformed or out-of-place requests with 400."""
    ssl = True

    def test_double_connect(self):
        # A second CONNECT inside an established tunnel is not allowed.
        p = self.pathoc()
        with p.connect():
            r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port))
        assert r.status_code == 400
        assert b"Unexpected CONNECT" in r.content

    def test_relative_request(self):
        # A relative request form is invalid for a regular (non-CONNECT) proxy.
        p = self.pathoc_raw()
        with p.connect():
            r = p.request("get:/p/200")
        assert r.status_code == 400
        assert b"Invalid HTTP request form" in r.content
class TestProxyMisconfiguration(tservers.TransparentProxyTest):
    """A transparent proxy should flag absolute-form requests as client error."""

    def test_absolute_request(self):
        p = self.pathoc()
        with p.connect():
            r = p.request("get:'http://localhost:%d/p/200'" % self.server.port)
        assert r.status_code == 400
        assert b"misconfiguration" in r.content
class TestExpectHeader(tservers.HTTPProxyTest):
    """The proxy must answer 'Expect: 100-continue' before the body is sent."""

    def test_simple(self):
        client = TCPClient(("127.0.0.1", self.proxy.port))
        client.connect()
        # call pathod server, wait a second to complete the request
        client.wfile.write(
            b"POST http://localhost:%d/p/200 HTTP/1.1\r\n"
            b"Expect: 100-continue\r\n"
            b"Content-Length: 16\r\n"
            b"\r\n" % self.server.port
        )
        client.wfile.flush()
        # The proxy itself acknowledges with 100 Continue before the body.
        assert client.rfile.readline() == b"HTTP/1.1 100 Continue\r\n"
        assert client.rfile.readline() == b"\r\n"
        client.wfile.write(b"0123456789abcdef\r\n")
        client.wfile.flush()
        resp = http1.read_response(client.rfile, treq())
        assert resp.status_code == 200
        client.finish()
        client.close()
class TestHeadContentLength(tservers.HTTPProxyTest):
    """HEAD responses keep their Content-Length header despite lacking a body."""

    def test_head_content_length(self):
        p = self.pathoc()
        with p.connect():
            resp = p.request(
                """head:'%s/p/200:h"Content-Length"="42"'""" % self.server.urlbase
            )
        assert resp.headers["Content-Length"] == "42"
class TestStreaming(tservers.HTTPProxyTest):
    """When streaming is enabled, bodies must be forwarded in small chunks."""

    @pytest.mark.parametrize('streaming', [True, False])
    def test_streaming(self, streaming):
        # Addon that flips request/response streaming per the parametrization.
        class Stream:
            def requestheaders(self, f):
                f.request.stream = streaming
            def responseheaders(self, f):
                f.response.stream = streaming
        # Installed below as an autospec side_effect for Writer.write, so
        # 'self' is the Writer instance; with streaming on, no single write
        # may exceed the 4096-byte chunk size.
        def assert_write(self, v):
            if streaming:
                assert len(v) <= 4096
            return self.o.write(v)
        self.master.addons.add(Stream())
        p = self.pathoc()
        with p.connect():
            with mock.patch("mitmproxy.net.tcp.Writer.write", side_effect=assert_write, autospec=True):
                # response with 10000 bytes
                r = p.request("post:'%s/p/200:b@10000'" % self.server.urlbase)
                assert len(r.content) == 10000
                # request with 10000 bytes
                assert p.request("post:'%s/p/200':b@10000" % self.server.urlbase)
|
#encoding=utf-8
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "liyi"
__date__ = "2017-07-06"
import os
import sys
import argparse
import collections
import logging
import codecs
import charset
def generate_vocab(source_paths, save_path, delimiter=" ", max_vocab_size=150000, min_freq=10,
                   filter_en=True, filter_num=True, verb=True):
    """Build a token vocabulary from one or more tokenized text files.

    source_paths: list of input file paths (utf-8 text).
    save_path: where to write "token<TAB>count" lines, or None to skip saving.
    delimiter: token separator; "" means character-level tokenization.
    max_vocab_size: keep at most this many most-frequent tokens (None = no cap).
    min_freq: drop tokens whose count is not greater than this threshold.
    filter_en / filter_num: drop tokens containing latin letters / digits.
    Returns the list of (token, count) pairs, most frequent first.
    """
    # Counter for all tokens in the vocabulary
    vocab_cnt = collections.Counter()
    for path in source_paths:
        # `with` guarantees the handle is closed even on error (the original
        # never closed its files).
        with codecs.open(path, "r", "utf-8") as f:
            for line in f:
                if delimiter == "":
                    tokens = list(line.strip())
                else:
                    tokens = line.strip().split(delimiter)
                vocab_cnt.update(t for t in tokens if len(t) > 0)
    ##filter vocab
    if filter_en is True or filter_num is True:
        new_vocab_cnt = collections.Counter()
        for word in vocab_cnt:
            skip = False
            for char in word:
                if filter_en and charset.is_alphabet(char):
                    skip = True
                elif filter_num and charset.is_number(char):
                    skip = True
                elif charset.is_chinese_punctuation(char):  ##solve 。榜样
                    if len(word) > 1:
                        print("{} is not right".format(word))
                        skip = True
                if skip is True:
                    break
            if skip is False:
                new_vocab_cnt[word] = vocab_cnt[word]
        vocab_cnt = new_vocab_cnt
    logging.info("Found %d unique tokens in the vocabulary.", len(vocab_cnt))
    # Filter tokens below the frequency threshold
    if min_freq > 0:
        # BUG FIX: the filtered counter used to be assigned to a throwaway
        # variable, so min_freq had no effect; also log the post-filter size.
        vocab_cnt = collections.Counter(
            {w: c for w, c in vocab_cnt.items() if c > min_freq})
        logging.info("Found %d unique tokens with frequency > %d.",
                     len(vocab_cnt), min_freq)
    # Sort tokens by 1. frequency 2. lexically to break ties
    word_with_counts = sorted(
        vocab_cnt.most_common(), key=lambda x: (x[1], x[0]), reverse=True)
    # Take only max-vocab
    if max_vocab_size is not None:
        word_with_counts = word_with_counts[:max_vocab_size]
    if save_path is not None:
        save_path = os.path.abspath(save_path)
        if not os.path.exists(os.path.dirname(save_path)):
            os.makedirs(os.path.dirname(save_path))
        with codecs.open(save_path, "w", "utf-8") as f:
            for word, count in word_with_counts:
                f.write("{}\t{}\n".format(word, count))
        print("generate vocab path {}".format(save_path))
    return word_with_counts
if __name__ == "__main__":
    # Command-line interface for the vocabulary generator.
    arg_parser = argparse.ArgumentParser(
        description="Generate vocabulary for a tokenized text file.")
    arg_parser.add_argument(
        "--min_frequency",
        dest="min_frequency",
        type=int,
        default=0,
        help="Minimum frequency of a word to be included in the vocabulary.")
    arg_parser.add_argument(
        "--max_vocab_size",
        dest="max_vocab_size",
        type=int,
        help="Maximum number of tokens in the vocabulary")
    arg_parser.add_argument(
        "--downcase",
        dest="downcase",
        type=bool,
        help="If set to true, downcase all text before processing.",
        default=False)
    arg_parser.add_argument(
        "infile",
        nargs="+",
        type=str,
        help="Input tokenized text file to be processed.")
    arg_parser.add_argument(
        "--delimiter",
        dest="delimiter",
        type=str,
        default=" ",
        help="Delimiter character for tokenizing. Use \" \" and \"\" for word and char level respectively."
    )
    args = arg_parser.parse_args()
|
name = raw_inp | ut("Enter your name:")
print "Hello" , na | me
|
rd_set,
implicit_first_group_key=None,
keyword_repeat_allowed=True,
group_repeated_keywords=None,
only_found_keywords=False,
):
"""
Return dictionary with keywords as keys and following arguments as value.
For example when keywords are "first" and "seconds" then for arg_list
["first", 1, 2, "second", 3] it returns {"first": [1, 2], "second": [3]}
list arg_list is commandline arguments containing keywords
set keyword_set contain all expected keywords
string implicit_first_group_key is the key for capturing of arguments before
the occurrence of the first keyword. implicit_first_group_key is not
a keyword => its occurence in args is considered as ordinary argument.
bool keyword_repeat_allowed is the flag to turn on/off checking the
uniqueness of each keyword in arg_list.
list group_repeated_keywords contains keywords for which each occurence is
packed separately. For example when keywords are "first" and "seconds"
and group_repeated_keywords is ["first"] then for arg_list
["first", 1, 2, "second", 3, "first", 4] it returns
{"first": [[1, 2], [4]], "second": [3]}.
For these keywords is allowed repeating.
bool only_found_keywords is flag for deciding to (not)contain keywords
that do not appeared in arg_list.
"""
def get_keywords_for_gr | ouping():
if not group_repeated_keywords:
return []
# implicit_first_group_key is not keyword: when it is in
# group_repeated_keywords but not in keyword_set is considered as
# unknown.
unknown_keywords = set(group_repeated_keywords) - set(keyword_set)
if unknown_keywords:
# to avoid developer mistake
raise AssertionError(
"Keywords in grouping not in keyword | set: {0}".format(
", ".join(unknown_keywords)
)
)
return group_repeated_keywords
def get_completed_groups():
completed_groups = groups.copy()
if not only_found_keywords:
for keyword in keyword_set:
if keyword not in completed_groups:
completed_groups[keyword] = []
if (
implicit_first_group_key
and implicit_first_group_key not in completed_groups
):
completed_groups[implicit_first_group_key] = []
return completed_groups
def is_acceptable_keyword_occurence(keyword):
return (
keyword not in groups
or keyword_repeat_allowed
or keyword in keywords_for_grouping
)
def process_keyword(keyword):
if not is_acceptable_keyword_occurence(keyword):
raise CmdLineInputError(
"'{0}' cannot be used more than once".format(keyword)
)
groups.setdefault(keyword, [])
if keyword in keywords_for_grouping:
groups[keyword].append([])
def process_non_keyword(keyword, arg):
place = groups[keyword]
if keyword in keywords_for_grouping:
place = place[-1]
place.append(arg)
groups = {}
keywords_for_grouping = get_keywords_for_grouping()
if arg_list:
current_keyword = None
if arg_list[0] not in keyword_set:
if not implicit_first_group_key:
raise CmdLineInputError()
process_keyword(implicit_first_group_key)
current_keyword = implicit_first_group_key
for arg in arg_list:
if arg in keyword_set:
process_keyword(arg)
current_keyword = arg
else:
process_non_keyword(current_keyword, arg)
return get_completed_groups()
def parse_typed_arg(arg, allowed_types, default_type):
    """
    Get (type, value) from a typed commandline argument.
    Split the argument by the type separator and return the type and the value.
    Raise CmdLineInputError if the argument format or type is not valid.
    string arg -- commandline argument
    Iterable allowed_types -- list of allowed argument types
    string default_type -- type to return if the argument doesn't specify a type
    """
    if ARG_TYPE_DELIMITER not in arg:
        return default_type, arg
    arg_type, arg_value = arg.split(ARG_TYPE_DELIMITER, 1)
    # An empty type prefix (e.g. "%value") falls back to the default type.
    if not arg_type:
        return default_type, arg_value
    if arg_type in allowed_types:
        return arg_type, arg_value
    raise CmdLineInputError(
        (
            "'{arg_type}' is not an allowed type for '{arg_full}', use "
            "{hint}"
        ).format(
            arg_type=arg_type,
            arg_full=arg,
            hint=", ".join(sorted(allowed_types)),
        )
    )
def _is_num(arg):
return arg.isdigit() or arg.lower() == "infinity"
def _is_float(arg: str) -> bool:
try:
float(arg)
return True
except ValueError:
return False
def _is_negative_num(arg: str) -> bool:
    """Return True for arguments like "-5", "-infinity" or "-1.5"."""
    if not arg.startswith("-"):
        return False
    return _is_num(arg[1:]) or _is_float(arg)
def is_short_option_expecting_value(arg):
    """Return True if *arg* is a short option (e.g. "-f") that takes a value."""
    if len(arg) != 2 or not arg.startswith("-"):
        return False
    # getopt-style short-option specs mark value-taking options with ":".
    return "{0}:".format(arg[1]) in PCS_SHORT_OPTIONS
def is_long_option_expecting_value(arg):
    """Return True if *arg* is a long option (e.g. "--force") that takes a value."""
    if len(arg) <= 2 or not arg.startswith("--"):
        return False
    # getopt-style long-option specs mark value-taking options with "=".
    return "{0}=".format(arg[2:]) in PCS_LONG_OPTIONS
def is_option_expecting_value(arg):
    """Return True if *arg* is a short or long option that expects a value."""
    return (
        is_short_option_expecting_value(arg)
        or is_long_option_expecting_value(arg)
    )
# DEPRECATED
# TODO remove
# This function is called only by deprecated code for parsing argv containing
# negative numbers without -- prepending them.
def filter_out_non_option_negative_numbers(arg_list):
    """
    Return a pair: arg_list without non-option negative numbers, and the
    negative numbers that were filtered out.
    Negative numbers following an option expecting a value are kept.
    There is the problematic legacy:
    Argument "--" has special meaning: it can be used to signal that no more
    options will follow. This would solve the problem with negative numbers in
    a standard way: there would be no special approach to negative numbers,
    everything would be left in the hands of users.
    We cannot use "--" as it would be a backward incompatible change:
    * "pcs ... -infinity" would not work any more, users would have to switch
    to "pcs ... -- ... -infinity"
    * previously, position of some --options mattered, for example
    "--clone <clone options>", this syntax would not be possible with the "--"
    in place
    list arg_list contains command line arguments
    """
    kept = []
    removed = []
    previous = ""
    for arg in arg_list:
        # A negative number is dropped unless it is the value of the
        # immediately preceding value-taking option.
        if _is_negative_num(arg) and not is_option_expecting_value(previous):
            removed.append(arg)
        else:
            kept.append(arg)
        previous = arg
    return kept, removed
# DEPRECATED
# TODO remove
# This function is called only by deprecated code for parsing argv containing
# negative numbers without -- prepending them.
def filter_out_options(arg_list):
"""
Return arg_list without options and its negative numbers.
See a comment in filter_out_non_option_negative_numbers.
list arg_list contains command line arguments
"""
args_without_options = []
for i, arg in enumerate(arg_list):
prev_arg = arg_list[i - 1] if i > 0 else ""
if not is_option_expecting_value(prev_arg) and (
not arg.starts |
from djan | go.apps import AppConfig
class MoocConfig(AppConfig):
    """Django application configuration for the ``mooc`` app."""
    name = 'mooc'
|
import sympy

# Classify the stationary point of the Rosenbrock-style function
# f(x1, x2) = 100*(x2 - x1**2)**2 + (1 - x1)**2
# by inspecting the eigenvalues of its Hessian there.
x1, x2 = sympy.symbols('x1 x2')
f = 100*(x2 - x1**2)**2 + (1-x1)**2
df_dx1 = sympy.diff(f, x1)
df_dx2 = sympy.diff(f, x2)
H = sympy.hessian(f, (x1, x2))
# First-order conditions give the stationary points.
xs = sympy.solve([df_dx1, df_dx2], [x1, x2])
H_xs = H.subs([(x1, xs[0][0]), (x2, xs[0][1])])
lambda_xs = H_xs.eigenvals()
# Count the non-positive eigenvalues to decide the nature of the point.
count = 0
for ev in lambda_xs.keys():
    if ev.evalf() <= 0:
        count += 1
if count == 0:
    # All eigenvalues positive: positive-definite Hessian.
    print('Local minima')  # modernized from Python 2 print statements
elif count == len(lambda_xs.keys()):
    print('Local maxima')  # fixed output typo: was 'Lacal maxima'
else:
    print('Saddle point')
|
f cluster is passed,
restrict addresses to public and cluster networks.
Note: Some optimizations could be done here in the multi module (such as
skipping the source and destination when they are the same). However, the
unoptimized version is taking ~2.5 seconds on 18 minions with 72 addresses
for success. Failures take between 6 to 12 seconds. Optimizations should
focus there.
TODO: Convert commented out print statements to log.debug
CLI Example: (Before DeepSea with a cluster configuration)
.. code-block:: bash
sudo salt-run net.ping
or you can run it with exclude
.. code-block:: bash
sudo salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
(After DeepSea with a cluster configuration)
.. code-block:: bash
sudo salt-run net.ping cluster=ceph
sudo salt-run net.ping ceph
"""
exclude_string = exclude_iplist = None
if exclude:
exclude_string, exclude_iplist = _exclude_filter(exclude)
extra_kwargs = _skip_dunder(kwargs)
if _skip_dunder(kwargs):
print "Unsupported parameters: {}".format(" ,".join(extra_kwargs.keys()))
text = re.sub(re.compile("^ {12}", re.MULTILINE), "", '''
salt-run net.ping [cluster] [exclude]
Ping all a | ddresses from all addresses on all minions.
| If cluster is specified, restrict addresses to cluster and public networks.
If exclude is specified, remove matching addresses. See Salt compound matchers.
within exclude individual ip address will be remove a specific target interface
instead of ping from, the ping to interface will be removed
Examples:
salt-run net.ping
salt-run net.ping ceph
salt-run net.ping ceph L@mon1.ceph
salt-run net.ping cluster=ceph exclude=L@mon1.ceph
salt-run net.ping exclude=S@192.168.21.254
salt-run net.ping exclude=S@192.168.21.0/29
salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
''')
print text
return
local = salt.client.LocalClient()
if cluster:
search = "I@cluster:{}".format(cluster)
if exclude_string:
search += " and not ( " + exclude_string + " )"
log.debug( "ping: search {} ".format(search))
networks = local.cmd(search , 'pillar.item', [ 'cluster_network', 'public_network' ], expr_form="compound")
#print networks
total = local.cmd(search , 'grains.get', [ 'ipv4' ], expr_form="compound")
#print addresses
addresses = []
for host in sorted(total.iterkeys()):
if 'cluster_network' in networks[host]:
addresses.extend(_address(total[host], networks[host]['cluster_network']))
if 'public_network' in networks[host]:
addresses.extend(_address(total[host], networks[host]['public_network']))
else:
search = "*"
if exclude_string:
search += " and not ( " + exclude_string + " )"
log.debug( "ping: search {} ".format(search))
addresses = local.cmd(search , 'grains.get', [ 'ipv4' ], expr_form="compound")
addresses = _flatten(addresses.values())
# Lazy loopback removal - use ipaddress when adding IPv6
try:
if addresses:
addresses.remove('127.0.0.1')
if exclude_iplist:
for ex_ip in exclude_iplist:
log.debug( "ping: removing {} ip ".format(ex_ip))
addresses.remove(ex_ip)
except ValueError:
log.debug( "ping: remove {} ip doesn't exist".format(ex_ip))
pass
#print addresses
results = local.cmd(search, 'multi.ping', addresses, expr_form="compound")
#print results
_summarize(len(addresses), results)
def _address(addresses, network):
    """
    Return the subset of addresses that belong to the given network.
    """
    return [address for address in addresses
            if IPAddress(address) in IPNetwork(network)]
def _exclude_filter(excluded):
    """
    Internal exclude_filter return string in compound format
    Compound format = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar',
                       'J': 'pillar_pcre', 'L': 'list', 'N': None,
                       'S': 'ipcidr', 'E': 'pcre'}
    IPV4 address = "255.255.255.255"
    hostname = "myhostname"
    """
    log.debug("_exclude_filter: excluding {}".format(excluded))
    excluded = excluded.split(",")
    log.debug("_exclude_filter: split ',' {}".format(excluded))
    pattern_compound = re.compile("^.*([GPIJLNSE]\@).*$")
    pattern_iplist = re.compile("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
    pattern_ipcidr = re.compile("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$")
    pattern_hostlist = re.compile("^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9-]*[a-zA-Z0-9]).)*([A-Za-z]|[A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])$")
    compound = []
    ipcidr = []
    iplist = []
    hostlist = []
    regex_list = []
    # Classify each entry; the first matching pattern wins, and anything
    # unrecognized is treated as a regex host match.
    for para in excluded:
        if pattern_compound.match(para):
            log.debug("_exclude_filter: Compound {}".format(para))
            compound.append(para)
        elif pattern_iplist.match(para):
            log.debug("_exclude_filter: ip {}".format(para))
            iplist.append(para)
        elif pattern_ipcidr.match(para):
            log.debug("_exclude_filter: ipcidr {}".format(para))
            ipcidr.append("S@" + para)
        elif pattern_hostlist.match(para):
            hostlist.append("L@" + para)
            log.debug("_exclude_filter: hostname {}".format(para))
        else:
            regex_list.append("E@" + para)
            log.debug("_exclude_filter: not sure but likely Regex host {}".format(para))
    new_compound_excluded = " or ".join(compound + hostlist + regex_list + ipcidr)
    log.debug("_exclude_filter new formed compound excluded list = {}".format(new_compound_excluded))
    # Empty string / empty list normalize to None for both return slots.
    return new_compound_excluded or None, iplist or None
def _flatten(l):
    """
    Flatten an array of arrays into a deduplicated list.
    """
    log.debug("_flatten: {}".format(l))
    flattened = set()
    for sublist in l:
        flattened.update(sublist)
    return list(flattened)
def _summarize(total, results):
    """
    Summarize the successes, failures and errors across all minions.

    total -- number of addresses each minion was expected to reach
    results -- mapping of minion name to its multi.ping result dict
    (Python 2 module: uses print statements and dict.iterkeys.)
    """
    success = []
    failed = []
    errored = []
    slow = []
    log.debug( "_summarize: results {}".format(results))
    for host in sorted(results.iterkeys()):
        # A minion counts as a success only when it reached every address.
        if results[host]['succeeded'] == total:
            success.append(host)
        if 'failed' in results[host]:
            failed.append("{} from {}".format(results[host]['failed'], host))
        if 'errored' in results[host]:
            errored.append("{} from {}".format(results[host]['errored'], host))
        if 'slow' in results[host]:
            slow.append("{} from {} average rtt {}".format(results[host]['slow'], host, "{0:.2f}".format(results[host]['avg'])))
    if success:
        # NOTE(review): this averages 'avg' over ALL minions (and reuses the
        # loop variable `host`), not only the successful ones -- confirm
        # that this is intended.
        avg = sum( results[host].get('avg') for host in results) / len(results)
    else:
        avg = 0
    print "Succeeded: {} addresses from {} minions average rtt {} ms".format(total, len(success), "{0:.2f}".format(avg))
    if slow:
        print "Warning: \n {}".format("\n ".join(slow))
    if failed:
        print "Failed: \n {}".format("\n ".join(failed))
    if errored:
        print "Errored: \n {}".format("\n ".join(errored))
def _skip_dunder(settings):
"""
Skip double underscore keys
"""
return {k:v for k,v in settings.iteritems() |
#!/usr/local/bin/python3.5
import itertools
import sys
from .stuff import word_set
__version__ = "1.1.0"
def find_possible(lst):
    """
    Return all possible combinations of letters in lst
    @type lst: [str]
    @rtype: [str]
    """
    # Only full-length permutations can be anagrams, so generate those
    # directly. The original looped over permutations of every length
    # 0..len(lst) and discarded all but the full-length ones, doing a
    # factorial amount of wasted work; the kept order is unchanged.
    return [''.join(subset) for subset in itertools.permutations(lst, len(lst))]
def return_words(lst, word_set):
    """
    Return the candidates from lst that are words in word_set.
    Both the raw word and its capitalized form are checked because some
    words are capitalized in the word_set.
    @type lst: [str]
    @type word_set: set(str)
    @rtype: [str]
    """
    return [word for word in lst
            if word in word_set or word.capitalize() in word_set]
def main():
    """
    Entry point: read the anagram from argv and print every dictionary
    word that can be formed from its letters.
    """
    anagram = sys.argv[1]
    letters = list(anagram)
    candidates = find_possible(letters)
    matches = return_words(candidates, word_set)
    print('Solutions:')
    if not matches:
        print('None found')
    else:
        # Printing from a set prevents duplicate solutions.
        for solution in set(matches):
            print(solution)
|
# %% [markdown]
# # sklearn-porter
#
# Repository: [https://github.com/nok/sklearn-porter](https://github.com/nok/sklearn-porter)
#
# ## RandomForestClassifier
#
# Documentation: [sklearn.ensemble.RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# %%
# Make the repository root importable so sklearn_porter resolves.
import sys
sys.path.append('../../../../..')
# %% [markdown]
# ### Load data
# %%
from sklearn.datasets import load_iris
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
# Show the dataset dimensions.
print(X.shape, y.shape)
# %% [markdown]
# ### Train classifier
# %%
from sklearn.ensemble import RandomForestClassifier
# Fixed random_state keeps the trained forest reproducible.
clf = RandomForestClassifier(n_estimators=15, max_depth=None,
                             min_samples_split=2, random_state=0)
clf.fit(X, y)
# %% [markdown]
# ### Transpile classifier
# %%
from sklearn_porter import Porter
# Transpile the fitted model to JavaScript, embedding the model data.
porter = Porter(clf, language='js')
output = porter.export(embed_data=True)
print(output)
# %% [markdown]
# ### Run classification in JavaScript
# %%
# Save classifier:
# with open('RandomForestClassifier.js', 'w') as f:
#     f.write(output)
# Run classification:
# if hash node 2/dev/null; then
#     node RandomForestClassifier.js 1 2 3 4
# fi
|
ast_audit_txn:
res = {}
payload_data = last_audit_txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA]
for ledger_id in payload_data[AUDIT_TXN_STATE_ROOT].keys():
fake_pp = BlsBftReplicaPlenum._create_fake_pre_prepare_for_multi_sig(
ledger_id,
payload_data[AUDIT_TXN_STATE_ROOT].get(ledger_id),
payload_data[AUDIT_TXN_LEDGER_ROOT].get(ledger_id),
pre_prepare
)
bls_signature = self._sign_state(fake_pp)
logger.debug("{}{} signed COMMIT {} for state {} with sig {}"
.format(BLS_PREFIX, self, commit_params, state_root_hash, bls_signature))
res[str(ledger_id)] = bls_signature
commit_params.append(res)
return commit_params
# ----PROCESS----
def process_pre_prepare(self, pre_prepare: PrePrepare, sender):
# does not matter which ledger id is current PPR for
# mult-sig is for domain ledger anyway
self._save_multi_sig_shared(pre_prepare)
def process_prepare(self, prepare: Prepare, sender):
pass
def process_commit(self, commit: Commit, sender):
key_3PC = (commit.viewNo, commit.ppSeqNo)
if f.BLS_SIGS.nm in commit and commit.blsSigs is not None:
if key_3PC not in self._all_signatures:
self._all_signatures[key_3PC] = {}
for ledger_id in commit.blsSigs.keys():
if ledger_id not in self._all_signatures[key_3PC]:
self._all_signatures[key_3PC][ledger_id] = {}
self._all_signatures[key_3PC][ledger_id][self.get_node_name(sender)] = commit.blsSigs[ledger_id]
def process_order(self, key, quorums, pre_prepare):
if not self._can_process_ledger(pre_prepare.ledgerId):
return
if not self._can_calculate_multi_sig(key, quorums):
return
# calculate signature always to keep master and non-master in sync
# but save on master only
all_bls_multi_sigs = self._calculate_all_multi_sigs(key, pre_prepare)
if not self._is_master:
return
if all_bls_multi_sigs:
for bls_multi_sig in all_bls_multi_sigs:
self._save_multi_sig_local(bls_multi_sig)
self._all_bls_latest_multi_sigs = all_bls_multi_sigs
# ----GC----
def gc(self, key_3PC):
keys_to_remove = []
for key in self._all_signatures.keys():
if compare_3PC_keys(key, key_3PC) >= 0:
keys_to_remove.append(key)
for key in keys_to_remove:
self._all_signatures.pop(key, None)
# ----MULT_SIG----
def _create_multi_sig_value_for_pre_prepare(self, pre_prepare: PrePrepare, pool_state_root_hash):
multi_sig_value = MultiSignatureValue(ledger_id=pre_prepare.ledgerId,
state_root_hash=pre_prepare.stateRootHash,
pool_state_root_hash=pool_state_root_hash,
txn_root_hash=pre_prepare.txnRootHash,
timestamp=pre_prepare.ppTime)
return multi_sig_value
    def _validate_signature(self, sender, bls_sig, pre_prepare: PrePrepare):
        """Verify a single BLS signature share from *sender* against the
        multi-sig value derived from *pre_prepare*. Returns True/False."""
        pool_root_hash = self._get_pool_root_hash(pre_prepare, serialize=False)
        sender_node = self.get_node_name(sender)
        # Look up the sender's BLS public key as registered at this pool state.
        pk = self._bls_bft.bls_key_register.get_key_by_name(sender_node, pool_root_hash)
        if not pk:
            # No key known for the sender -- the share cannot be validated.
            return False
        pool_root_hash_ser = self._get_pool_root_hash(pre_prepare)
        message = self._create_multi_sig_value_for_pre_prepare(pre_prepare,
                                                               pool_root_hash_ser)
        result = self._bls_bft.bls_crypto_verifier.verify_sig(bls_sig, message.as_single_value(), pk)
        if not result:
            logger.info("Incorrect bls signature {} in commit for "
                        "{} public key: '{}' and message: '{}' from "
                        "pre-prepare: {}".format(bls_sig, sender,
                                                 IndyCryptoBlsUtils.bls_to_str(pk),
                                                 message, pre_prepare))
        return result
def _validate_multi_sig(self, multi_sig: MultiSignature):
public_keys = []
pool_root_hash = self.state_root_serializer.deserialize(
multi_sig.value.pool_state_root_hash)
for node_name in multi_sig.participants:
bls_key = self._bls_bft.bls_key_register.get_key_by_name(node_name,
pool_root_hash)
# TODO: It's optional for now
if bls_key:
public_keys.append(bls_key)
value = multi_sig.value.as_single_value()
return self._bls_bft.bls_crypto_verifier.verify_multi_sig(multi_sig.signature,
value,
public_keys)
def _sign_state(self, pre_prepare: PrePrepare):
pool_root_hash = self._get_pool_root_hash(pre_prepare)
message = self._create_multi_sig_value_for_pre_prepare(pre_prepare,
pool_root_hash).as_single_value()
return self._bls_bft.bls_crypto_signer.sign(message)
def _can_calculate_multi_sig(self,
key_3PC,
quorums) -> bool:
if key_3PC not in self._all_signatures:
return False
sigs_for_request = self._all_signatures[key_3PC]
sigs_invalid = list(
filt | er(
lambda item: not quorums.bls_signatures.is_reached(len(list(item[1].values()))),
sigs_for_request.items()
)
)
if sigs_invalid:
for lid, sigs in sigs_invalid:
logger.debug(
'{}Can not create bls signatures for batch {}: '
| 'There are only {} signatures for ledger {}, '
'while {} required for multi_signature'.format(BLS_PREFIX,
key_3PC,
len(list(sigs.values())),
quorums.bls_signatures.value,
lid)
)
return False
return True
    def _calculate_all_multi_sigs(self, key_3PC, pre_prepare) -> Optional[list]:
        """Aggregate the collected signature shares for every ledger of the
        3PC batch *key_3PC* into a list of multi-signatures."""
        sigs_for_request = self._all_signatures.get(key_3PC)
        res = []
        if sigs_for_request:
            for lid in sigs_for_request:
                sig = sigs_for_request[lid]
                # State/ledger roots are taken from the matching audit ledger
                # transaction; without it no multi-sig is built for this lid.
                audit_txn = self._get_correct_audit_transaction(pre_prepare)
                if audit_txn:
                    audit_payload = audit_txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA]
                    # Build a synthetic PrePrepare carrying the audited roots
                    # for this ledger, then aggregate the shares against it.
                    fake_pp = BlsBftReplicaPlenum. \
                        _create_fake_pre_prepare_for_multi_sig(int(lid),
                                                               audit_payload[AUDIT_TXN_STATE_ROOT][int(lid)],
                                                               audit_payload[AUDIT_TXN_LEDGER_ROOT][int(lid)],
                                                               pre_prepare)
                    res.append(self._calculate_single_multi_sig(sig, fake_pp))
        return res
def _calculate_single_multi_sig(self, sigs_for_request, pre_prepare) -> Optional[MultiSignature]:
bls_signatures = list(sigs_for_request.values())
participants = list(sigs_for_request.keys())
sig = self._bls_bft.bls_crypto_verifier.create_multi_sig(bls_signatures)
pool_root_hash_ser = self._get_pool_root_hash(pre_prepare)
multi_sig_value = self._create_multi_sig_value_for_pre_prepare(pre_prepare,
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-28 14:28
from __future__ import unicode | _literals
from django.db import migrations, models
cl | ass Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('s_no', models.IntegerField()),
('name', models.CharField(max_length=200)),
],
),
]
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Molecule design set library table.
"""
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, rack_layout_tbl, molecule_design_pool_set_tbl):
    "Table factory."
    # Foreign keys are built up-front for readability; check constraints
    # guard the positive numeric properties of a library.
    pool_set_fk = ForeignKey(
        molecule_design_pool_set_tbl.c.molecule_design_pool_set_id,
        ondelete='CASCADE', onupdate='CASCADE')
    rack_layout_fk = ForeignKey(
        rack_layout_tbl.c.rack_layout_id,
        onupdate='CASCADE', ondelete='CASCADE')
    return Table(
        'molecule_design_library', metadata,
        Column('molecule_design_library_id', Integer, primary_key=True),
        Column('molecule_design_pool_set_id', Integer, pool_set_fk,
               nullable=False),
        Column('label', String, nullable=False, unique=True),
        Column('final_volume', Float,
               CheckConstraint('final_volume > 0'),
               nullable=False),
        Column('final_concentration', Float,
               CheckConstraint('final_concentration > 0'),
               nullable=False),
        Column('number_layouts', Integer,
               CheckConstraint('number_layouts > 0'),
               nullable=False),
        Column('rack_layout_id', Integer, rack_layout_fk,
               nullable=False))
|
from .careful impo | rt *
from .noisy import *
from . import streams
from . import resetable
from lasagne.updat | es import * |
'''Publish sensor events to MQTT broker.'''
import logging
import paho.mqtt.publish as mqtt_pub
import paho.mqtt.client as mqtt
import socket
class MqttPublisher():
    '''Publish sensor events to an MQTT broker.'''

    def __init__(self, broker, topic_prefix='/sensors'):
        '''Initialize a MqttPublisher instance.

        broker -- hostname of the MQTT broker to publish to
        topic_prefix -- leading path component of all generated topics
        '''
        self.broker = broker
        # TODO: Choose between hostname and fqdn
        self.node_name = socket.gethostname()
        self.topic_prefix = topic_prefix

    def get_topic(self, evt):
        '''Generate the MQTT topic for the event.'''
        data = {
            'prefix': self.topic_prefix,
            'node': self.node_name,
            'sensor': evt.getSensorName(),
            'quantity': evt.getQuantity(),
        }
        path_tmpl = '{prefix}/{node}/{sensor}/{quantity}'
        return path_tmpl.format(**data)

    def publish_event(self, evt):
        '''Publish a single sensor event.'''
        # The publish might fail, e.g. due to network problems. Just log
        # the exception and try again next time.
        try:
            topic = self.get_topic(evt)
            msg = "Publishing to topic '{0}'."
            logging.debug(msg.format(topic))
            # This fixes the protocol version to MQTT v3.1, because
            # the current version of the MQTT broker available in
            # raspbian does not support MQTT v3.1.1.
            mqtt_pub.single(
                topic=topic,
                payload=evt.toJSON(),
                hostname=self.broker,
                protocol=mqtt.MQTTv31)
        except Exception:
            # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit; only genuine errors are caught now.
            logging.exception('Publish of MQTT value failed.')

    def publish_events(self, evts):
        '''Publish a list of sensor events.'''
        for evt in evts:
            self.publish_event(evt)
|
from ft.db.dbtestcase import DbTestCase
from passerine.db.session import Session
from passerine.db.common import ProxyObject
from passerine.db.uow import Record
from passerine.db.entity import entity
from passerine.db.mapper import link, CascadingType, AssociationType
@entity('test_db_uow_ass_one_to_many_computer')
class Computer(object):
    """A named computer entity, assignable to a developer."""

    def __init__(self, name):
        self.name = name
@link('computer', Computer, association=AssociationType.ONE_TO_ONE)
@link('delegates', association=AssociationType.ONE_TO_MANY, cascading=[CascadingType.PERSIST, CascadingType.DELETE])
@entity('test_db_uow_ass_one_to_many_developer')
class Developer(object):
    """A developer with an optional computer and a list of delegates."""

    def __init__(self, name, computer=None, delegates=None):
        self.name = name
        self.computer = computer
        # BUG FIX: a mutable default argument (delegates=[]) would be
        # shared across every Developer constructed without delegates;
        # use None as the sentinel and create a fresh list per instance.
        self.delegates = delegates if delegates is not None else []
class TestDbUowAssociationOneToMany(DbTestCase):
    """Unit-of-work tests for one-to-many associations with cascading."""
    def setUp(self):
        self._setUp()
        self._reset_db(self.__data_provider())
    def test_fetching(self):
        # Delegates are loaded lazily as ProxyObject instances that still
        # expose the underlying attributes.
        c = self.session.collection(Developer)
        boss = c.filter_one({'name': 'boss'})
        self.assertIsInstance(boss.delegates, list)
        self.assertIsInstance(boss.delegates[0], ProxyObject)
        self.assertEqual(boss.delegates[0].name, 'a')
        self.assertIsInstance(boss.delegates[1], ProxyObject)
        self.assertEqual(boss.delegates[1].name, 'b')
    def test_cascading_on_persist(self):
        # Persisting the owner cascades the change made on a delegate.
        c = self.session.collection(Developer)
        boss = c.filter_one({'name': 'boss'})
        boss.delegates[0].name = 'assistant'
        self.session.persist(boss)
        self.session.flush()
        data = c.driver.find_one(c.name, {'_id': boss.delegates[0].id})
        self.assertEqual(boss.delegates[0].name, data['name'])
    def test_update_association(self):
        # Appending a new Developer to the association persists it and
        # updates the owner's stored delegate id list.
        c = self.session.collection(Developer)
        boss = c.filter_one({'name': 'boss'})
        boss.delegates[0].name = 'assistant'
        boss.delegates.append(Developer('c'))
        self.session.persist(boss)
        self.session.flush()
        data = c.driver.find_one(c.name, {'name': 'c'})
        self.assertIsNotNone(data)
        self.assertEqual(boss.delegates[2].id, data['_id'])
        data = c.driver.find_one(c.name, {'name': 'boss'})
        self.assertEqual(3, len(data['delegates']))
        for delegate in boss.delegates:
            self.assertIn(delegate.id, data['delegates'])
    def test_cascading_on_delete_with_some_deps(self):
        # Deleting one owner must not remove delegates still referenced
        # by another owner.
        c = self.session.collection(Developer)
        boss = c.filter_one({'name': 'boss'})
        boss.delegates[0].name = 'assistant'
        boss.delegates.append(Developer('c'))
        self.session.persist(boss)
        architect = Developer('architect', delegates=[boss.delegates[0]])
        self.session.persist(architect)
        self.session.flush()
        self.assertEqual(5, len(c.filter()))
        self.session.delete(architect)
        self.session.flush()
        self.assertEqual(4, len(c.filter()), 'should have some dependencies left (but no orphan node)')
    def test_cascading_on_delete_with_no_deps(self):
        # Deleting every owner removes all delegates (orphan removal).
        c = self.session.collection(Developer)
        boss = c.filter_one({'name': 'boss'})
        boss.delegates[0].name = 'assistant'
        boss.delegates.append(Developer('c'))
        self.session.persist(boss)
        architect = Developer('architect', delegates=[boss.delegates[0]])
        self.session.persist(architect)
        self.session.flush()
        self.session.delete(architect)
        self.session.delete(boss)
        self.session.flush()
        count = len(c.filter())
        self.assertEqual(0, count, 'There should not exist dependencies left (orphan removal). (remaining: {})'.format(count))
    def __data_provider(self):
        # Initial fixtures: two delegates ('a', 'b') and a boss referencing
        # them by id.
        return [
            {
                'class': Developer,
                'fixtures': [
                    {'_id': 1, 'name': 'a'},
                    {'_id': 2, 'name': 'b'},
                    {'_id': 3, 'name': 'boss', 'delegates': [1, 2]}
                ]
            }
        ]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test runner for TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shlex
import sys
impor | t tensorflow as tf
# pylint: disable=g-import-not-at-top
# pylint: disabl | e=g-bad-import-order
# pylint: disable=unused-import
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
try:
import cpuinfo
import psutil
except ImportError as e:
tf.logging.error("\n\n\nERROR: Unable to import necessary library: {}. "
"Issuing a soft exit.\n\n\n".format(e))
sys.exit(0)
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.tools.test import run_and_gather_logs_lib
FLAGS = tf.app.flags.FLAGS
# Flags selecting which test to run and where the serialized TestResults
# log should be written.
tf.app.flags.DEFINE_string("test_name", "", """Test target to run.""")
tf.app.flags.DEFINE_string(
    "test_args", "", """Test arguments, space separated.""")
tf.app.flags.DEFINE_string(
    "test_log_output", "", """Filename to write logs.""")
tf.app.flags.DEFINE_bool(
    "test_log_output_use_tmpdir", False,
    """Store the log output into tmpdir?.""")
# Build metadata recorded alongside the test results (see
# gather_build_configuration below).
tf.app.flags.DEFINE_string(
    "compilation_mode", "", """Mode used during this build (e.g. opt, dbg).""")
tf.app.flags.DEFINE_string(
    "cc_flags", "", """CC flags used during this build.""")
def gather_build_configuration():
  """Collect build metadata (mode and compiler flags) into a BuildConfiguration proto."""
  build_config = test_log_pb2.BuildConfiguration()
  build_config.mode = FLAGS.compilation_mode
  # Drop include-style flags ("-i..."); record everything else verbatim.
  build_config.cc_flags.extend(
      flag for flag in shlex.split(FLAGS.cc_flags) if not flag.startswith("-i"))
  return build_config
def main(unused_args):
  """Run the configured test, attach build metadata and emit the results.

  The serialized TestResults proto is printed to stdout when no
  --test_log_output is given; otherwise it is written to that file
  (optionally rooted in the test tmpdir).
  """
  test_name = FLAGS.test_name
  test_args = FLAGS.test_args
  test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(
      test_name, test_args)
  # Additional bits we receive from bazel
  test_results.build_configuration.CopyFrom(gather_build_configuration())
  serialized_test_results = text_format.MessageToString(test_results)
  if not FLAGS.test_log_output:
    print(serialized_test_results)
    return
  if FLAGS.test_log_output_use_tmpdir:
    tmpdir = tf.test.get_temp_dir()
    output_path = os.path.join(tmpdir, FLAGS.test_log_output)
  else:
    output_path = os.path.abspath(FLAGS.test_log_output)
  # Use a context manager so the file is flushed and closed deterministically
  # (the original relied on GC to close the GFile handle).
  with tf.gfile.GFile(output_path, "w") as log_file:
    log_file.write(serialized_test_results)
  tf.logging.info("Test results written to: %s" % output_path)
if __name__ == "__main__":
  # tf.app.run parses the flags defined above, then invokes main().
  tf.app.run()
|
else:
ftmp = lib.H5TmpFile()
eris_vvop = ftmp.create_dataset('vvop', (nvir,nvir,nocc,nmo), dtype)
orbsym = _sort_eri(mycc, eris, nocc, nvir, eris_vvop, log)
mo_energy, t1T, t2T, vooo, fvo, restore_t2_inplace = \
_sort_t2_vooo_(mycc, orbsym, t1, t2, eris)
cpu1 = log.timer_debug1('CCSD(T) sort_eri', *cpu1)
cpu2 = list(cpu1)
orbsym = numpy.hstack((numpy.sort(orbsym[:nocc]),numpy.sort(orbsym[nocc:])))
o_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[:nocc], minlength=8)))
v_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[nocc:], minlength=8)))
o_sym = orbsym[:nocc]
oo_sym = (o_sym[:,None] ^ o_sym).ravel()
oo_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(oo_sym, minlength=8)))
nirrep = max(oo_sym) + 1
orbsym = orbsym.astype(numpy.int32)
o_ir_loc = o_ir_loc.astype(numpy.int32)
v_ir_loc = v_ir_loc.astype(numpy.int32)
oo_ir_loc = oo_ir_loc.astype(numpy.int32)
if dtype == numpy.complex128:
drv = _ccsd.libcc.CCsd_t_zcontract
else:
drv = _ccsd.libcc.CCsd_t_contract
et_sum = numpy.zeros(1, dtype=dtype)
def contract(a0, a1, b0, b1, cache):
cache_row_a, cache_col_a, cache_row_b, cache_col_b = cache
drv(et_sum.ctypes.data_as(ctypes.c_void_p),
mo_energy.ctypes.data_as(ctypes.c_void_p),
t1T.ctypes.data_as(ctypes.c_void_p),
t2T.ctypes.data_as(ctypes.c_void_p),
vooo.ctypes.data_as(ctypes.c_void_p),
fvo.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nocc), ctypes.c_int(nvir),
ctypes.c_int(a0), ctypes.c_int(a1),
ctypes.c_int(b0), ctypes.c_int(b1),
ctypes.c_int(nirrep),
o_ir_loc.ctypes.data_as(ctypes.c_void_p),
v_ir_loc.ctypes.data_as(ctypes.c_void_p),
oo_ir_loc.ctypes.data_as(ctypes.c_void_p),
orbsym.ctypes.data_as(ctypes.c_void_p),
cache_row_a.ctypes.data_as(ctypes.c_void_p),
cache_col_a.ctypes.data_as(ctypes.c_void_p),
cache_row_b.ctypes.data_as(ctypes.c_void_p),
cache_col_b.ctypes.data_as(ctypes.c_void_p))
cpu2[:] = log.timer_debug1('contract %d:%d,%d:%d'%(a0,a1,b0,b1), *cpu2)
# The rest 20% memory for cache b
mem_now = lib.current_memory()[0]
max_memory = max(0, mycc.max_memory - mem_now)
bufsize = (max_memory*.5e6/8-nocc**3*3*lib.num_threads())/(nocc*nmo) #*.5 for async_io
bufsize *= .5 #*.5 upper triangular part is loaded
bufsize *= .8 #*.8 for [a0:a1]/[b0:b1] partition
bufsize = max(8, bufsize)
log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)
with lib.call_in_background(contract, sync=not mycc.async_io) as async_contract:
for a0, a1 in reversed(list(lib.prange_tril(0, nvir, bufsize))):
cache_row_a = numpy.asarray(eris_vvop[a0:a1,:a1], order='C')
if a0 == 0:
cache_col_a = cache_row_a
else:
cache_col_a = numpy.asarray(eris_vvop[:a0,a0:a1], order='C')
async_contract(a0, a1, a0, a1, (cache_row_a,cache_col_a,
cache_row_a,cache_col_a))
for b0, b1 in lib.prange_tril(0, a0, bufsize/8):
cache_row_b = numpy.asarray(eris_vvop[b0:b1,:b1], order='C')
if b0 == 0:
cache_col_b = cache_row_b
else:
cache_col_b = numpy.asarray(eris_vvop[:b0,b0:b1], order='C')
async_contract(a0, a1, b0, b1, (cache_row_a,cache_col_a,
cache_row_b,cache_col_b))
t2 = restore_t2_inplace(t2T)
et_sum *= 2
if abs(et_sum[0].imag) > 1e-4:
logger.warn(mycc, 'Non-zero imaginary part of CCSD(T) energy was found %s',
et_sum[0])
et = et_sum[0].real
log.timer('CCSD(T)', *cpu0)
log.note('CCSD(T) correction = %.15g', et)
return et
def _sort_eri(mycc, eris, nocc, nvir, vvop, log):
    """Fill `vvop` (shape (nvir,nvir,nocc,nmo)) with symmetry-ordered integrals.

    Occupied and virtual orbitals are each permuted so orbitals of the same
    irrep are contiguous; returns the per-orbital irrep labels (all zeros
    when the molecule has no symmetry).
    """
    cpu1 = (logger.process_clock(), logger.perf_counter())
    mol = mycc.mol
    nmo = nocc + nvir
    if mol.symmetry:
        orbsym = symm.addons.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
                                            eris.mo_coeff, check=False)
        # NOTE(review): `% 10` appears to fold extended irrep ids into the
        # basic irrep set expected by the contraction kernels -- confirm
        # against the C implementation.
        orbsym = numpy.asarray(orbsym, dtype=numpy.int32) % 10
    else:
        orbsym = numpy.zeros(nmo, dtype=numpy.int32)
    # Permutations grouping occupied/virtual orbitals by irrep; `vrank` maps
    # an original virtual index to its position in the sorted order.
    o_sorted = _irrep_argsort(orbsym[:nocc])
    v_sorted = _irrep_argsort(orbsym[nocc:])
    vrank = numpy.argsort(v_sorted)
    # Block size over virtuals chosen from the available memory budget
    # (8 bytes per element of an (nvir, nocc, nmo) slab).
    max_memory = max(0, mycc.max_memory - lib.current_memory()[0])
    max_memory = min(8000, max_memory*.9)
    blksize = min(nvir, max(16, int(max_memory*1e6/8/(nvir*nocc*nmo))))
    log.debug1('_sort_eri max_memory %g blksize %d', max_memory, blksize)
    dtype = vvop.dtype
    # Writes to the HDF5-backed `vvop` happen in a background thread unless
    # async I/O is disabled.
    with lib.call_in_background(vvop.__setitem__, sync=not mycc.async_io) as save:
        bufopv = numpy.empty((nocc,nmo,nvir), dtype=dtype)
        buf1 = numpy.empty_like(bufopv)
        for j0, j1 in lib.prange(0, nvir, blksize):
            ovov = numpy.asarray(eris.ovov[:,j0:j1])
            #ovvv = numpy.asarray(eris.ovvv[:,j0:j1])
            ovvv = eris.get_ovvv(slice(None), slice(j0,j1))
            for j in range(j0,j1):
                oov = ovov[o_sorted,j-j0]
                ovv = ovvv[o_sorted,j-j0]
                #if ovv.ndim == 2:
                #    ovv = lib.unpack_tril(ovv, out=buf)
                # Reorder the remaining axes and conjugate before storing.
                bufopv[:,:nocc,:] = oov[:,o_sorted][:,:,v_sorted].conj()
                bufopv[:,nocc:,:] = ovv[:,v_sorted][:,:,v_sorted].conj()
                save(vrank[j], bufopv.transpose(2,0,1))
                # Double-buffer: the previous buffer may still be in flight
                # in the background writer.
                bufopv, buf1 = buf1, bufopv
            cpu1 = log.timer_debug1('transpose %d:%d'%(j0,j1), *cpu1)
    return orbsym
def _sort_t2_vooo_(mycc, orbsym, t1, t2, eris):
assert(t2.flags.c_contiguous)
vooo = numpy.asarray(eris.ovoo).transpose(1,0,3,2).conj().copy()
nocc, nvir = t1.shape
if mycc.mol.symmetry:
orbsym = numpy.asarray(orbsym, dtype=numpy.int32)
o_sorted = _irrep_argsort(orbsym[:nocc])
v_sorted = _irrep_argsort(orbsym[nocc:])
mo_energy = eris.mo_energy
mo_energy = numpy.hstack((mo_energy[:nocc][o_sorted],
mo_energy[nocc:][v_sorted]))
t1T = numpy.asarray(t1.T[v_sorted][:,o_sorted], order='C')
fvo = eris.fock[nocc:,:nocc]
fvo = numpy.asarray(fvo[v_sorted][:,o_sorted], order='C')
o_sym = orbsym[o_sorted]
oo_sym = (o_sym[:,None] ^ o_sym).ravel()
oo_sorted = _irrep_argsort(oo_sym)
#:vooo = eris.ovoo.transpose(1,0,2,3)
#:vooo = vooo[v_sorted][:,o_sorted][:,:,o_sorted][:,:,:,o_sorted]
#:vooo = vooo.reshape(nvir,-1,nocc)[:,oo_sorted]
oo_idx = numpy.arange(nocc**2).reshape(nocc,nocc)[o_sorted][:,o_sorted]
oo_idx = oo_idx.ravel()[oo_sorted]
oo_idx = (oo_idx[:,None]*nocc+o_sorted).ravel()
| vooo = lib.take_2d(vooo.reshape(nvir,-1), v_sorted, oo_idx)
vooo = vooo.reshape(nvir,nocc,nocc,nocc)
| #:t2T = t2.transpose(2,3,1,0)
#:t2T = ref_t2T[v_sorted][:,v_sorted][:,:,o_sorted][:,:,:,o_sorted]
#:t2T = ref_t2T.reshape(nvir,nvir,-1)[:,:,oo_sorted]
t2T = lib.transpose(t2.reshape(nocc**2,-1))
oo_idx = numpy.arange(nocc**2).reshape(nocc,nocc).T[o_sorted][:,o_sorted]
oo_idx = oo_idx.ravel()[oo_sorted]
vv_idx = (v_sorted[:,None]*nvir+v_sorted).ravel()
t2T = lib.take_2d(t2T.reshape(nvir**2,nocc**2), vv_idx, oo_idx, out=t2)
t2T = t2T.reshape(nvir,nvir,nocc,nocc)
def restore_t2_inplace(t2T):
tmp = numpy.zeros((nvir**2,nocc**2), dtype=t2T.dtype)
lib.takebak_2d(tmp, t2T.reshape(nvir**2,nocc**2), vv_idx, oo_idx)
t2 = lib.transpose(tmp.reshape(nvir**2,nocc**2), out=t2T)
return t2.reshape(nocc,nocc,nvir,nvir)
else:
fvo = eris.fock[nocc:,:nocc].copy()
t1T = t1.T.copy()
t2T = lib.transpose(t2.reshape(nocc**2,nvir**2))
t2T = lib.transpose(t2T.reshape(nvir**2,nocc,nocc), axes=(0,2,1), out=t2)
|
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reprod | uce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific pri | or written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
from datacube.api.query import Month
__author__ = "Simon Oldfield"
import argparse
import logging
import os
from datacube.api.model import Satellite, DatasetType
from datacube.api.utils import PqaMask, WofsMask, OutputFormat
_log = logging.getLogger()
def satellite_arg(s):
    """argparse type: map a satellite name onto the Satellite enum member."""
    if any(s == sat.name for sat in Satellite):
        return Satellite[s]
    raise argparse.ArgumentTypeError("{0} is not a supported satellite".format(s))
def month_arg(s):
    """argparse type: map a month name onto the Month enum member."""
    if any(s == month.name for month in Month):
        return Month[s]
    raise argparse.ArgumentTypeError("{0} is not a supported month".format(s))
def pqa_mask_arg(s):
    """argparse type: map a mask name onto the PqaMask enum member."""
    if any(s == mask.name for mask in PqaMask):
        return PqaMask[s]
    raise argparse.ArgumentTypeError("{0} is not a supported PQA mask".format(s))
def wofs_mask_arg(s):
    """argparse type: map a mask name onto the WofsMask enum member."""
    if any(s == mask.name for mask in WofsMask):
        return WofsMask[s]
    raise argparse.ArgumentTypeError("{0} is not a supported WOFS mask".format(s))
def dataset_type_arg(s):
    """argparse type: map a dataset type name onto the DatasetType enum member."""
    if any(s == ds_type.name for ds_type in DatasetType):
        return DatasetType[s]
    raise argparse.ArgumentTypeError("{0} is not a supported dataset type".format(s))
def writeable_dir(prospective_dir):
    """argparse type: validate that the argument is an existing, writeable directory."""
    validations = (
        (os.path.exists, "{0} doesn't exist"),
        (os.path.isdir, "{0} is not a directory"),
        (lambda path: os.access(path, os.W_OK), "{0} is not writeable"),
    )
    for is_valid, template in validations:
        if not is_valid(prospective_dir):
            raise argparse.ArgumentTypeError(template.format(prospective_dir))
    return prospective_dir
def readable_dir(prospective_dir):
    """argparse type: validate that the argument is an existing, readable directory."""
    validations = (
        (os.path.exists, "{0} doesn't exist"),
        (os.path.isdir, "{0} is not a directory"),
        (lambda path: os.access(path, os.R_OK), "{0} is not readable"),
    )
    for is_valid, template in validations:
        if not is_valid(prospective_dir):
            raise argparse.ArgumentTypeError(template.format(prospective_dir))
    return prospective_dir
def readable_file(prospective_file):
    """argparse type: validate that the argument is an existing, readable file."""
    validations = (
        (os.path.exists, "{0} doesn't exist"),
        (os.path.isfile, "{0} is not a file"),
        (lambda path: os.access(path, os.R_OK), "{0} is not readable"),
    )
    for is_valid, template in validations:
        if not is_valid(prospective_file):
            raise argparse.ArgumentTypeError(template.format(prospective_file))
    return prospective_file
def _checked_date(parse, s):
    # Shared wrapper: convert a parser's ValueError into an argparse error.
    try:
        return parse(s)
    except ValueError:
        raise argparse.ArgumentTypeError("{0} is not a valid date".format(s))
def date_arg(s):
    """argparse type: strict YYYY-MM-DD date."""
    return _checked_date(parse_date, s)
def date_min_arg(s):
    """argparse type: (possibly partial) date resolved to its earliest day."""
    return _checked_date(parse_date_min, s)
def date_max_arg(s):
    """argparse type: (possibly partial) date resolved to its latest day."""
    return _checked_date(parse_date_max, s)
def dummy(path):
    """Create an empty placeholder output file at `path` if it does not exist."""
    # Lazy %-style args let logging skip the formatting when DEBUG is off;
    # the redundant function-local `import os` was dropped (os is imported
    # at module level).
    _log.debug("Creating dummy output %s", path)
    if not os.path.exists(path):
        with open(path, "w"):
            pass
def parse_date(s):
    """Parse a strict YYYY-MM-DD string into a datetime.date (ValueError otherwise)."""
    from datetime import datetime
    parsed = datetime.strptime(s, "%Y-%m-%d")
    return parsed.date()
def parse_date_min(s):
    """Parse YYYY / YYYY-MM / YYYY-MM-DD, resolving partial dates to their first day.

    Returns None for empty input or an unrecognised length.
    """
    from datetime import datetime
    formats = {len("YYYY"): "%Y", len("YYYY-MM"): "%Y-%m", len("YYYY-MM-DD"): "%Y-%m-%d"}
    if s and len(s) in formats:
        # strptime defaults missing month/day fields to 1.
        return datetime.strptime(s, formats[len(s)]).date()
    return None
def parse_date_max(s):
    """Parse YYYY / YYYY-MM / YYYY-MM-DD, resolving partial dates to their last day.

    Returns None for empty input or an unrecognised length.
    """
    from datetime import datetime
    import calendar
    if not s:
        return None
    if len(s) == len("YYYY"):
        # Year only: snap to December 31st.
        return datetime.strptime(s, "%Y").date().replace(month=12, day=31)
    if len(s) == len("YYYY-MM"):
        # Year and month: snap to the month's final day (leap-year aware).
        d = datetime.strptime(s, "%Y-%m").date()
        _, last_day = calendar.monthrange(d.year, d.month)
        return d.replace(day=last_day)
    if len(s) == len("YYYY-MM-DD"):
        return datetime.strptime(s, "%Y-%m-%d").date()
    return None
def output_format_arg(s):
    """argparse type: map a format name onto the OutputFormat enum member."""
    if any(s == fmt.name for fmt in OutputFormat):
        return OutputFormat[s]
    raise argparse.ArgumentTypeError("{0} is not a supported output format".format(s))
|
is = axis[1:]
self.position_sign[i] = -1.0
if axis not in ('x','y','z'):
rospy.logwarn('Invalid axis %s given in [axes_mapping]' % axis)
self.position_axes[i] = ['x','y','z'].index(axis)
self.workspace = self.change_axes(self.workspace)
# Rate parameters
self.rate_pivot = np.zeros(3)
self.rate_gain = self.read_parameter('~rate_gain', 1.0)
# Initial values
self.frame_id = self.read_parameter('~frame_id', 'world')
self.colors = TextColors()
self.master_pos = None
self.master_rot = np.array([0, 0, 0, 1])
self.master_vel = np.zeros(3)
self.master_dir = np.zeros(3)
self.slave_pos = None
self.slave_rot = np.array([0, 0, 0, 1])
self.timer = None
self.force_feedback = np.zeros(3)
self.pos_force_feedback = np.zeros(3)
# Synch
self.slave_synch_pos = np.zeros(3)
self.slave_synch_rot = np.array([0, 0, 0, 1])
# Setup Subscribers/Publishers
self.feedback_pub = rospy.Publisher(self.feedback_topic, OmniFeedback)
self.ik_mc_pub = rospy.Publisher(self.ik_mc_topic, PoseStamped)
self.vis_pub = rospy.Publisher('visualization_marker', Marker)
rospy.Subscriber(self.master_state_topic, OmniState, self.cb_master_state)
rospy.Subscriber(self.slave_state_topic, EndpointState, self.cb_slave_state)
rospy.Subscriber('/takktile/force_feedback', Wrench, self.feedback_cb)
self.loginfo('Waiting for [%s] and [%s] topics' % (self.master_state_topic, self.slave_state_topic))
while not rospy.is_shutdown():
if (self.slave_pos == None) or (self.master_pos == None):
rospy.sleep(0.01)
else:
self.loginfo('Rate position controller running')
# Register rospy shutdown hook
rospy.on_shutdown(self.shutdown_hook)
break
# Make sur | e the first command sent to the slave is equal to its current position6D
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
# Start the timer that will publish the ik commands
self.command_timer = rospy.Timer(rospy.Duration(1.0/self.publish_frequency | ), self.publish_command)
self.draw_timer = rospy.Timer(rospy.Duration(1.0/10.0), self.draw_position_region)
self.loginfo('State machine state: GO_TO_CENTER')
  @smach.cb_interface(outcomes=['lock', 'succeeded', 'aborted'])
  def go_to_center(user_data, self):
    """SMACH state callback: drive the master device back to the workspace center.

    Outcomes:
      'lock'      -- master is not centered yet (or is locked); keep pulling it
                     toward the origin with a spring-damper force.
      'succeeded' -- master is centered; re-anchor the slave reference position
                     and move on to POSITION_CONTROL.
    """
    if not np.allclose(np.zeros(3), self.master_pos, atol=self.hysteresis) or self.locked:
      # Spring-damper pull toward the center (force opposes position/velocity).
      self.force_feedback = (self.k_center * self.master_pos + self.b_center * self.master_vel) * -1.0
      return 'lock'
    else:
      self.force_feedback = np.zeros(3)
      # Master is centered: take the slave's current pose as the new anchor.
      self.slave_synch_pos = np.array(self.slave_pos)
      self.command_pos = np.array(self.slave_pos)
      self.loginfo('State machine transitioning: GO_TO_CENTER:succeeded-->POSITION_CONTROL')
      return 'succeeded'
  @smach.cb_interface(outcomes=['stay', 'leave', 'aborted'])
  def position_control(user_data, self):
    """SMACH state callback: direct position teleoperation inside the workspace.

    Outcomes:
      'stay'  -- master is inside the workspace ellipsoid and unlocked; the slave
                 tracks master displacement scaled by 1/position_ratio.
      'leave' -- master left the workspace (or was locked); hand over to
                 RATE_CONTROL, pivoting about the exit point.
    """
    if self.inside_workspace(self.master_pos) and not self.locked:
      self.command_pos = self.slave_synch_pos + self.master_pos / self.position_ratio
      self.force_feedback = self.pos_force_feedback
      return 'stay'
    else:
      self.force_feedback = np.zeros(3)
      self.command_pos = np.array(self.slave_pos)
      # Remember where the master crossed the boundary; rate control pushes
      # relative to this pivot.
      self.rate_pivot = self.master_pos
      self.loginfo('State machine transitioning: POSITION_CONTROL:leave-->RATE_CONTROL')
      return 'leave'
  @smach.cb_interface(outcomes=['stay', 'leave', 'aborted'])
  def rate_control(user_data, self):
    """SMACH state callback: rate (velocity) teleoperation outside the workspace.

    Outcomes:
      'stay'  -- master still outside the workspace; command the slave at a rate
                 proportional to the boundary penetration and render an opposing
                 spring-damper force on the master.
      'leave' -- master re-entered the workspace (or was locked); go back to
                 POSITION_CONTROL.
    """
    if not (self.inside_workspace(self.master_pos) or self.locked):
      # Penetration vector: distance from the pivot, directed along the
      # (normalized) master position.
      penetration = sqrt(np.sum((self.master_pos - self.rate_pivot) ** 2)) * self.normalize_vector(self.master_pos)
      # Send the force feedback to the master
      self.force_feedback = (self.k_rate * penetration + self.b_rate * self.master_vel) * -1.0
      # Send the rate command to the slave
      self.command_pos += (self.rate_gain * penetration) / self.position_ratio
      # Move the workspace
      self.slave_synch_pos = self.slave_pos - self.master_pos / self.position_ratio
      return 'stay'
    else:
      self.command_pos = np.array(self.slave_pos)
      self.force_feedback = np.zeros(3)
      self.loginfo('State machine transitioning: RATE_CONTROL:leave-->POSITION_CONTROL')
      return 'leave'
@smach.cb_interface(outcomes=['succeeded', 'aborted'])
def rate_collision(user_data, self):
return 'succeeded'
def execute(self):
self.sm.execute()
def shutdown_hook(self):
# Stop timers
self.command_timer.shutdown()
self.draw_timer.shutdown()
# Stop the state machine
self.sm.request_preempt()
  def read_parameter(self, name, default):
    """Read a ROS parameter, warning and falling back to `default` when missing."""
    if not rospy.has_param(name):
      rospy.logwarn('Parameter [%s] not found, using default: %s' % (name, default))
    return rospy.get_param(name, default)
  def loginfo(self, msg):
    """Log `msg` through rospy at INFO level, colored blue for visibility."""
    #~ rospy.logwarn(self.colors.OKBLUE + str(msg) + self.colors.ENDC)
    rospy.loginfo(self.colors.OKBLUE + str(msg) + self.colors.ENDC)
def inside_workspace(self, point):
# The workspace as an ellipsoid: http://en.wikipedia.org/wiki/Ellipsoid
return np.sum(np.divide(point**2, self.workspace**2)) < 1
def normalize_vector(self, v):
result = np.array(v)
norm = np.sqrt(np.sum((result ** 2)))
if norm:
result /= norm
return result
def change_axes(self, array, index=None, sign=None):
if index == None:
index = self.position_axes
if sign == None:
sign = self.position_sign
result = np.zeros(len(array))
for i, idx in enumerate(index):
result[i] = array[idx] * sign[idx]
return result
def change_force_axes(self, array, index=None, sign=None):
if index == None:
index = self.position_axes
if sign == None:
sign = self.position_sign
result = np.zeros(len(array))
for i, idx in enumerate(index):
result[i] = array[idx] * sign[i]
return result
def send_feedback(self):
feedback_msg = OmniFeedback()
force = self.change_force_axes(self.force_feedback)
pos = self.change_axes(self.center_pos)
feedback_msg.force = Vector3(*force)
feedback_msg.position = Vector3(*pos)
self.feedback_pub.publish(feedback_msg)
# DO NOT print to the console within this function
def cb_master_state(self, msg):
pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]) - self.center_pos
vel = np.array([msg.velocity.x, msg.velocity.y, msg.velocity.z])
self.master_pos = self.change_axes(pos)
self.master_vel = self.change_axes(vel)
self.master_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
self.master_dir = self.normalize_vector(self.master_vel)
self.locked = msg.locked
def cb_slave_state(self, msg):
self.slave_pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
self.slave_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
def feedback_cb(self, msg):
self.pos_force_feedback = np.array([msg.force.x, msg.force.y, msg.force.z])
def publish_command(self, event):
position, orientation = self.command_pos, self.command_rot
ik_mc_msg = PoseStamped()
ik_mc_msg.header.frame_id = self.frame_id
ik_mc_msg.header.stamp = rospy.Time.now()
ik_mc_msg.pose.position = Point(*position)
ik_mc_msg.pose.orientation = Quaternion(*orientation)
try:
self.ik_mc_pub.publish(ik_mc_msg)
self.send_feedback()
except rospy.exceptions.ROSException:
pass
def draw_position_region(self, event):
marker = Marker()
marker.header.frame_id = self.frame_id
marker.header.stamp = rospy.Time.now()
marker.id = 0;
marker.type = marker.SPHERE
marker.ns = 'position_region'
marker.action = marker.ADD
marker.pose.position = Point(*self.slave_synch_pos)
#~ Workspace ellipsoid: self.workspace
scale = 2 * self.workspace / self.position_ratio
marker.scale = Vector3(*scale)
marker.color.a |
# coding: utf-8
from rest_framework import viewsets
from kpi.models import UserAssetSubscription
from kpi.serializers.v2.user_asset_subscription import (
UserAssetSubscriptionSerializer,
)
from kpi.utils.object_permission import get_database_user
class UserAssetSubscriptionViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the requesting user's asset subscriptions.

    The queryset is always scoped to the authenticated (database) user and may
    optionally be narrowed to a single asset via the `asset__uid` query param.
    """
    queryset = UserAssetSubscription.objects.none()
    serializer_class = UserAssetSubscriptionSerializer
    lookup_field = 'uid'

    def get_queryset(self):
        criteria = {'user': get_database_user(self.request.user)}
        asset_uid = self.request.query_params.get('asset__uid')
        if asset_uid is not None:
            criteria['asset__uid'] = asset_uid
        return UserAssetSubscription.objects.filter(**criteria)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)
|
#!/usr/bin/env python2
#-*-indent-tabs-mode: nil-*-
import sys
import os.path
import gi
from gi.repository import Gtk, Gio
SCHEMAS = "org.cinnamon.desklets.launcher"  # GSettings schema for the launcher desklet
LAUNCHER_KEY = "launcher-list"  # strv key holding "<desklet_id>:<desktop file>" entries
HOME_DIR = os.path.expanduser("~")+"/"
CUSTOM_LAUNCHERS_PATH = HOME_DIR + ".cinnamon/panel-launchers/"  # user-defined .desktop files
EDITOR_DIALOG_UI_PATH = "/usr/share/cinnamon/desklets/launcher@cinnamon.org/editorDialog.ui"
class EditorDialog:
    """GTK dialog for creating or editing a launcher desklet entry.

    With no ``desklet_id`` the dialog creates a new launcher; with an id it
    edits the existing entry stored in the ``launcher-list`` GSettings key
    (entries have the form ``"<desklet_id>:<desktop file name>"``).
    """
    def __init__(self, desklet_id=-1):
        self.launcher_settings = Gio.Settings.new(SCHEMAS)
        self.launcher_type = "Application"
        self.name = ""
        self.desklet_id = desklet_id
        if not desklet_id == -1:
            # Editing an existing desklet: recover its launcher name (the
            # trailing ".desktop" -- 8 characters -- is stripped).
            launcher_list = self.launcher_settings.get_strv(LAUNCHER_KEY)
            launcher = ""
            for item in launcher_list:
                if item.split(":")[0] == str(self.desklet_id):
                    launcher = item.split(":")[1][:-8]
                    break;
            self.name = launcher
            # Custom launchers are named "cinnamon-custom-launcher-<n>"
            # (24-character prefix).
            if self.name[:24] == "cinnamon-custom-launcher":
                self.launcher_type = "Custom Application"
        # Build the UI from the shipped Glade file and wire up the widgets.
        self.tree = Gtk.Builder()
        self.tree.add_from_file(EDITOR_DIALOG_UI_PATH)
        self.dialog = self.tree.get_object("dialog")
        self.launcher_type_combo_box = self.tree.get_object("launcher_type_combo_box")
        self.name_entry = self.tree.get_object("name_entry")
        self.title_entry = self.tree.get_object("title_entry")
        self.command_entry = self.tree.get_object("command_entry")
        self.icon_name_entry = self.tree.get_object("icon_name_entry")
        self.launcher_icon = self.tree.get_object("launcher_icon")
        self.name_entry.set_text(self.name)
        self.model = self.launcher_type_combo_box.get_model()
        self.citer = [self.model.get_iter_from_string("0"),self.model.get_iter_from_string("1")]
        self.launcher_type_combo_box.set_active_iter(self.citer[self.launcher_type_to_index(self.launcher_type)])
        self.update_sensitivity()
        self.set_fields_by_name()
        self.on_icon_changed(self.icon_name_entry.get_text())
        self.tree.connect_signals(self)
        self.dialog.show_all()
        self.dialog.connect("destroy", Gtk.main_quit)
        self.dialog.connect("key_release_event", self.on_key_release_event)
        # Blocks here until the dialog is destroyed.
        Gtk.main()
    def launcher_type_to_index(self,launcher_type):
        """Map a launcher-type label onto its combo-box row index."""
        if launcher_type == "Application":
            return 0
        elif launcher_type == "Custom Application":
            return 1
    def update_sensitivity(self):
        """Enable only the entry widgets that make sense for the current type.

        A plain application exposes just the .desktop name; a custom
        application exposes title/command/icon instead.
        """
        sensitive = True
        if (self.launcher_type == "Application"):
            sensitive = False
        self.name_entry.set_sensitive(not sensitive)
        self.title_entry.set_sensitive(sensitive)
        self.command_entry.set_sensitive(sensitive)
        self.icon_name_entry.set_sensitive(sensitive)
        if (self.launcher_type == "Application"):
            self.name_entry.grab_focus()
        else:
            self.title_entry.grab_focus()
    def on_launcher_type_combo_box_changed(self, widget):
        """Signal handler: launcher type switched in the combo box."""
        self.launcher_type = self.launcher_type_combo_box.get_active_text()
        self.update_sensitivity()
        self.on_name_changed(self.name_entry)
    def on_icon_changed(self, widget):
        """Signal handler: refresh the preview icon from the icon-name entry."""
        self.launcher_icon.set_from_icon_name(self.icon_name_entry.get_text(), 48)
    def on_name_changed(self, widget):
        """Signal handler: a new .desktop name implies new title/command/icon."""
        if (self.launcher_type == "Application"):
            self.set_fields_by_name()
    def set_fields_by_name(self):
        """Populate title/command/icon from the .desktop file named in the entry."""
        application = Application(self.name_entry.get_text() + ".desktop")
        if application.title:
            self.title_entry.set_text(application.title)
            self.command_entry.set_text(application.command)
            self.icon_name_entry.set_text(application.icon_name)
    def on_key_release_event(self, widget, event):
        """Signal handler: treat the Enter key as clicking OK."""
        if event.keyval == 65293: # Enter button
            self.on_edit_ok_clicked(widget)
    def on_edit_close_clicked(self, widget):
        """Signal handler: close the dialog without saving."""
        self.dialog.destroy()
    def on_edit_ok_clicked(self, widget):
        """Signal handler: persist the launcher and, for a new one, enable the desklet."""
        if not self.name_entry.get_text():
            return None
        if (self.launcher_type == "Application"):
            launcher_name = self.name_entry.get_text() + ".desktop"
        elif (self.launcher_type == "Custom Application"):
            launcher_name = self.write_custom_application()
        enabled_desklets = None
        if self.desklet_id == -1: # Add new launcher
            # Allocate a fresh desklet id and schedule the desklet for enabling.
            settings = Gio.Settings.new("org.cinnamon")
            self.desklet_id = settings.get_int("next-desklet-id")
            settings.set_int("next-desklet-id", self.desklet_id + 1)
            enabled_desklets = settings.get_strv("enabled-desklets")
            enabled_desklets.append("launcher@cinnamon.org:%s:0:100" % self.desklet_id)
        launcher_list = self.launcher_settings.get_strv(LAUNCHER_KEY)
        # If this desklet already has entries in the list, remove them first.
        # NOTE(review): removing while iterating skips the element after each
        # match; safe only if at most one entry per id is expected -- confirm.
        for item in launcher_list:
            if item.split(":")[0] == str(self.desklet_id):
                launcher_list.remove(item)
        launcher_list.append(str(self.desklet_id) + ":" + launcher_name)
        self.launcher_settings.set_strv(LAUNCHER_KEY, launcher_list)
        # Update desklets list now if new desklet is made
        if enabled_desklets:
            settings.set_strv("enabled-desklets", enabled_desklets)
        self.dialog.destroy()
    def get_custom_id(self):
        """Return the first free index n for cinnamon-custom-launcher-<n>.desktop."""
        i = 1
        directory = Gio.file_new_for_path(CUSTOM_LAUNCHERS_PATH)
        if not directory.query_exists(None):
            directory.make_directory_with_parents(None)
        fileRec = Gio.file_parse_name(CUSTOM_LAUNCHERS_PATH + 'cinnamon-custom-launcher-' + str(i) + '.desktop')
        while fileRec.query_exists(None):
            i = i + 1
            fileRec = Gio.file_parse_name(CUSTOM_LAUNCHERS_PATH + 'cinnamon-custom-launcher-' + str(i) + '.desktop')
        return i;
    def write_custom_application(self):
        """Write the dialog's fields to a new custom .desktop file; return its file name."""
        i = self.get_custom_id();
        file_name = "cinnamon-custom-launcher-" + str(i) + ".desktop"
        file_path = CUSTOM_LAUNCHERS_PATH + file_name
        title = self.title_entry.get_text()
        command = self.command_entry.get_text()
        icon_name = self.icon_name_entry.get_text()
        _file = open(file_path,"w+")
        write_list=["[Desktop Entry]\n","Type=Application\n", "Name=" + title + "\n","Exec=" + command + "\n","Icon=" + icon_name + "\n"]
        _file.writelines(write_list)
        _file.close()
        return file_name
class Application:
    """Minimal .desktop file reader for a launcher entry.

    Looks for `file_name` first among the user's custom launchers, then in the
    system applications directory.  Exposes the entry's title (Name=), command
    (Exec=) and icon (Icon=); missing fields fall back to safe defaults.  When
    the file exists in neither location, title/command/icon_name stay None.
    """
    def __init__(self, file_name):
        self.file_name = file_name
        self._path = None
        self.icon_name = None
        self.title = None
        self.command = None
        for directory in (CUSTOM_LAUNCHERS_PATH, "/usr/share/applications/"):
            candidate = directory + file_name
            if os.path.exists(candidate):
                self._path = candidate
                break
        if self._path:
            # Parse line by line, stopping once all three fields are known.
            # Values are split on the FIRST '=' so commands/titles containing
            # 'Name', 'Exec' or further '=' signs survive intact (the previous
            # replace()-based parsing corrupted such values), and the file is
            # closed deterministically via the context manager.
            with open(self._path, "r") as desktop_file:
                for line in desktop_file:
                    key, sep, value = line.partition("=")
                    if not sep:
                        continue
                    value = value.rstrip("\n")
                    if key.startswith("Name") and "[" not in key:
                        # Skip localized variants such as Name[fr].
                        self.title = value
                    elif key.startswith("Icon"):
                        # Icon names never contain spaces; strip any strays.
                        self.icon_name = value.replace(" ", "")
                    elif key.startswith("Exec"):
                        self.command = value
                    if self.icon_name and self.title and self.command:
                        break
            if not self.icon_name:
                self.icon_name = "application-x-executable"
            if not self.title:
                self.title = "Application"
            if not self.command:
                self.command = ""
if __name__ == "__main__":
    # An existing desklet id may be passed on the command line to edit that
    # launcher in place; otherwise a brand-new launcher is created.
    desklet_id = sys.argv[1] if len(sys.argv) > 1 else -1
    dialog = EditorDialog(desklet_id)
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you c | an redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in th | e hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cylc.flow.tui.data
from cylc.flow.tui.data import generate_mutation
def test_generate_mutation(monkeypatch):
    """It should produce a GraphQL mutation with the args filled in."""
    # Patch the module-level argument-type registry so the generated GraphQL
    # variable declarations are deterministic for this test.
    arg_types = {
        'foo': 'String!',
        'bar': '[Int]'
    }
    monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
    # The expected literal is whitespace-sensitive: it must match the layout
    # produced by generate_mutation exactly.
    assert generate_mutation(
        'my_mutation',
        ['foo', 'bar']
    ) == '''
        mutation($foo: String!, $bar: [Int]) {
            my_mutation (foos: $foo, bars: $bar) {
                result
            }
        }
    '''
|
ec : int, optional
Explicitly specify total number of vectors
(in case word vectors are appended with document vectors afterwards).
"""
_save_word2vec_format(fname, self.vocab, self.syn0, fvocab=fvocab, binary=binary, total_vec=total_vec)
    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                             limit=None, datatype=REAL):
        """Load the input-hidden weight matrix from the original C word2vec-tool format.

        Use :func:`~gensim.models.utils_any2vec._load_word2vec_format`.
        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.

        Parameters
        ----------
        fname : str
            The file path to the saved word2vec-format file.
        fvocab : str, optional
            File path to the vocabulary. Word counts are read from `fvocab` filename, if set
            (this is the file generated by `-save-vocab` flag of the original C tool).
        binary : bool, optional
            If True, indicates whether the data is in binary word2vec format.
        encoding : str, optional
            If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
        unicode_errors : str, optional
            Default 'strict', is a string suitable to be passed as the `errors`
            argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
            file may include word tokens truncated in the middle of a multibyte unicode character
            (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
        limit : int, optional
            Sets a maximum number of word-vectors to read from the file. The default,
            None, means read all.
        datatype : type, optional
            (Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
            Such types may result in much slower bulk operations or incompatibility with optimized routines.

        Returns
        -------
        :class:`~gensim.models.poincare.PoincareModel`
            Loaded Poincare model.
        """
        return _load_word2vec_format(
            cls, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
            limit=limit, datatype=datatype)
@staticmethod
def vector_distance(v | ector_1, vector_2):
"""Compute poincare distance between two input vectors. Convenience method over `vector_distance_batch`.
Parameters
----------
vector_1 : numpy.array
Input vector.
vector_2 : numpy.array
Input vector.
Returns
-------
numpy.float
Poincare distance between `vector_1` and `vector_2`.
"""
return PoincareKeyedVectors.vector_distance_batch(vector_1, vector_2[np.newaxis, :])[0]
@staticmethod
def vector_distance_batch(vector_1, vectors_all):
"""Compute poincare distances between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.array
vector from which Poincare distances are to be computed, expected shape (dim,).
vectors_all : numpy.array
for each row in vectors_all, distance from vector_1 is computed, expected shape (num_vectors, dim).
Returns
-------
numpy.array
Poincare distance between `vector_1` and each row in `vectors_all`, shape (num_vectors,).
"""
euclidean_dists = np.linalg.norm(vector_1 - vectors_all, axis=1)
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
return np.arccosh(
1 + 2 * (
(euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
)
)
def closest_child(self, node):
"""Get the node closest to `node` that is lower in the hierarchy than `node`.
Parameters
----------
node : {str, int}
Key for node for which closest child is to be found.
Returns
-------
{str, None}
Node closest to `node` that is lower in the hierarchy than `node`.
If there are no nodes lower in the hierarchy, None is returned.
"""
all_distances = self.distances(node)
all_norms = np.linalg.norm(self.syn0, axis=1)
node_norm = all_norms[self.vocab[node].index]
mask = node_norm >= all_norms
if mask.all(): # No nodes lower in the hierarchy
return None
all_distances = np.ma.array(all_distances, mask=mask)
closest_child_index = np.ma.argmin(all_distances)
return self.index2word[closest_child_index]
def closest_parent(self, node):
"""Get the node closest to `node` that is higher in the hierarchy than `node`.
Parameters
----------
node : {str, int}
Key for node for which closest parent is to be found.
Returns
-------
{str, None}
Node closest to `node` that is higher in the hierarchy than `node`.
If there are no nodes higher in the hierarchy, None is returned.
"""
all_distances = self.distances(node)
all_norms = np.linalg.norm(self.syn0, axis=1)
node_norm = all_norms[self.vocab[node].index]
mask = node_norm <= all_norms
if mask.all(): # No nodes higher in the hierarchy
return None
all_distances = np.ma.array(all_distances, mask=mask)
closest_child_index = np.ma.argmin(all_distances)
return self.index2word[closest_child_index]
def descendants(self, node, max_depth=5):
"""Get the list of recursively closest children from the given node, up to a max depth of `max_depth`.
Parameters
----------
node : {str, int}
Key for node for which descendants are to be found.
max_depth : int
Maximum number of descendants to return.
Returns
-------
list of str
Descendant nodes from the node `node`.
"""
depth = 0
descendants = []
current_node = node
while depth < max_depth:
descendants.append(self.closest_child(current_node))
current_node = descendants[-1]
depth += 1
return descendants
def ancestors(self, node):
"""Get the list of recursively closest parents from the given node.
Parameters
----------
node : {str, int}
Key for node for which ancestors are to be found.
Returns
-------
list of str
Ancestor nodes of the node `node`.
"""
ancestors = []
current_node = node
ancestor = self.closest_parent(current_node)
while ancestor is not None:
ancestors.append(ancestor)
ancestor = self.closest_parent(ancestors[-1])
return ancestors
def distance(self, w1, w2):
"""Calculate Poincare distance between vectors for nodes `w1` and `w2`.
Parameters
----------
w1 : {str, int}
Key for first node.
w2 : {str, int}
Key for second node.
Returns
-------
float
Poincare distance between the vectors for nodes `w1` and `w2`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # What is the distance between the words 'mammal' and 'carnivore'?
|
from django.conf import settings
from django.utils import httpwrappers
from django.core.mail import mail_managers
import md5, os
class CommonMiddleware:
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing "www."s.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """
    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW.

        Returns a 403 for blocked user agents, a redirect when the URL is
        rewritten, or None to continue normal processing.
        """
        # Check for denied User-Agents
        # NOTE: dict.has_key() is Python-2-only; this module predates Python 3.
        if request.META.has_key('HTTP_USER_AGENT'):
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    return httpwrappers.HttpResponseForbidden('<h1>Forbidden</h1>')
        # Check for a redirect based on settings.APPEND_SLASH and settings.PREPEND_WWW
        old_url = [request.META['HTTP_HOST'], request.path]
        new_url = old_url[:]
        if settings.PREPEND_WWW and not old_url[0].startswith('www.'):
            new_url[0] = 'www.' + old_url[0]
        # Append a slash if append_slash is set and the URL doesn't have a
        # trailing slash or a file extension.
        if settings.APPEND_SLASH and (old_url[1][-1] != '/') and ('.' not in old_url[1].split('/')[-1]):
            new_url[1] = new_url[1] + '/'
        if new_url != old_url:
            # Redirect; scheme inferred from the CGI-style HTTPS env variable.
            newurl = "%s://%s%s" % (os.environ.get('HTTPS') == 'on' and 'https' or 'http', new_url[0], new_url[1])
            if request.GET:
                newurl += '?' + request.GET.urlencode()
            return httpwrappers.HttpResponseRedirect(newurl)
        return None
    def process_response(self, request, response):
        "Check for a flat page (for 404s) and calculate the Etag, if needed."
        if response.status_code == 404:
            if settings.SEND_BROKEN_LINK_EMAILS:
                # If the referrer was from an internal link or a non-search-engine site,
                # send a note to the managers.
                domain = request.META['HTTP_HOST']
                referer = request.META.get('HTTP_REFERER', None)
                is_internal = referer and (domain in referer)
                path = request.get_full_path()
                # '?' in the referrer is treated as a search-engine query page.
                if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
                    mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
                        "Referrer: %s\nRequested URL: %s\n" % (referer, request.get_full_path()))
            return response
        # Use ETags, if requested.
        # NOTE: the md5 module is Python-2-only (hashlib replaced it).
        if settings.USE_ETAGS:
            etag = md5.new(response.get_content_as_string(settings.DEFAULT_CHARSET)).hexdigest()
            if request.META.get('HTTP_IF_NONE_MATCH') == etag:
                response = httpwrappers.HttpResponseNotModified()
            else:
                response['ETag'] = etag
        return response
def _is_ignorable_404(uri):
    "Returns True if a 404 at the given URL *shouldn't* notify the site managers"
    matches_start = any(uri.startswith(prefix) for prefix in settings.IGNORABLE_404_STARTS)
    matches_end = any(uri.endswith(suffix) for suffix in settings.IGNORABLE_404_ENDS)
    return matches_start or matches_end
|
# -*- coding: utf-8 -*-
import requests
import urlparse
class TV(object):
    """Thin client for a TV playback/volume HTTP service.

    Commands are fire-and-forget POSTs to the service; network failures are
    deliberately ignored (best effort).
    """
    def __init__(self, url):
        self.url = url        # base URL of the service
        self.chan = None      # channel currently playing, if any
        self.stream = None    # stream currently playing, if any
    def _post(self, endpoint, json):
        """POST `json` to `endpoint` relative to the base URL, best effort."""
        url = urlparse.urljoin(self.url, endpoint)
        try:
            requests.post(url, json=json)
        except requests.exceptions.RequestException:
            # Only swallow network/HTTP errors. The previous bare `except`
            # also hid programming errors (and KeyboardInterrupt).
            pass
    def _json(self, action, data='', options='live'):
        """Build the payload dict understood by the playback/volume endpoints."""
        return {
            'action': action,
            'data': data,
            'options': options
        }
    def start(self, chan, stream, modus):
        """Start playback of `stream` on channel `chan` in mode `modus`."""
        self.chan = chan
        self.stream = stream
        json = self._json('start', data=stream, options=modus)
        self._post('playback', json)
    def stop(self):
        """Stop playback and forget the current channel/stream."""
        self.chan = None
        self.stream = None
        json = self._json('stop')
        self._post('playback', json)
    def play(self):
        """Resume playback."""
        json = self._json('play')
        self._post('playback', json)
    def pause(self):
        """Pause playback."""
        json = self._json('pause')
        self._post('playback', json)
    def vol(self, action):
        """Send a volume command (`action`) to the volume endpoint."""
        json = self._json(action)
        self._post('volume', json)
|
import os
import unittest
from robot.utils.asserts import assert_equal, assert_true
from robot.utils.etreewrapper import ETSource, ET
from robot.utils import IRONPYTHON, PY3
# Use this very test module as a known, readable file on disk.
PATH = os.path.join(os.path.dirname(__file__), 'test_etreesource.py')
if PY3:
    unicode = str  # Python 3 has no `unicode`; alias for the assertions below
class TestETSource(unittest.TestCase):
    """ETSource behaviour with file paths, open file objects and XML strings."""
    def test_path_to_file(self):
        # A plain path is yielded unchanged and ETSource opens nothing itself.
        source = ETSource(PATH)
        with source as src:
            assert_equal(src, PATH)
        self._verify_string_representation(source, PATH)
        assert_true(source._opened is None)
    def test_opened_file_object(self):
        # An already-open file is used as-is and is NOT closed by ETSource.
        source = ETSource(open(PATH))
        with source as src:
            assert_true(src.read().startswith('import os'))
        assert_true(src.closed is False)
        self._verify_string_representation(source, PATH)
        assert_true(source._opened is None)
    def test_byte_string(self):
        self._test_string('\n<tag>content</tag>\n')
    def test_unicode_string(self):
        self._test_string(u'\n<tag>hyv\xe4</tag>\n')
    def _test_string(self, xml):
        # XML given as a string is served via an in-memory file that ETSource
        # opens (and must close) itself.
        source = ETSource(xml)
        with source as src:
            content = src.read()
            if not IRONPYTHON:
                content = content.decode('UTF-8')
            assert_equal(content, xml)
        self._verify_string_representation(source, '<in-memory file>')
        assert_true(source._opened.closed)
        with ETSource(xml) as src:
            assert_equal(ET.parse(src).getroot().tag, 'tag')
    def test_non_ascii_string_repr(self):
        self._verify_string_representation(ETSource(u'\xe4'), u'\xe4')
    def _verify_string_representation(self, source, expected):
        # Both unicode() conversion and %-formatting must agree.
        assert_equal(unicode(source), expected)
        assert_equal(u'-%s-' % source, '-%s-' % expected)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Django
from django.conf.urls import patterns, url
# local Django
from organization.views import OrganizationCreateView, OrganizationDeleteView, OrganizationListView, OrganizationUpdateView
# URL routes for the organization app (create/delete/edit/list).
# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in 1.10;
# newer Django expects `urlpatterns` to be a plain list of url()/path() calls.
urlpatterns = patterns(
    '',
    url(r'^create/$', OrganizationCreateView.as_view(), name='create'),
    url(r'^delete/(?P<organization_id>\d+)$',
        OrganizationDeleteView.as_view(),
        name='delete'),
    url(r'^edit/(?P<organization_id>\d+)$',
        OrganizationUpdateView.as_view(),
        name='edit'),
    url(r'^list/$', OrganizationListView.as_view(), name='list'),
)
|
w_authors = True
# ------ Sphinx
# Add any paths that contain templates here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
#author = u'`Humans <https://github.com/SheffieldML/GPy/graphs/contributors>`_'
author = 'GPy Authors, see https://github.com/SheffieldML/GPy/graphs/contributors'
copyright = u'2020, '+author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The short X.Y version is read straight from the package source.
with open('../../GPy/__version__.py', 'r') as f:
    version = f.read()
release = version
# BUG FIX: `print version` is Python-2-only syntax and broke doc builds under
# Python 3; the call form below works on both.
print(version)
# version = '0.8.8'
# The full version, including alpha/beta/rc tags.
# release = '0.8.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# BUG FIX: `language` must be a locale code such as 'en' — 'python' is not a
# natural language and makes Sphinx warn and fall back. Code-highlighting is
# configured via `highlight_language`, not here.
language = 'en'
# autodoc:
autoclass_content = 'both'
autodoc_default_flags = ['members',
#'undoc-members',
#'private-members',
#'special-members',
#'inherited-members',
'show-inheritance']
autodoc_member_order = 'groupwise'
add_function_parentheses = False
add_module_names = False
modindex_common_prefix = ['paramz']
show_authors = True
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = dict(sidebarwidth='20%')
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'wide.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {
# '**': ['globaltoc.html', 'localtoc.html', 'sourcelink.html', 'searchbox.html'],
# 'using/windows': ['windowssidebar.html', 'searchbox.html'],
#}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_sear | ch_options = {'type': 'default'}
# The name of a javascrip | t file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'GPy.tex', u'GPy Documentation',
# u'GPy Authors', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = |
return d
    def get_section_pattern(self, section):
        """Normalise a config section name, splitting off a host pattern.

        A section may be written "name:host1,host2"; the host part is stored
        in Global.current_hosts. Returns the single matching entry from
        feature_list, or False when nothing matches. Quits gdeploy if the
        name matches more than one feature.
        """
        regexp = '(.*):(.*)'
        sec = re.match(regexp, section)
        if sec:
            Global.current_hosts = self.pattern_stripping(sec.group(2))
            section = sec.group(1)
        # Canonicalise: lower-case, dashes -> underscores.
        section = section.lower().replace('-', '_')
        section = [v for v in feature_list if re.match(v, section)]
        if not section:
            return False
        if len(section) > 1:
            print "Error: Oops! gdeploy is a bit confused with the " \
                "section name %s. Please check your configuration file." % section
            self.cleanup_and_quit()
        return section[0]
    def remove_from_sections(self, regexp):
        # Delete every parsed section whose name matches `regexp`.
        # NOTE: relies on Python 2 semantics — filter() returns a list and
        # map() is evaluated eagerly for its __delitem__ side effect.
        r = re.compile(regexp)
        section_names = filter(r.match, Global.sections)
        map(Global.sections.__delitem__, section_names)
    def run_playbook(self, yaml_file, section_dict=None):
        """Prepare inventory and variable files, then run `yaml_file`.

        Falls back to self.section_dict when no `section_dict` is given.
        """
        self.create_inventory()
        if hasattr(self, 'section_dict'):
            if not section_dict:
                section_dict = self.section_dict
        if section_dict:
            section_dict['master'] = Global.master
            self.filename = Global.group_file
            self.create_var_files(section_dict)
        yml = self.get_file_dir_path(Global.base_dir, yaml_file)
        self.exec_ansible_cmd(yml)
def exec_ansible_cmd(self, playbooks_file):
executable = 'ansible-playbook'
command = [executable, '-i', Global.inventory, Global.verbose,
playbooks_file]
command = filter(None, command)
try:
retcode = subprocess.call(command, shell=False)
# Exit gdeploy in case of errors and user has explicitly set
# not to ignore errors
if retcode != 0 and Global.ignore_errors != 'yes':
self.cleanup_and_quit(1)
elif retcode != 0 and Global.ignore_errors == 'yes':
print "Ignoring errors..."
except (OSError, subprocess.CalledProcessError) as e:
print "Error: Command %s failed. (Reason: %s)" % (cmd, e)
sys.exit()
def listify(self, var):
if not var:
return []
if not type(var) is list:
return [var]
return var
def correct_brick_format(self, brick_list):
bricks = []
for brick in brick_list:
if not brick.startswith('/dev/'):
bricks.append('/dev/' + brick)
else:
bricks.append(brick)
return bricks
def volname_formatter(self, section_dict):
volname = section_dict.get('volname')
if not volname:
return section_dict
section_dict['volname'] = self.split_volume_and_hostname(volname)
return section_dict
    def perf_spec_data_write(self):
        '''
        Now this one looks dirty. Couldn't help it.
        This one reads the performance related data like
        number of data disks and stripe unit size if
        the option disk type is provided in the config.
        Some calculations are made as to enhance
        performance

        Writes disktype/diskcount/stripesize/dalign into the group var file.
        '''
        Global.logger.info("Performing GlusterFS specific performance tuning.")
        disktype = self.config_get_options('disktype', False)
        if disktype:
            perf = dict(disktype=disktype[0].lower())
            if perf['disktype'] not in ['raid10', 'raid6', 'jbod']:
                msg = "Unsupported disk type!"
                print "\nError: " + msg
                Global.logger.error(msg)
                self.cleanup_and_quit()
            if perf['disktype'] != 'jbod':
                # RAID setups need a disk count; stripe size is mandatory
                # for RAID 6 and defaults to 256 KB for RAID 10.
                diskcount = self.config_get_options('diskcount', True)
                perf['diskcount'] = int(diskcount[0])
                stripe_size = self.config_get_options('stripesize', False)
                if not stripe_size and perf['disktype'] == 'raid6':
                    msg = "Error: 'stripesize' not provided for disktype %s"\
                        % perf['disktype']
                    print msg
                    Global.logger.error(msg)
                    self.cleanup_and_quit()
                if stripe_size:
                    perf['stripesize'] = int(stripe_size[0])
                    if perf['disktype'] == 'raid10' and perf[
                            'stripesize'] != 256:
                        warn = "Warning: We recommend a stripe unit size of 256KB " \
                            "for RAID 10"
                        Global.logger.warning(warn)
                        if warn not in Global.warnings:
                            Global.warnings.append(warn)
                else:
                    perf['stripesize'] = 256
                # Data alignment = stripe unit * number of data disks.
                perf['dalign'] = {
                    'raid6': perf['stripesize'] * perf['diskcount'],
                    'raid10': perf['stripesize'] * perf['diskcount']
                }[perf['disktype']]
            else:
                perf['dalign'] = 256
                perf['diskcount'] = perf['stripesize'] = 0
        else:
            # No disktype configured: assume JBOD defaults.
            perf = dict(disktype='jbod')
            perf['dalign'] = 256
            perf['diskcount'] = perf['stripesize'] = 0
        self.create_var_files(perf, False, Global.group_file)
    def create_inventory(self):
        """Create/refresh the ansible inventory file and pick a master host."""
        if not os.path.isfile(Global.inventory):
            self.touch_file(Global.inventory)
        # Short-circuit idiom: only write the host group when hosts exist.
        Global.current_hosts and self.write_config(
            Global.group,
            Global.current_hosts,
            Global.inventory)
        try:
            if not Global.master:
                # We set the `master' variable in group_vars/all file here.
                # If brick_hosts are present use one of them as master.
                # Else use one from Global.current_hosts.
                #
                # Previous solution: [list(set(Global.current_hosts) - set(
                # Global.brick_hosts))[0]] fails when length of
                # Global.current_hosts was equal to Global.brick_hosts
                # Or less than Global.brick_hosts
                if Global.brick_hosts:
                    Global.master = Global.brick_hosts[0]
                else:
                    Global.master = Global.current_hosts[0]
        except:
            # Best effort: missing host lists leave Global.master unset.
            pass
        try:
            hostname = Global.master or Global.current_hosts[0]
        except:
            # No usable hosts at all: fall back to localhost.
            print "\nWarning: Insufficient host names or IPs. Running " \
                "in the localhost"
            hostname = "127.0.0.1"
        self.write_config('master', hostname, Global.inventory)
def call_config_parser(self):
config = ConfigParser.ConfigParser(allow_no_value=True)
config.optionxform = str
return config
    def remove_section(self, filename, section):
        """Rewrite `filename` with `section` removed (no-op if absent)."""
        config = self.read_config(filename)
        try:
            config.remove_section(section)
        except:
            # Section (or file) may not exist; removal is best-effort.
            pass
        with open(filename, 'w+') as out:
            config.write(out)
    def read_config(self, config_file):
        """Parse `config_file`; quit gdeploy with a hint if it is unreadable."""
        config_parse = self.call_config_parser()
        try:
            config_parse.read(config_file)
            return config_parse
        except:
            print "Sorry! Looks like the format of configuration " \
                "file %s is not something we could read! \nTry removing " \
                "whitespaces or unwanted characters in the configuration " \
                "file." % config_file
            self.cleanup_and_quit()
    def write_to_inventory(self, section, options):
        # Convenience wrapper: write a section into the ansible inventory.
        self.write_config(section, options, Global.inventory)
def write_config(self, section, options, filename):
self.remove_section(filename, section)
config = self.call_config_parser()
config.add_section(section)
if type(options) is not dict:
options = self.pattern_stripping(options)
for option in options:
config.set(section, option)
else:
for k, v in options.iteritems():
v = ','.join(self.pattern_stripping(v))
config.set(section, k , v)
try:
with open(filename, 'ab') as f:
config.write(f)
except:
|
import unittest
from gi.repository import GElektra as kdb
TEST_NS = "user/tests/gi_py3"
class Constants(unittest.TestCase):
    """Smoke-test the constants exposed by the GElektra binding."""
    def setUp(self):
        pass
    def test_kdbconfig_h(self):
        # Constants originating from kdbconfig.h.
        for value in (kdb.DB_SYSTEM, kdb.DB_USER, kdb.DB_HOME):
            self.assertIsInstance(value, str)
        self.assertIsInstance(kdb.DEBUG, int)
    def test_kdb_h(self):
        # Constants originating from kdb.h.
        self.assertIsInstance(kdb.VERSION, str)
        for value in (kdb.VERSION_MAJOR, kdb.VERSION_MINOR, kdb.VERSION_MICRO):
            self.assertIsInstance(value, int)
        self.assertIsNone(kdb.KS_END)
class KDB(unittest.TestCase):
    """Round-trip tests for the kdb.KDB database handle."""
    def test_ctor(self):
        self.assertIsInstance(kdb.KDB(), kdb.KDB)
        error = kdb.Key()
        self.assertIsInstance(kdb.KDB(error), kdb.KDB)
    def test_get(self):
        with kdb.KDB() as db:
            ks = kdb.KeySet()
            db.get(ks, "system/elektra")
            import os
            if os.getenv("CHECK_VERSION") is None:
                key = ks["system/elektra/version/constants/KDB_VERSION"]
                self.assertEqual(key.value, kdb.VERSION)
    def test_set(self):
        with kdb.KDB() as db:
            ks = kdb.KeySet(100)
            db.get(ks, TEST_NS)
            try:
                key = ks[TEST_NS + "/mykey"]
            except KeyError:
                key = kdb.Key(TEST_NS + "/mykey")
                ks.append(key)
            key.value = "new_value"
            db.set(ks, TEST_NS)
        # Re-open to verify the value was persisted.
        with kdb.KDB() as db:
            ks = kdb.KeySet(100)
            db.get(ks, TEST_NS)
            self.assertEqual(ks[TEST_NS + "/mykey"].value, "new_value")
    @classmethod
    def tearDownClass(cls):
        # Cleanup: remove everything below TEST_NS that the tests created.
        # FIX: declared as a proper classmethod. The original bare
        # zero-argument function only worked by accident of Python 3's
        # unbound class-attribute lookup.
        with kdb.KDB() as db:
            ks = kdb.KeySet(100)
            db.get(ks, TEST_NS)
            ks.cut(kdb.Key(TEST_NS))
            db.set(ks, TEST_NS)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
#
# Read KRW-USD rates
#
# We have data from 1994 to 2009
years = range(1994, 2010)
# read one year into list data
def read_year(yr, data):
    """Read data/<yr>.txt and append (yyyymmdd, KRW-per-USD) tuples to `data`.

    Each input line is "YYYY/MM/DD rate" with rate in USD per KRW; the stored
    value is the inverted rate truncated to an int (KRW per USD).
    """
    fname = "data/%d.txt" % yr
    # `with` guarantees the file is closed even when a line fails to parse;
    # the original leaked the handle on error.
    with open(fname, "r") as f:
        for l in f:
            date1, value1 = l.split()
            value = float(value1)
            # convert to KRW per USD
            value = int(1.0 / value)
            # convert YYYY/MM/DD string to int YYYYMMDD
            ys, ms, ds = date1.split("/")
            date = 10000 * int(ys) + 100 * int(ms) + int(ds)
            data.append((date, value))
# read all files and return list
def read_all():
    """Read every year's file and return the combined record list."""
    records = []
    for year in years:
        read_year(year, records)
    return records
# compute average exchange rate for yr
def average(data, yr):
    """Return the mean exchange rate over the records of year `yr`.

    `data` is a list of (yyyymmdd, value) tuples. Raises ZeroDivisionError
    when the year has no records (unchanged from the original behaviour).
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    count = 0
    start = yr * 10000
    end = (yr + 1) * 10000
    for d, v in data:
        if start <= d < end:
            total += v
            count += 1
    return total / count
def find_min(data):
    """Return (date, value) of the smallest value; (None, 99999) if none beat
    the sentinel."""
    best_value = 99999
    best_date = None
    for date, value in data:
        if value < best_value:
            best_value = value
            best_date = date
    return best_date, best_value
def find_max(data):
    """Return (date, value) of the largest positive value; (None, 0) if none."""
    best_value = 0
    best_date = None
    for date, value in data:
        if value > best_value:
            best_value = value
            best_date = date
    return best_date, best_value
def main():
    # Load every year's file, then report the extremes and yearly averages.
    data = read_all()
    print "Minimum:", find_min(data)
    print "Maximum:", find_max(data)
    for yr in years:
        avg = average(data, yr)
        print yr, avg
# Script-style module: runs immediately on import/execution.
main()
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, sys
from ipdl.ast import Visitor
from ipdl.ast import IN, OUT, INOUT, ASYNC, SYNC, RPC
class CodePrinter:
    '''Write text to an output stream while tracking the current
    indentation column.'''
    def __init__(self, outf=sys.stdout, indentCols=4):
        self.outf = outf
        self.col = 0
        self.indentCols = indentCols
    def write(self, str):
        self.outf.write(str)
    def printdent(self, str=''):
        # Prefix the current indentation.
        self.write(' ' * self.col + str)
    def println(self, str=''):
        self.write(str + '\n')
    def printdentln(self, str):
        self.write(' ' * self.col + str + '\n')
    def indent(self):
        self.col += self.indentCols
    def dedent(self):
        self.col -= self.indentCols
##-----------------------------------------------------------------------------
class IPDLCodeGen(CodePrinter, Visitor):
    '''Spits back out equivalent IPDL to the code that generated this.
    Also known as pretty-printing.'''
    def __init__(self, outf=sys.stdout, indentCols=4, printed=None):
        # FIX: `printed` previously defaulted to a *shared* mutable set, so
        # every instance created with the default accumulated the filenames
        # seen by all earlier instances. Default to None and build a fresh
        # set per instance; recursive calls still pass their set explicitly.
        CodePrinter.__init__(self, outf, indentCols)
        self.printed = set() if printed is None else printed
    def visitTranslationUnit(self, tu):
        # Remember the file so recursive includes are printed only once.
        self.printed.add(tu.filename)
        self.println('//\n// Automatically generated by ipdlc\n//')
        # NOTE(review): `CodeGen` is not defined in this module — this looks
        # like it should be a base-class call. Left untouched; confirm
        # against the rest of the package.
        CodeGen.visitTranslationUnit(self, tu)
    def visitCxxInclude(self, inc):
        self.println('include "'+ inc.file +'";')
    def visitProtocolInclude(self, inc):
        self.println('include protocol "'+ inc.file +'";')
        # Inline included protocols once, inside a comment block.
        if inc.tu.filename not in self.printed:
            self.println('/* Included file:')
            IPDLCodeGen(outf=self.outf, indentCols=self.indentCols,
                        printed=self.printed).visitTranslationUnit(inc.tu)
            self.println('*/')
    def visitProtocol(self, p):
        # Protocol header, manages-statements, messages, transitions.
        self.println()
        for namespace in p.namespaces: namespace.accept(self)
        self.println('%s protocol %s\n{'% (p.sendSemantics[0], p.name))
        self.indent()
        for mgs in p.managesStmts:
            mgs.accept(self)
        if len(p.managesStmts): self.println()
        for msgDecl in p.messageDecls: msgDecl.accept(self)
        self.println()
        for transStmt in p.transitionStmts: transStmt.accept(self)
        self.dedent()
        self.println('}')
        # Close one brace per namespace opened above.
        self.write('}\n'* len(p.namespaces))
    def visitManagerStmt(self, mgr):
        self.printdentln('manager '+ mgr.name +';')
    def visitManagesStmt(self, mgs):
        self.printdentln('manages '+ mgs.name +';')
    def visitMessageDecl(self, msg):
        # "semantics direction name(in params)" plus optional returns clause.
        self.printdent('%s %s %s('% (msg.sendSemantics[0], msg.direction[0], msg.name))
        for i, inp in enumerate(msg.inParams):
            inp.accept(self)
            if i != (len(msg.inParams) - 1): self.write(', ')
        self.write(')')
        if 0 == len(msg.outParams):
            self.println(';')
            return
        self.println()
        self.indent()
        self.printdent('returns (')
        for i, outp in enumerate(msg.outParams):
            outp.accept(self)
            if i != (len(msg.outParams) - 1): self.write(', ')
        self.println(');')
        self.dedent()
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
| """
Description here
"""
import logging as log
def check_liveness(ax, model, args, simconfig, timestep):
    """Return False when the simulation has gone quiet and can be finalized.

    "Quiet" means no interaction for more than 5 * (number of network edges)
    ticks AND no active links remain; otherwise keep running (True).
    """
    idle_ticks = timestep - model.get_time_last_interaction()
    edge_count = model.agentgraph.number_of_edges()
    if idle_ticks <= 5 * edge_count:
        return True
    if ax.get_fraction_links_active() == 0.0:
        log.debug("No active links found in the model, clear to finalize")
        return False
    return True
|
#!/usr/bin/env python3
import sys
import os
import time
import atexit
from signal import SIGTERM
import logging
import logging.handlers
# load the config
from foostat import config
# Root logger configuration: everything at DEBUG and above goes to a
# midnight-rotated log file (path taken from the application config),
# keeping the 10 most recent backups.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
filehandler = logging.handlers.TimedRotatingFileHandler(config["files"]["log_file"], when='midnight', interval=1,
                                                        backupCount=10)
filehandler.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(filehandler)
class Daemon(object):
    """Generic double-fork UNIX daemon.

    Subclass Daemon and override the run() method; control the process with
    start(), stop(), restart() and status(). The daemon's PID is recorded in
    *pidfile*, and the standard streams are redirected to the given paths.
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile
    def daemonize(self):
        """
        Deamonize, do double-fork magic.
        """
        try:
            pid = os.fork()
            if pid > 0:
                # Exit first parent.
                sys.exit(0)
        except OSError as e:
            message = "Fork #1 failed: {}\n".format(e)
            sys.stderr.write(message)
            sys.exit(1)
        # Decouple from parent environment.
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # Do second fork so the daemon cannot re-acquire a controlling terminal.
        try:
            pid = os.fork()
            if pid > 0:
                # Exit from second parent.
                sys.exit(0)
        except OSError as e:
            message = "Fork #2 failed: {}\n".format(e)
            sys.stderr.write(message)
            # Bug fix: the original fell through after a failed second fork and
            # kept running half-daemonized; abort like the fork #1 path does.
            sys.exit(1)
        logger.info('deamon going to background, PID: {}'.format(os.getpid()))
        # Redirect standard file descriptors.
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(self.stdin, 'r')
        so = open(self.stdout, 'a+')
        se = open(self.stderr, 'a+')
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # fix current dir to script's dir
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        # Write pidfile (closed deterministically, unlike a bare open().write()).
        with open(self.pidfile, 'w+') as pf:
            pf.write("{}\n".format(os.getpid()))
        # Register a function to clean up.
        atexit.register(self.delpid)
    def delpid(self):
        """Remove the pidfile (registered as an atexit hook)."""
        os.remove(self.pidfile)
    def start(self):
        """
        Start daemon.
        """
        # Check pidfile to see if the daemon already runs.
        try:
            with open(self.pidfile, 'r') as pf:
                pid = int(pf.read().strip())
        except IOError:
            pid = None
        if pid:
            message = "Pidfile {} already exist. Daemon already running?\n".format(self.pidfile)
            sys.stderr.write(message)
            sys.exit(1)
        # Start daemon.
        self.daemonize()
        self.run()
    def status(self):
        """
        Get status of daemon.
        """
        try:
            with open(self.pidfile, 'r') as pf:
                pid = int(pf.read().strip())
        except IOError:
            message = "There is no PID file. Daemon already running?\n"
            sys.stderr.write(message)
            sys.exit(1)
        try:
            # /proc/<pid>/status exists iff the process is alive (Linux-only).
            procfile = open("/proc/{}/status".format(pid), 'r')
            procfile.close()
            message = "There is a process with the PID {}\n".format(pid)
            sys.stdout.write(message)
        except IOError:
            # Bug fix: the original printed the pidfile *path* here instead of
            # the PID the message talks about.
            message = "There is not a process with the PID {}\n".format(pid)
            sys.stdout.write(message)
    def stop(self):
        """
        Stop the daemon.
        """
        # Get the pid from pidfile.
        try:
            with open(self.pidfile, 'r') as pf:
                pid = int(pf.read().strip())
        except IOError as e:
            message = str(e) + "\nDaemon not running?\n"
            sys.stderr.write(message)
            sys.exit(1)
        # Try killing daemon process.
        try:
            os.kill(pid, SIGTERM)
            time.sleep(1)
        except OSError as e:
            print(str(e))
            sys.exit(1)
        try:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        except IOError as e:
            message = str(e) + "\nCan not remove pid file {}".format(self.pidfile)
            sys.stderr.write(message)
            sys.exit(1)
    def restart(self):
        """
        Restart daemon.
        """
        self.stop()
        time.sleep(1)
        self.start()
    def run(self):
        """
        You should override this method when you subclass Daemon.
        It will be called after the process has been daemonized by start() or restart().
        Example:
        class MyDaemon(Daemon):
            def run(self):
                while True:
                    time.sleep(1)
        """
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from pyvirtualdisplay import Display
# BeautifulSoup parser backend used for all HTML parsing in this module.
__PROCESSOR = 'lxml'
# CSS class names used to locate offer data on the results page.
__OFFERS_CLASS = 'offer-listing'
__PRICE_CLASS = 'dollars'
__FLIGHT_DURATION_CLASS = 'duration-emphasis'
# NOTE(review): the two selectors below are empty and never used in this
# module — confirm whether they are work-in-progress before relying on them.
__LAYOVR_CLASS = ''
__AIRLINE_CLASS = ''
def get_page_offers(url):
    """Fetch *url* in a headless Firefox and return the parsed list of offers.

    Runs inside a virtual display so it works on machines without X.
    Returns a list of offer dicts (see __get_offer_object).
    """
    display = Display(visible=0, size=(800, 600))
    display.start()
    try:
        browser = webdriver.Firefox()
        try:
            browser.get(url)
            # Give the JS-heavy page time to render its offers.
            # TODO: replace the fixed sleep with an explicit WebDriverWait.
            time.sleep(25)
            full_content = browser.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
        finally:
            # Bug fix: always release the browser, even when the page load or
            # script execution raises (the original leaked it on exception).
            browser.quit()
    finally:
        display.stop()
    return __parse_offers_page(full_content)
def __parse_offers_page(html_content):
    """Extract every valid offer dict from the raw page HTML."""
    parsed = []
    for offer_html in __parse_offers_list(html_content):
        candidate = __get_offer_object(offer_html)
        if candidate is None:
            # Skip listings missing price, duration or airline.
            continue
        parsed.append(candidate)
    return parsed
def __parse_offers_list(html_content):
    """Return the <li> elements that each hold one offer listing."""
    return BeautifulSoup(html_content, __PROCESSOR).find_all('li', class_=__OFFERS_CLASS)
def __get_offer_object(offer_html):
    """Build an offer dict from one listing element, or None when a field is missing."""
    price = __get_offer_price(offer_html)
    duration = __get_offer_duration(offer_html)
    airline = __get_offer_airline(offer_html)
    if price is None or duration is None or airline is None:
        return None
    return {'price': price.strip(), 'duration': duration.strip(), 'airline': airline.strip()}
def __get_offer_price(offer_html):
    """Return the offer's price text, or None when no price element exists."""
    offer_element = __find_element_using_class(offer_html, 'span', __PRICE_CLASS)
    if offer_element is not None:
        # Fix: reuse the element already found instead of repeating the
        # identical DOM lookup a second time.
        return offer_element.text
def __get_offer_duration(offer_html):
    """Return the flight-duration text (raises AttributeError when the node is absent)."""
    node = __find_element_using_class(offer_html, 'div', __FLIGHT_DURATION_CLASS)
    return node.text
def __find_elements_using_class(html_content, element, css_class):
    """Parse *html_content* and return all *element* tags carrying *css_class*."""
    parsed = BeautifulSoup(html_content, __PROCESSOR)
    matches = parsed.find_all(element, class_=css_class)
    return matches
def __find_element_using_class(html_content, element, css_class):
    """Return the first *element* tag with *css_class* inside an already-parsed node."""
    match = html_content.find(element, class_=css_class)
    return match
def __get_offer_airline(offer_html):
    """Return the airline-name text (raises AttributeError when the node is absent)."""
    node = offer_html.find('div', {'data-test-id': 'airline-name'})
    return node.text
|
"""Procedures to initialize the full text search in PostgresQL"""
from django.db import connection
def setup_full_text_search(script_path):
    """Install the plpgsql language if necessary, then run the FTS init script.

    *script_path* points at a SQL file that is executed over Django's
    default PostgreSQL connection.
    """
    init_sql = open(script_path).read()
    cursor = connection.cursor()
    try:
        # plpgsql must exist before the init script's functions can be created.
        cursor.execute("SELECT * FROM pg_language WHERE lanname='plpgsql'")
        if cursor.fetchone() is None:
            cursor.execute("CREATE LANGUAGE plpgsql")
        cursor.execute(init_sql)
    finally:
        cursor.close()
|
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Forocoches(Platform):
    """A <Platform> object for Forocoches"""
    def __init__(self):
        self.platformName = "Forocoches"
        self.tags = ["opinions", "activism"]

        # Modes this platform supports: only user lookup (usufy).
        self.isValidMode = {
            "phonefy": False,
            "usufy": True,
            "searchfy": False,
        }

        # Search URL per enabled mode; <usufy> is replaced by the nickname.
        self.url = {
            "usufy": "http://www.forocoches.com/foro/member.php?username=" + "<usufy>",
        }

        # Whether each enabled mode needs credentials.
        self.needsCredentials = {
            "usufy": False,
        }

        # Regexp a query must match to be attempted ('.+' accepts anything).
        self.validQuery = {
            "usufy": ".+",
        }

        # Strings whose presence in the response means the user does not exist.
        self.notFoundText = {
            "usufy": ["main error message"],
        }

        # Regular expressions for fields to extract per mode (none defined yet).
        self.fieldsRegExp = {
            "usufy": {},
        }

        # Filled at runtime with the fields actually found.
        self.foundFields = {}
|
if coords[2 * left + inc] == t:
swap_item(ids, coords, left, j)
else:
j += 1
swap_item(ids, coords, j, right)
if j <= k:
left = j + 1
if k <= j:
right = j - 1
def _swap_item(self, ids, coords, i, j):
swap = self._swap
swap(ids, i, j)
swap(coords, 2 * i, 2 * j)
swap(coords, 2 * i + 1, 2 * j + 1)
def _swap(self, arr, i, j):
tmp = arr[i]
arr[i] = arr[j]
arr[j] = tmp
def _range(self, ids, coords, min_x, min_y, max_x, max_y, node_size):
stack = [0, len(ids) - 1, 0]
result = []
x = y = 0
while stack:
axis = stack.pop()
right = stack.pop()
left = stack.pop()
if right - left <= node_size:
for i in range(left, right + 1):
x = coords[2 * i]
y = coords[2 * i + 1]
if (x >= min_x and x <= max_x and y >= min_y and
y <= max_y):
result.append(ids[i])
continue
m = int(floor((left + right) / 2.))
x = coords[2 * m]
y = coords[2 * m + 1]
if (x >= min_x and x <= max_x and y >= min_y and y <= max_y):
result.append(ids[m])
nextAxis = (axis + 1) % 2
if (min_x <= x if axis == 0 else min_y <= y):
stack.append(left)
stack.append(m - 1)
stack.append(nextAxis)
if (max_x >= x if axis == 0 else max_y >= y):
stack.append(m + 1)
stack.append(right)
stack.append(nextAxis)
return result
def _within(self, ids, coords, qx, qy, r, node_size):
sq_dist = self._sq_dist
stack = [0, len(ids) - 1, 0]
result = []
r2 = r * r
while stack:
axis = stack.pop()
right = stack.pop()
left = stack.pop()
if right - left <= node_size:
for i in range(left, right + 1):
if sq_dist(coords[2 * i], coords[2 * i + 1], qx, qy) <= r2:
result.append(ids[i])
continue
m = int(floor((left + right) / 2.))
x = coords[2 * m]
y = coords[2 * m + 1]
if sq_dist(x, y, qx, qy) <= r2:
result.append(ids[m])
nextAxis = (axis + 1) % 2
if (qx - r <= x) if axis == 0 else (qy - r <= y):
stack.append(left)
stack.append(m - 1)
stack.append(nextAxis)
if (qx + r >= x) if axis == 0 else (qy + r >= y):
stack.append(m + 1)
| stack.append(right)
stack.append(nextAxis)
return result
def _sq_dist(self, ax, ay, bx, by):
dx = ax - bx
dy = ay - by
return dx * dx + dy * dy
class Cluster(object):
    """A cluster of markers produced by SuperCluster at some zoom level."""
    def __init__(self, x, y, num_points, id, props):
        super(Cluster, self).__init__()
        # Projected coordinates (same space as Marker.x / Marker.y).
        self.x = x
        self.y = y
        self.num_points = num_points
        self.id = id
        self.props = props
        # "inf" means not yet absorbed into a parent cluster.
        self.zoom = float("inf")
        self.parent_id = None
        self.widget = None
        # Cache the geographic coordinates once, up front.
        self.lon = xLng(x)
        self.lat = yLat(y)
class Marker(object):
    """A single input point for clustering, created from geographic lon/lat.

    `cls`/`options` describe the widget to build for it; `id`, `zoom`,
    `parent_id` and `widget` are filled in during clustering.
    """
    def __init__(self, lon, lat, cls=MapMarker, options=None):
        super(Marker, self).__init__()
        self.lon = lon
        self.lat = lat
        self.cls = cls
        self.options = options
        # preprocess x/y from lon/lat
        self.x = lngX(lon)
        self.y = latY(lat)
        # cluster information
        self.id = None
        self.zoom = float("inf")
        self.parent_id = None
        self.widget = None
    def __repr__(self):
        # Bug fix: the original formatted self.source, an attribute that is
        # never set on Marker, so repr() always raised AttributeError.
        return "<Marker lon={} lat={} cls={}>".format(self.lon, self.lat,
                                                      self.cls)
class SuperCluster(object):
    """Port of supercluster from mapbox in pure python
    """
    def __init__(self,
                 min_zoom=0,
                 max_zoom=16,
                 radius=40,
                 extent=512,
                 node_size=64):
        # min_zoom/max_zoom: zoom range for which cluster indexes are built.
        # radius: cluster radius in pixels, normalized against `extent`.
        # node_size: kd-tree leaf size (larger = faster build, slower query).
        super(SuperCluster, self).__init__()
        self.min_zoom = min_zoom
        self.max_zoom = max_zoom
        self.radius = radius
        self.extent = extent
        self.node_size = node_size
    def load(self, points):
        """Load an array of markers.
        Once loaded, the index is immutable.
        """
        from time import time
        # Builds one KDBush index per zoom level, top-down: trees[z] indexes
        # the clusters visible at zoom z, each level clustering the output of
        # the level above it. Mutates each point's `id`.
        # NOTE(review): the bare `print` statements are Python-2-style timing
        # debug output left in; consider removing or routing through logging.
        self.trees = {}
        self.points = points
        for index, point in enumerate(points):
            point.id = index
        clusters = points
        for z in range(self.max_zoom, self.min_zoom - 1, -1):
            start = time()
            print "build tree", z
            self.trees[z + 1] = KDBush(clusters, self.node_size)
            print "kdbush", (time() - start) * 1000
            start = time()
            clusters = self._cluster(clusters, z)
            print(len(clusters))
            print "clustering", (time() - start) * 1000
        self.trees[self.min_zoom] = KDBush(clusters, self.node_size)
    def get_clusters(self, bbox, zoom):
        """For the given bbox [westLng, southLat, eastLng, northLat], and
        integer zoom, returns an array of clusters and markers
        """
        tree = self.trees[self._limit_zoom(zoom)]
        # Query in projected space; north latitude maps to the smaller y.
        ids = tree.range(lngX(bbox[0]), latY(bbox[3]), lngX(bbox[2]), latY(bbox[1]))
        clusters = []
        for i in range(len(ids)):
            c = tree.points[ids[i]]
            if isinstance(c, Cluster):
                clusters.append(c)
            else:
                # Unclustered marker: return the originally loaded point.
                clusters.append(self.points[c.id])
        return clusters
    def _limit_zoom(self, z):
        # Clamp z into [min_zoom, max_zoom + 1], the range of built trees.
        return max(self.min_zoom, min(self.max_zoom + 1, z))
    def _cluster(self, points, zoom):
        # Merge `points` that fall within the cluster radius at `zoom`,
        # returning the new list of clusters and untouched points.
        clusters = []
        c_append = clusters.append
        trees = self.trees
        # Cluster radius converted to projected units at this zoom.
        r = self.radius / float(self.extent * pow(2, zoom))
        # loop through each point
        for i in range(len(points)):
            p = points[i]
            # if we've already visited the point at this zoom level, skip it
            if p.zoom <= zoom:
                continue
            p.zoom = zoom
            # find all nearby points
            tree = trees[zoom + 1]
            neighbor_ids = tree.within(p.x, p.y, r)
            num_points = 1
            if isinstance(p, Cluster):
                num_points = p.num_points
            # Weighted-center accumulators (weight = points per cluster).
            wx = p.x * num_points
            wy = p.y * num_points
            props = None
            for j in range(len(neighbor_ids)):
                b = tree.points[neighbor_ids[j]]
                # filter out neighbors that are too far or already processed
                if zoom < b.zoom:
                    num_points2 = 1
                    if isinstance(b, Cluster):
                        num_points2 = b.num_points
                    b.zoom = zoom  # save the zoom (so it doesn't get processed twice)
                    wx += b.x * num_points2  # accumulate coordinates for calculating weighted center
                    wy += b.y * num_points2
                    num_points += num_points2
                    b.parent_id = i
            if num_points == 1:
                # Nothing merged: keep the point/cluster as-is.
                c_append(p)
            else:
                p.parent_id = i
                c_append(Cluster(wx / num_points, wy / num_points, num_points, i, props))
        return clusters
class ClusterMapMarker(MapMarker):
    """Map-marker widget representing a Cluster; displays the point count."""
    source = StringProperty(join(dirname(__file__), "icons", "cluster.png"))
    cluster = ObjectProperty()
    num_points = NumericProperty()
    text_color = ListProperty([.1, .1, .1, 1])
    def on_cluster(self, instance, cluster):
        # Keep the displayed count in sync when the cluster is (re)assigned.
        self.num_points = cluster.num_points
    def on_touch_down(self, touch):
        # Cluster markers are not interactive; let touches fall through.
        return False
class ClusteredMarkerLayer(MapLayer):
cluster_cls = ObjectProperty(ClusterMapMarker)
cluster_min_zoom = NumericProperty(0)
|
import os
import string
import random
from fabric.api import env
from fabric.colors import green
from literals import (DEFAULT_INSTALL_PATH, DEFAULT_VIRTUALENV_NAME,
DEFAULT_REPOSITORY_NAME, DEFAULT_OS, OS_CHOICES,
DEFAULT_DATABASE_MANAGER, DB_CHOICES, DEFAULT_DATABASE_NAME,
DEFAULT_WEBSERVER, WEB_CHOICES, DEFAULT_DATABASE_USERNAME,
DJANGO_DB_DRIVERS, DEFAULT_DATABASE_HOST, DEFAULT_PASSWORD_LENGTH)
from server_config import reduce_env
def password_generator():
    """Return a random alphanumeric password of DEFAULT_PASSWORD_LENGTH characters.

    Security fix: these passwords protect database accounts, so they are drawn
    from the OS CSPRNG via random.SystemRandom instead of the default
    (predictable) Mersenne-Twister generator.
    """
    # http://snipplr.com/view/63223/python-password-generator/
    rng = random.SystemRandom()
    chars = string.ascii_letters + string.digits
    return ''.join(rng.choice(chars) for _ in range(DEFAULT_PASSWORD_LENGTH))
@reduce_env
def setup_environment():
    """Populate fabric's `env` with defaults for any deployment setting not set.

    Existing `env` attributes win over the defaults from `literals`. Exits
    with status 1 when database_manager_admin_password is missing, since the
    database cannot be provisioned without it.
    """
    env['os'] = getattr(env, 'os', DEFAULT_OS)
    env['os_name'] = OS_CHOICES[env.os]
    env['install_path'] = getattr(env, 'install_path', DEFAULT_INSTALL_PATH[env.os])
    env['virtualenv_name'] = getattr(env, 'virtualenv_name', DEFAULT_VIRTUALENV_NAME[env.os])
    env['repository_name'] = getattr(env, 'repository_name', DEFAULT_REPOSITORY_NAME[env.os])
    env['virtualenv_path'] = os.path.join(env.install_path, env.virtualenv_name)
    env['repository_path'] = os.path.join(env.virtualenv_path, env.repository_name)
    env['database_manager'] = getattr(env, 'database_manager', DEFAULT_DATABASE_MANAGER)
    env['database_manager_name'] = DB_CHOICES[env.database_manager]
    env['database_username'] = getattr(env, 'database_username', DEFAULT_DATABASE_USERNAME)
    # NOTE: password_generator() runs on every call (getattr evaluates its
    # default eagerly); its result is only *used* when no password was set.
    env['database_password'] = getattr(env, 'database_password', password_generator())
    env['database_host'] = getattr(env, 'database_host', DEFAULT_DATABASE_HOST)
    env['drop_database'] = getattr(env, 'drop_database', False)
    if not getattr(env, 'database_manager_admin_password', None):
        print('Must set the database_manager_admin_password entry in the fabric settings file (~/.fabricrc by default)')
        exit(1)
    env['database_name'] = getattr(env, 'database_name', DEFAULT_DATABASE_NAME)
    env['webserver'] = getattr(env, 'webserver', DEFAULT_WEBSERVER)
    env['webserver_name'] = WEB_CHOICES[env.webserver]
    env['django_database_driver'] = DJANGO_DB_DRIVERS[env.database_manager]
def print_supported_configs():
    """Print the supported OS / database / webserver choices, defaults in green."""
    os_line = 'Supported operating systems (os=): %s, default=\'%s\'' % (dict(OS_CHOICES).keys(), green(DEFAULT_OS))
    db_line = 'Supported database managers (database_manager=): %s, default=\'%s\'' % (dict(DB_CHOICES).keys(), green(DEFAULT_DATABASE_MANAGER))
    web_line = 'Supported webservers (webserver=): %s, default=\'%s\'' % (dict(WEB_CHOICES).keys(), green(DEFAULT_WEBSERVER))
    print(os_line)
    print(db_line)
    print(web_line)
    print('\n')
|
from django.conf.urls import patterns, url, include
urlpatterns = patterns('',
('', include('imago.urls')),
url(r'^report/(?P<module_na | me>[a-z0-9_]+)/$', 'reports.views.report', name='report'),
url(r'^represent/(?P<module_name>[a-z0-9_]+)/$', 'reports.views.represent', name='represent'),
url(r'^warnings/$', ' | reports.views.warnings', name='warnings'),
url(r'^$', 'reports.views.home', name='home'),
)
|
from django.http import Http404
from django.shortcuts import render
from . import models
def index(request):
    """Render the home page with project/task/funding counts and featured projects."""
    num_projects = models.Project.objects.all().count()
    num_tasks = models.Tasks.objects.all().count()
    num_fundings = models.Fundings.objects.all().count()
    # Progress toward a goal of 10 projects, expressed as a percentage.
    num_projects_goal = (models.Project.objects.all().count() / 10) * 100
    projects = models.Project.objects.all()[:4]
    featured_projects = models.Project.objects.all().filter(featured=1)
    context = {
        'projects': projects,
        'featured': featured_projects,
        'num_projects': num_projects,
        'num_tasks': num_tasks,
        'num_fundings': num_fundings,
        'num_projects_goal': num_projects_goal,
    }
    return render(request, 'home.html', context=context)
def project(request, slug):
    """Render the detail page for the project identified by *slug*; 404 if absent."""
    try:
        proj = models.Project.objects.get(slug=slug)
    except models.Project.DoesNotExist:
        raise Http404("Project does not exist")
    return render(request, 'sample.html', context={'project': proj})
from hel | per import greeting
if "__name__" == "__main__":
greeti | ng('hello') |
from datetime import timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.osv import orm
import openerp.tests.common as test_common
from .common import BaseAgreementTestMixin
class TestAgreementPriceList(test_common.TransactionCase,
                             BaseAgreementTestMixin):
    """Check that agreement prices are retrieved from the right quantity bracket."""
    def setUp(self):
        """ Create a default agreement
        with 4 price lines
        qty 0 price 70
        qty 200 price 60
        qty 500 price 50
        qty 1000 price 45
        """
        super(TestAgreementPriceList, self).setUp()
        self.commonsetUp()
        cr, uid = self.cr, self.uid
        # Agreement runs from +10 to +20 days from now, total quantity 1500.
        start_date = self.now + timedelta(days=10)
        start_date = start_date.strftime(DEFAULT_SERVER_DATE_FORMAT)
        end_date = self.now + timedelta(days=20)
        end_date = end_date.strftime(DEFAULT_SERVER_DATE_FORMAT)
        agr_id = self.agreement_model.create(cr, uid,
                                             {'supplier_id': self.supplier_id,
                                              'product_id': self.product_id,
                                              'start_date': start_date,
                                              'end_date': end_date,
                                              'delay': 5,
                                              'draft': False,
                                              'quantity': 1500})
        # One EUR pricelist holding the four quantity brackets.
        pl_id = self.agreement_pl_model.create(cr, uid, {
            'framework_agreement_id': agr_id,
            'currency_id': self.ref('base.EUR')})
        self.agreement_line_model.create(cr, uid, {
            'framework_agreement_pricelist_id': pl_id,
            'quantity': 0,
            'price': 70.0})
        self.agreement_line_model.create(cr, uid, {
            'framework_agreement_pricelist_id': pl_id,
            'quantity': 200,
            'price': 60.0})
        self.agreement_line_model.create(cr, uid, {
            'framework_agreement_pricelist_id': pl_id,
            'quantity': 500,
            'price': 50.0})
        self.agreement_line_model.create(cr, uid, {
            'framework_agreement_pricelist_id': pl_id,
            'quantity': 1000,
            'price': 45.0})
        self.agreement = self.agreement_model.browse(cr, uid, agr_id)
    def test_00_test_qty(self):
        """Test if barem retrieval is correct"""
        # Quantities below the second bracket get the base price.
        self.assertEqual(
            self.agreement.get_price(0, currency=self.browse_ref('base.EUR')),
            70.0)
        self.assertEqual(
            self.agreement.get_price(
                100, currency=self.browse_ref('base.EUR')
            ), 70.0)
        # A quantity exactly on a bracket boundary takes that bracket's price.
        self.assertEqual(
            self.agreement.get_price(
                200, currency=self.browse_ref('base.EUR')
            ), 60.0)
        self.assertEqual(
            self.agreement.get_price(
                210, currency=self.browse_ref('base.EUR')
            ), 60.0)
        self.assertEqual(
            self.agreement.get_price(
                500, currency=self.browse_ref('base.EUR')
            ), 50.0)
        self.assertEqual(
            self.agreement.get_price(
                800, currency=self.browse_ref('base.EUR')
            ), 50.0)
        self.assertEqual(
            self.agreement.get_price(
                999, currency=self.browse_ref('base.EUR')
            ), 50.0)
        self.assertEqual(
            self.agreement.get_price(
                1000, currency=self.browse_ref('base.EUR')
            ), 45.0)
        # Out-of-range quantities fall back to the edge brackets.
        self.assertEqual(
            self.agreement.get_price(
                10000, currency=self.browse_ref('base.EUR')
            ), 45.0)
        self.assertEqual(
            self.agreement.get_price(
                -10, currency=self.browse_ref('base.EUR')
            ), 70.0)
    def test_01_failed_wrong_currency(self):
        """Tests that wrong currency raise an exception"""
        # The pricelist was created in EUR only; USD must not resolve.
        with self.assertRaises(orm.except_orm):
            self.agreement.get_price(0, currency=self.browse_ref('base.USD'))
|
from fabric.context_managers import settings
from golive.layers.base import BaseTask, DebianPackageMixin, IPTablesSetup
from golive.stacks.stack import environment
from golive.utils import get_remote_envvar
class RabbitMqSetup(BaseTask, DebianPackageMixin):
    """Install and configure a RabbitMQ broker on the QUEUE_HOST role.

    Handles the apt repository, firewall rules, the erlang listen-port range
    and broker user management.
    """
    package_name = "rabbitmq-server"
    GUEST_USER = "guest"
    RABBITMQ_CONFIGFILE = "/etc/rabbitmq/rabbitmq.config"
    RABBIT_INITSCRIPT = "/etc/init.d/rabbitmq-server"
    NAME = "RABBITMQ"
    # Names of remote environment variables holding the broker credentials.
    VAR_BROKER_USER = "GOLIVE_BROKER_USER"
    VAR_BROKER_PASSWORD = "GOLIVE_BROKER_PASSWORD"
    ROLE = "QUEUE_HOST"
    def init(self, update=True):
        """Install the package, open the firewall and remove the guest user."""
        # add repo for rabbitmq
        self._add_repo()
        self.sudo("apt-get update")
        DebianPackageMixin.init(self, update)
        self._set_listen_port()
        # Inbound ports opened between the stack's hosts: 9101-9105 (pinned
        # erlang distribution range, see _set_listen_port), 4369, 8612 and
        # 5672 (presumably epmd and AMQP — confirm against the deployment).
        allow = [
            (environment.hosts, IPTablesSetup.DESTINATION_ALL, "9101:9105"),
            (environment.hosts, IPTablesSetup.DESTINATION_ALL, "4369"),
            (environment.hosts, IPTablesSetup.DESTINATION_ALL, "8612"),
            (environment.hosts, IPTablesSetup.DESTINATION_ALL, "5672"),
        ]
        iptables = IPTablesSetup()
        iptables.prepare_rules(allow)
        iptables.set_rules(self.__class__.__name__)
        iptables.activate()
        # The default guest account should not stay enabled on a reachable box.
        self._delete_user(self.__class__.GUEST_USER)
    def deploy(self):
        # Deployment only (re)creates the application broker user.
        self._create_user()
    def status(self):
        # Query the init script and look for running applications in its output.
        out = self.run("sudo %s status" % self.RABBIT_INITSCRIPT)
        self._check_output(out, "running_applications", self.NAME)
    def _set_listen_port(self):
        # Pin the erlang distribution port range so it can be firewalled.
        self.append(self.__class__.RABBITMQ_CONFIGFILE,
                    "[{kernel, [ {inet_dist_listen_min, 9100}, {inet_dist_listen_max, 9105} ]}].")
    def _add_repo(self):
        # as described at http://www.rabbitmq.com/install-debian.html
        self.append("/etc/apt/sources.list", "deb http://www.rabbitmq.com/debian/ testing main")
        self.sudo("wget http://www.rabbitmq.com/rabbitmq-signing-key-public.asc")
        self.sudo("apt-key add rabbitmq-signing-key-public.asc")
    def _create_user(self):
        # Credentials come from env vars on the first QUEUE_HOST host.
        username = get_remote_envvar(self.VAR_BROKER_USER, environment.get_role(self.ROLE).hosts[0])
        password = get_remote_envvar(self.VAR_BROKER_PASSWORD, environment.get_role(self.ROLE).hosts[0])
        with settings(warn_only=True):
            self.sudo("rabbitmqctl add_user %s %s" % (username, password))
            self.sudo("rabbitmqctl set_permissions -p / %s \".*\" \".*\" \".*\"" % username)
        # TODO: create vhost
    def _delete_user(self, username):
        # warn_only: deleting a user that does not exist must not abort the run.
        with settings(warn_only=True):
            self.sudo("rabbitmqctl delete_user %s" % username)
|
from __future__ import absolute_import
__all__ = ("DebugMeta",)
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
class DebugMeta(Interface):
    """Debug meta information used while processing stacktraces and similar
    things; deleted after event processing.

    Currently two attributes exist:
    ``sdk_info``:
        the SDK used by the system. Affects the lookup for system symbols;
        when not defined, system symbols are not looked up.
    ``images``:
        a list of debug images and their mappings.
    """
    ephemeral = False
    path = "debug_meta"
    external_type = "debugmeta"
    @classmethod
    def to_python(cls, data):
        # Normalize a missing or None image list to [].
        images = data.get("images") or []
        return cls(
            images=images,
            sdk_info=data.get("sdk_info"),
            is_debug_build=data.get("is_debug_build"),
        )
    def to_json(self):
        payload = {
            "images": self.images or None,
            "sdk_info": self.sdk_info or None,
            "is_debug_build": self.is_debug_build,
        }
        # Empty values are dropped from the serialized form.
        return prune_empty_keys(payload)
|
#!/usr/bin/python
import sys
import os
if len(sys | .argv) >= 4 :
filename = sys.argv[1]
row_i = int(sys.argv[2])-1
target_ls_filename = sys.argv[3]
output_filename = sys.argv[4]
else:
print("usage: python selectrow.py filename row_i target_ls_filename")
print("or ./selectrow.py filename row_i target_ls_filename")
sys.exit(1)
############################################## | ##################################
file = open(filename,'r')
dt = {}
for line in file:
ls=line.strip().split('\t')
if not dt.has_key(ls[row_i]):
dt[ ls[row_i] ] = []
dt[ ls[row_i] ].append( line.strip() )
file.close()
################################################################################
output = open(output_filename,'w')
target_ls_file = open(target_ls_filename, 'r')
for line in target_ls_file:
id = line.strip()
if not dt.has_key(id):
print id
continue
if len(dt[id])>1:
print id + '\t' + str(len(dt[id]))
for item in dt[id]:
output.write( item + '\n')
output.close()
target_ls_file.close()
|
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from .forms import LoginForm
class LoginViewTest(TestCase):
    """GET on the login view: status code, template and form markup."""
    def setUp(self):
        self.client = Client()
        self.response = self.client.get(reverse('login:login'))
    def tearDown(self):
        self.client.logout()
    def test_get(self):
        """The login page is served successfully."""
        self.assertEqual(self.response.status_code, 200)
    def test_template(self):
        """The page is rendered with the login template."""
        self.assertTemplateUsed(self.response, 'login.html')
    def test_html(self):
        """The form has 3 inputs (user, pass, csrf token) and a submit."""
        self.assertContains(self.response, '<input', 3)
        self.assertContains(self.response, 'submit')
class LoginPostTest(TestCase):
    """POST to the login view with valid and invalid credentials."""
    def setUp(self):
        User.objects.create_user(
            'admin', 'admin@admin.com', '123'
        )
        self.client = Client()
    def tearDown(self):
        self.client.logout()
        User.objects.all().delete()
    def make_validated_data(self, **kwargs):
        """Default valid credentials, overridable per test via kwargs."""
        credentials = {
            'username': 'admin',
            'password': '123'
        }
        credentials.update(kwargs)
        return credentials
    def test_already_logged(self):
        """A logged-in user GETting the login page is redirected (302)."""
        self.response = self.client.post(
            reverse('login:login'), self.make_validated_data()
        )
        self.response = self.client.get(reverse('login:login'))
        self.assertEqual(self.response.status_code, 302)
    def test_valid_login(self):
        """Valid credentials cause a redirect (302)."""
        self.response = self.client.post(
            reverse('login:login'), self.make_validated_data()
        )
        self.assertEqual(self.response.status_code, 302)
    def test_invalid_login(self):
        """Invalid credentials re-render the login form (200)."""
        self.response = self.client.post(
            reverse('login:login'), self.make_validated_data(password='1')
        )
        self.assertEqual(self.response.status_code, 200)
# TODO: fix the commented-out tests in LoginFormTest below — the form
# unexpectedly reports no errors when username or password is empty;
# investigate why before re-enabling them.
class LoginFormTest(TestCase):
    """Field layout and validation behaviour of LoginForm."""
    def setUp(self):
        User.objects.create_user('admin', 'admin@admin.com', '123')
    def test_if_has_fields(self):
        """The form exposes exactly username and password, in that order."""
        form = LoginForm()
        self.assertEqual(list(form.fields.keys()), ['username', 'password'])
    # def test_username_is_not_optional(self):
    #     form = self.make_validated_form(username='')
    #     self.assertTrue(form.errors)
    # def test_password_is_not_optional(self):
    #     form = self.make_validated_form(password='')
    #     self.assertTrue(form.errors)
    def test_form(self):
        """Default valid data produces no validation errors."""
        form = self.make_validated_form()
        self.assertFalse(form.errors)
    def make_validated_form(self, **kwargs):
        """Build and validate a LoginForm with default-valid data."""
        payload = {
            'username': 'admin',
            'password': '123',
        }
        payload.update(kwargs)
        form = LoginForm(payload)
        form.is_valid()
        return form
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# ---------------------------------------------------- | ----------------------
from msrest.s | erialization import Model
class Usage(Model):
    """Describes Storage Resource Usage.
    :param unit: The unit of measurement. Possible values include: 'Count',
     'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
    :type unit: str or :class:`UsageUnit
     <azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
    :param current_value: The current count of the allocated resources in the
     subscription.
    :type current_value: int
    :param limit: The maximum count of the resources that can be allocated in
     the subscription.
    :type limit: int
    :param name: The name of the type of usage.
    :type name: :class:`UsageName
     <azure.mgmt.storage.v2015_06_15.models.UsageName>`
    """
    # All four fields are mandatory in the REST payload.
    _validation = {
        'unit': {'required': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }
    # Wire-format key and (de)serialization type for each attribute.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'UsageUnit'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'int'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }
    def __init__(self, unit, current_value, limit, name):
        self.name = name
        self.unit = unit
        self.limit = limit
        self.current_value = current_value
|
""" Django-admin autoregister -- automatic model registration
## sample admin.py ##
from yourproject.autoregister import autoregister
# register all models defined on each app
autoregister('app1', 'app2', 'app3', ...)
"""
from django.db.models import get_models, get_app
from django.con | trib import admin
from django.contrib.admin.sites import AlreadyRegistered
def autoregister(*app_list):
    """Register every model of each named app with the default admin site.

    Models that already have an admin registration are silently skipped.
    """
    for app_name in app_list:
        app = get_app(app_name)
        for model in get_models(app):
            try:
                admin.site.register(model)
            except AlreadyRegistered:
                # Registered elsewhere (e.g. a hand-written ModelAdmin).
                pass
autoregister('utility')
from common import *
import conduit.datatypes.File as File
import conduit.utils as Utils
import os
import tempfile
import datetime
import random
import stat
# Flat test script for conduit's File/TempFile handling.  `ok(...)` and
# `finished()` come from the `common` test harness imported above.
# Statement order matters: each step builds on the files created before it.
tmpdir = tempfile.mkdtemp()
ok("Created tempdir %s" % tmpdir, True)

# Create a temp file with random contents, give it a name, and move it out.
contents = Utils.random_string()
name = Utils.random_string()+".foo"
tmpFile = File.TempFile(contents)
tmpFile.force_new_filename(name)
ok("Set filename to %s" % name, tmpFile._newFilename == name)
newPath = os.path.join(tmpdir, name)
tmpFile.transfer(tmpdir)
ok("Transferred -> %s" % newPath, os.path.isfile(newPath))
f = File.File(newPath)
ok("File contents = %s" % contents, f.get_contents_as_text() == contents)
mtime = f.get_mtime()
f = File.File(newPath)
ok("File name ok", f.get_filename() == name)

#make some 'real' files to play with
testDir = os.path.join(os.environ['TEST_DIRECTORY'],"TempFile")
if not os.path.exists(testDir):
    os.mkdir(testDir)
# (path, expected filename/content, read-only?, local?) tuples; the first two
# are pre-existing read-only sources (one local binary, one remote URL).
testFiles = [
    ('/usr/bin/env','env',True,True),
    ('http://files.conduit-project.org/screenshot.png','screenshot.png',True,False)
]
for i in range(0,5):
    j = Utils.random_string()
    testFiles.append( (os.path.join(testDir,j),j,False,True) )
for path,i,readOnly,local in testFiles:
    #1) create files
    if not readOnly:
        j = open(path,'w')
        j.write(i)
        j.close()
    group = Utils.random_string()
    f = File.File(path,group=group)
    f.set_UID(Utils.random_string())
    uid = f.get_UID()
    size = f.get_size()
    mt = f.get_mimetype()
    #normal file operations on files, both r/o and writable
    ok("not tempfile (%s)" % i, not f._is_tempfile())
    ok("not tempfile uid ok", f.get_UID() == uid)
    ok("not tempfile filename ok", f.get_filename() == i)
    ok("not tempfile group ok", f.group == group)
    nn = i+"-renamed"
    f.force_new_filename(nn)
    ok("not tempfile renamed ok", f.get_filename() == nn)
    f.set_mtime(mtime)
    ok("not tempfile set_mtime ok", f.get_mtime() == mtime)
    #repeat the ops once we make the file a tempfile
    # (remote files are already downloaded to a local temp copy)
    if local:
        tmppath = f.to_tempfile()
    else:
        tmppath = f.get_local_uri()
    ok("tempfile (%s)" % tmppath, f.exists() and f._is_tempfile() and not f.is_directory())
    ok("tempfile uid ok", f.get_UID() == uid)
    ok("tempfile filename ok", f.get_filename() == nn)
    ok("tempfile group ok", f.group == group)
    ok("tempfile path is local", f.get_local_uri() == tmppath)
    #check the transfer was ok
    size2 = f.get_size()
    ok("tempfile size is same", size == size2)
    mt2 = f.get_mimetype()
    ok("tempfile mimetype is same", mt == mt2)
    #check that subsequent renames/mtimes are always deferred
    #when the file is a tempfile
    nn = i+"-renamed-again"
    f.force_new_filename(nn)
    ok("tempfile filename ok again", f.get_filename() == nn)
    mtime2 = datetime.datetime.now()
    f.set_mtime(mtime2)
    ok("tempfile set_mtime ok again", f.get_mtime() == mtime2)
    #check we can create a second tempfile with the same props
    #and delete it, leaving the first tempfile behind
    tmppath2 = f.to_tempfile()
    ok("second tempfile (%s)" % tmppath2, tmppath2 != tmppath)
    ok("second tempfile name == first tempfile name", f.get_filename() == nn)
    f.delete()
    ok("second tempfile deleted", not f.exists())
    #get the first tempfile again, rename to original and copy to the original folder
    f = File.File(tmppath)
    ok("again tempfile (%s)" % tmppath, f.exists() and f._is_tempfile() and not f.is_directory())
    f.force_new_filename(i)
    ok("again tempfile filename ok", f.get_filename() == i)
    ok("again tempfile path is local", f.get_local_uri() == tmppath)
    f.transfer(testDir)
    ok("again not tempfile filename ok", f.get_filename() == i)
    if not readOnly:
        #only makes sense to perform on files that were originally created in 1)
        ok("again not tempfile path matches original", f.get_local_uri() == path)
        ok("again not tempfile mtime ok", f.get_mtime() == mtime)
finished()
|
create_mock_client()
bad_client.async_client_connect = AsyncMock(return_value=False)
# Confirmation sync_client_connect fails.
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
side_effect=[good_client, bad_client],
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_user_confirm_id_error(hass: HomeAssistantType) -> None:
    """Test a failure fetching the server id during confirmation."""
    result = await _init_flow(hass)

    mock_client = create_mock_client()
    # The server fails to report an id during confirmation.
    mock_client.async_sysinfo_id = AsyncMock(return_value=None)

    client_target = "homeassistant.components.hyperion.client.HyperionClient"
    with patch(client_target, return_value=mock_client):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "no_id"
async def test_user_noauth_flow_success(hass: HomeAssistantType) -> None:
    """Check a full flow without auth."""
    result = await _init_flow(hass)

    mock_client = create_mock_client()
    client_target = "homeassistant.components.hyperion.client.HyperionClient"
    with patch(client_target, return_value=mock_client):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)

    # A no-auth server completes immediately with a new config entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["handler"] == DOMAIN
    assert result["title"] == TEST_TITLE
    assert result["data"] == dict(TEST_HOST_PORT)
async def test_user_auth_required(hass: HomeAssistantType) -> None:
    """Verify correct behaviour when auth is required."""
    result = await _init_flow(hass)

    mock_client = create_mock_client()
    mock_client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    client_target = "homeassistant.components.hyperion.client.HyperionClient"
    with patch(client_target, return_value=mock_client):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)

    # The flow must stop and show the auth form.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
async def test_auth_static_token_auth_required_fail(hass: HomeAssistantType) -> None:
    """Verify correct behaviour with a failed auth required call."""
    result = await _init_flow(hass)

    mock_client = create_mock_client()
    # The auth-required query itself fails (returns no response).
    mock_client.async_is_auth_required = AsyncMock(return_value=None)

    client_target = "homeassistant.components.hyperion.client.HyperionClient"
    with patch(client_target, return_value=mock_client):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "auth_required_error"
async def test_auth_static_token_success(hass: HomeAssistantType) -> None:
    """Test a successful flow with a static token."""
    result = await _init_flow(hass)
    assert result["step_id"] == "user"

    mock_client = create_mock_client()
    mock_client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    client_target = "homeassistant.components.hyperion.client.HyperionClient"
    with patch(client_target, return_value=mock_client):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        # Provide a pre-existing (static) token instead of creating one.
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: False, CONF_TOKEN: TEST_TOKEN}
        )

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["handler"] == DOMAIN
    assert result["title"] == TEST_TITLE

    expected_data = dict(TEST_HOST_PORT)
    expected_data[CONF_TOKEN] = TEST_TOKEN
    assert result["data"] == expected_data
async def test_auth_static_token_login_fail(hass: HomeAssistantType) -> None:
    """Test correct behavior with a bad static token."""
    result = await _init_flow(hass)
    assert result["step_id"] == "user"

    mock_client = create_mock_client()
    mock_client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)
    # Make the login call report failure.
    mock_client.async_login = AsyncMock(
        return_value={"command": "authorize-login", "success": False, "tan": 0}
    )

    client_target = "homeassistant.components.hyperion.client.HyperionClient"
    with patch(client_target, return_value=mock_client):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: False, CONF_TOKEN: TEST_TOKEN}
        )

    # The auth form is shown again with an error attached.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"]["base"] == "invalid_access_token"
asyn | c def test_auth_create_token_approval_declined(hass: HomeAssistantType) -> None:
"""Verify correct behaviour when a token request is declined."""
result = await _init_flow(hass)
client = create_mock_client()
client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)
with patch(
"homeassistant.components.hyperion.client.HyperionClient" | , return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_FAIL)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
), patch(
"homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
return_value=TEST_AUTH_ID,
):
result = await _configure_flow(
hass, result, user_input={CONF_CREATE_TOKEN: True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "create_token"
assert result["description_placeholders"] == {
CONF_AUTH_ID: TEST_AUTH_ID,
}
result = await _configure_flow(hass, result)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "create_token_external"
# The flow will be automatically advanced by the auth token response.
result = await _configure_flow(hass, result)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "auth_new_token_not_granted_error"
async def test_auth_create_token_when_issued_token_fails(
hass: HomeAssistantType,
) -> None:
"""Verify correct behaviour when a token is granted by fails to authenticate."""
result = await _init_flow(hass)
client = create_mock_client()
client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_SUCCESS)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
), patch(
"homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
return_value=TEST_AUTH_ID,
):
result = await _configure_flow(
hass, result, user_input={CONF_CREATE_TOKEN: True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "create_token"
assert result["description_placeholders"] == {
CONF_AUTH_ID: TEST_AUTH_ID,
}
result = await _configure_flow(hass, result)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "create_token_external"
# The flow will be automatically advanced by the auth token response.
# Make the last verification fail.
client.async_client_connect = AsyncMock(return_value=False)
result = await _configure_fl |
#!/usr/bin/env python3
# Python libs
import os.path
import sys
import icebox
# FIXME: Move this into icebox
# Parts a chip database can be generated for, in family order.
# (The order matters: `versions` returns matches in this order.)
parts = [
    # LP Series (Low Power)
    "lp384", "lp1k", "lp8k",
    # Unsupported: "lp640", "lp4k" (alias for lp8k)
    # LM Series (Low Power, Embedded IP) -- unsupported: "lm1k", "lm2k"
    "lm4k",
    # HX Series (High Performance) -- unsupported: "hx4k" (alias for hx8k)
    "hx1k", "hx8k",
    # iCE40 UltraLite -- unsupported: "ul640", "ul1k"
    # iCE40 Ultra -- unsupported: "ice5lp1k", "ice5lp2k", "ice5lp4k"
    # iCE40 UltraPlus -- unsupported: "up3k"
    "up5k",
]


def versions(part):
    """Return every supported part name ending with *part*, in `parts` order.

    E.g. versions("1k") -> ["lp1k", "hx1k"].
    """
    matches = []
    for candidate in parts:
        if candidate.endswith(part):
            matches.append(candidate)
    return matches
if __name__ == "__main__":
    # Print one "<part>.<package>" device string per supported part/package
    # combination found in icebox's pin-location database.
    for name, pins in icebox.pinloc_db.items():
        part, package = name.split('-')
        # Keys whose package field contains ':' are skipped here;
        # presumably they are not real package names -- TODO confirm.
        if ':' in package:
            continue
        for v in versions(part):
            device = "{}.{}".format(v, package)
            print(device)
|
'''
charlie.py
---class for controlling charlieplexed SparkFun 8x7 LED Array with the Raspberry Pi
Relies upon RPi.GPIO written by Ben Croston
The MIT License (MIT)
Copyright (c) 2016 Amanda Cole
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import RPi.GPIO as GPIO, time, random
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Charlie:
    '''
    Class for control of the charlieplexed SparkFun 8x7 LED Array.
    '''

    def __init__(self, pins):
        '''
        pins: type 'list', list of ints for array pins a-h, in order [a,b,c,d,e,f,g,h]

        Raises ValueError if not exactly eight pins are given, TypeError if
        any pin is not an int.  All pins are driven low initially.
        '''
        if len(pins) != 8:
            print("You must specify eight, and only eight, pins.")
            raise ValueError
        for pin in pins:
            if type(pin) != int:
                print("Pins must be of type int.")
                raise TypeError
            GPIO.setup(pin, GPIO.OUT, initial = False)
        a, b, c, d, e, f, g, h = pins
        # self.array[row][col] -> [anode, cathode] pin pair lighting that pixel.
        # 7 rows x 8 columns; the diagonal uses the "mirror" pairing since a
        # pin cannot be charlieplexed against itself.
        self.array = [[[h,g],[g,h],[f,h],[e,h],[d,h],[c,h],[b,h],[a,h]], \
                      [[h,f],[g,f],[f,g],[e,g],[d,g],[c,g],[b,g],[a,g]], \
                      [[h,e],[g,e],[f,e],[e,f],[d,f],[c,f],[b,f],[a,f]], \
                      [[h,d],[g,d],[f,d],[e,d],[d,e],[c,e],[b,e],[a,e]], \
                      [[h,c],[g,c],[f,c],[e,c],[d,c],[c,d],[b,d],[a,d]], \
                      [[h,b],[g,b],[f,b],[e,b],[d,b],[c,b],[b,c],[a,c]], \
                      [[h,a],[g,a],[f,a],[e,a],[d,a],[c,a],[b,a],[a,b]]]
        self.ALL_PINS = [a,b,c,d,e,f,g,h]

    def switchOrigin(self):
        '''
        Places origin [0,0] in the diagonally opposite corner of where its current position.
        '''
        # NOTE: reverses self.array in place (rows, then each row's columns).
        switched_array = self.array
        switched_array.reverse()
        for i in switched_array:
            i.reverse()
        self.array = switched_array

    def clearDisplay(self):
        '''
        Clears display.
        '''
        # Tri-stating every pin (input mode) turns all LEDs off.
        GPIO.setup(self.ALL_PINS, GPIO.IN)

    def displayPoint(self, coord):
        '''
        coord: type 'list', coordinates of single pixel to be lit
        Lights a single pixel.
        '''
        self.clearDisplay()
        # Drive the pixel's anode high and cathode low; all others stay tri-stated.
        GPIO.setup(self.array[coord[0]][coord[1]][0], GPIO.OUT, initial = 1)
        GPIO.setup(self.array[coord[0]][coord[1]][1], GPIO.OUT, initial = 0)

    def test(self):
        '''
        Displays all pixels in array, one at a time, starting with [0,0] and ending with [6,7].
        '''
        x = 0
        y = 0
        while y < 8:
            self.displayPoint([x,y])
            time.sleep(0.1)
            x += 1
            if x >= 7:
                x = 0
                y += 1
        self.clearDisplay()

    def display(self, pixels, duration):
        '''
        pixels: type 'list', list of pixels to be lit each in coordinate form [x,y]
        duration: type 'int', duration to display coordinates
        Lights specified pixels in array by multiplexing over the anode pins.
        '''
        if not pixels:
            # Robustness fix: an empty pixel list used to raise
            # ZeroDivisionError below; just keep the display blank instead.
            self.clearDisplay()
            time.sleep(duration)
            return
        # Group requested pixels by their anode (positive) pin:
        # positives == [[anode, [cathode, ...]], ...]
        positives = []
        for coord in pixels:
            anode = self.array[coord[0]][coord[1]][0]
            # BUG FIX: the original tested `anode not in positives`, but
            # positives holds [pin, [...]] pairs, so the test never matched
            # and a duplicate entry was appended for every single pixel.
            if anode not in [entry[0] for entry in positives]:
                positives.append([anode, []])
        for entry in positives:
            for coord in pixels:
                if self.array[coord[0]][coord[1]][0] == entry[0]:
                    if self.array[coord[0]][coord[1]][1] not in entry[1]:
                        entry[1].append(self.array[coord[0]][coord[1]][1])
        t = 0
        pause = 0.02/len(positives)
        while t < duration:
            # Multiplex: light each anode group briefly in turn.
            for i in range(0, len(positives)):
                self.clearDisplay()
                GPIO.setup(positives[i][0], GPIO.OUT, initial = True)
                GPIO.setup(positives[i][1], GPIO.OUT, initial = False)
                time.sleep(pause)
                t += pause
        self.clearDisplay()

    def screensaver(self, duration, fill = .5):
        '''
        duration: type 'int', duration to keep screensaver on
        fill: type 'float', proportion of array to fill with pixels at any given time
        Randomly displays pixels on array.
        '''
        if fill > 1 or fill < 0:
            print("fill must be of type 'float' between 0 and 1...using default value instead.")
            fill = 0.5
        t = 0
        while t < duration:
            # Pick a fresh random set of distinct pixels (7 rows x 8 cols = 56).
            coords = []
            while len(coords) < fill*56:
                coord = [random.randint(0,6), random.randint(0,7)]
                if coord not in coords:
                    coords.append(coord)
            # NOTE(review): each frame displays for 0.15s but t only advances
            # by 0.1, so the actual runtime exceeds `duration` -- confirm intent.
            self.display(coords, 0.15)
            t += 0.1
|
# Copyright © 2016-2017 Simon McVittie
# SPDX-License-Identifier: GPL-2.0+
# (see vectis/__init__.py)
import os
import pwd
import shutil
import subprocess
from tempfile import TemporaryDirectory
from debian.debian_support import (
Version,
)
from vectis.commands.new import vmdebootstrap_argv
from vectis.error import ArgumentError
from vectis.worker import (
VirtWorker,
)
def run(args):
    """Build a qcow2 qemu worker image with vmdebootstrap and sanity-check it.

    Builds a raw image in a scratch directory (via sudo + the command
    wrapper), converts it to qcow2, then boots it once to install the
    packages vectis needs.  On success the image is moved into place
    atomically; on failure the partial image is removed unless --keep.

    :param args: parsed vectis configuration/arguments object
    :raises ArgumentError: if no suite was specified and none can be inferred
    """
    if args.suite is None:
        if args.worker_suite is not None:
            args.suite = args.worker_suite
        else:
            raise ArgumentError('--suite must be specified')

    architecture = args.architecture
    keep = args._keep
    kernel_package = args.get_kernel_package(architecture)
    mirrors = args.get_mirrors()
    out = args.write_qemu_image
    qemu_image_size = args.qemu_image_size
    storage = args.storage
    vendor = args.vendor
    suite = args.get_suite(vendor, args.suite)
    uri = args._uri
    vmdebootstrap_options = args.vmdebootstrap_options

    default_dir = os.path.join(
        storage, architecture, str(vendor), str(suite))

    if uri is None:
        uri = mirrors.lookup_suite(suite)

    # vmdebootstrap's CLI changed across versions, so look up the installed
    # version (and debootstrap's) to build compatible arguments.
    try:
        version = subprocess.check_output(
            ['dpkg-query', '-W', '-f${Version}', 'vmdebootstrap'],
            universal_newlines=True).rstrip('\n')
    # BUG FIX: `subprocess.CalledProcessException` does not exist (the real
    # class is CalledProcessError), so any dpkg-query failure crashed with
    # AttributeError.  Also catch FileNotFoundError, which is what a truly
    # non-dpkg host raises when dpkg-query is absent.
    except (subprocess.CalledProcessError, FileNotFoundError):
        # non-dpkg host, guess a recent version
        version = Version('1.7')
        debootstrap_version = Version('1.0.89')
    else:
        version = Version(version)
        debootstrap_version = subprocess.check_output(
            ['dpkg-query', '-W', '-f${Version}', 'debootstrap'],
            universal_newlines=True).rstrip('\n')
        debootstrap_version = Version(debootstrap_version)

    with TemporaryDirectory(prefix='vectis-bootstrap-') as scratch:
        # Run vmdebootstrap as root through the command wrapper.
        argv = [
            'sudo',
            os.path.join(
                os.path.dirname(__file__), os.pardir,
                'vectis-command-wrapper'),
            '--',
        ]

        vmdb_argv, debootstrap_argv, default_name = vmdebootstrap_argv(
            version,
            architecture=architecture,
            components=args.components,
            debootstrap_version=debootstrap_version,
            kernel_package=kernel_package,
            qemu_image_size=qemu_image_size,
            suite=suite,
            uri=uri,
            merged_usr=args._merged_usr,
        )
        argv.extend(vmdb_argv)
        argv.append('--debootstrapopts=' + ' '.join(debootstrap_argv))
        argv.extend(vmdebootstrap_options)
        argv.append(
            '--customize={}'.format(os.path.join(
                os.path.dirname(__file__), os.pardir, 'setup-testbed')))
        # Hand ownership of the image back to the invoking user.
        argv.append('--owner={}'.format(pwd.getpwuid(os.getuid())[0]))
        argv.append('--image={}/output.raw'.format(scratch))
        subprocess.check_call(argv)

        # Compress the raw image into qcow2.
        subprocess.check_call([
            'qemu-img', 'convert', '-f', 'raw',
            '-O', 'qcow2', '-c', '-p',
            '{}/output.raw'.format(scratch),
            '{}/output.qcow2'.format(scratch),
        ])

        if out is None:
            out = os.path.join(default_dir, default_name)

        os.makedirs(os.path.dirname(out) or os.curdir, exist_ok=True)
        # Stage as <out>.new so a failed smoke test never clobbers a
        # previously working image.
        shutil.move('{}/output.qcow2'.format(scratch), out + '.new')

    try:
        # Smoke test: boot the new image and install the packages vectis
        # will need inside the worker.
        with VirtWorker(
                ['qemu', '{}.new'.format(out)],
                storage=storage,
                suite=suite,
                mirrors=mirrors) as worker:
            worker.check_call([
                'env',
                'DEBIAN_FRONTEND=noninteractive',
                'apt-get',
                '-y',
                '--no-install-recommends',
                '-t', suite.apt_suite,
                'install',

                'python3',
                'sbuild',
                'schroot',
            ])
    except Exception:
        if not keep:
            os.remove(out + '.new')
        raise
    else:
        # Only publish the image once the smoke test passed.
        os.rename(out + '.new', out)
|
شلوار",
"شلوارو": "شلوار",
"شلواروں": "شلوار",
"شلواریں": "شلوار",
"شمے": "شمہ",
"شمشیر": "شمشیر",
"شمشیرو": "شمشیر",
"شمشیروں": "شمشیر",
"شمشیریں": "شمشیر",
"شمارے": "شمارہ",
"شمارہ": "شمارہ",
"شمارو": "شمارہ",
"شماروں": "شمارہ",
"شمع": "شمع",
"شمعو": "شمع",
"شمعوں": "شمع",
"شمعیں": "شمع",
"شمہ": "شمہ",
"شملے": "شملہ",
"شملہ": "شملہ",
"شملو": "شملہ",
"شملوں": "شملہ",
"شمو": "شمہ",
"شموں": "شمہ",
"شناس": "شناس",
"شناسو": "شناس",
"شناسوں": "شناس",
"شق": "شق",
"شقو": "شق",
"شقوں": "شق",
"شقیں": "شق",
"شر": "شر",
"شرح": "شرح",
"شرحو": "شرح",
"شرحوں": "شرح",
"شرحیں": "شرح",
"شراب": "شراب",
"شرابے": "شرابہ",
"شرابہ": "شرابہ",
"شرابو": "شرابہ",
"شرابوں": "شرابہ",
"شرابی": "شرابی",
"شرابیں": "شراب",
"شرابیو": "شرابی",
"شرابیوں": "شرابی",
"شرارت": "شرارت",
"شرارتو": "شرارت",
"شرارتوں": "شرارت",
"شرارتیں": "شرارت",
"شرمگاہ": "شرمگاہ",
"شرمگاہو": "شرمگاہ",
"شرمگاہوں": "شرمگاہ",
"شرمگاہیں": "شرمگاہ",
"شرمندگی": "شرمندگی",
"شرمندگیاں": "شرمندگی",
"شرمندگیو": "شرمندگی",
"شرمندگیوں": "شرمندگی",
"شرو": "شر",
"شروں": "شر",
"شریف": "شریف",
"شریفو": "شریف",
"شریفوں": "شریف",
"شریک": "شریک",
"شریکو": "شریک",
"شریکوں": "شریک",
"شرط": "شرط",
"شرطو": "شرط",
"شرطوں": "شرط",
"شرطیں": "شرط",
"شو": "شہ",
"شوے": "شوہ",
"شوں": "شہ",
"شوشے": "شوشہ",
"شوشہ": "شوشہ",
"شوشو": "شوشہ",
"شوشوں": "شوشہ",
"شوہ": "شوہ",
"شوہر": "شوہر",
"شوہرو": "شوہر",
"شوہروں": "شوہر",
"شور": "شور",
"شورے": "شورہ",
"شوربے": "شوربہ",
"شوربہ": "شوربہ",
"شوربو": "شوربہ",
"شوربوں": "شوربہ",
"شورہ": "شورہ",
"شورو": "شورہ",
"شوروں": "شورہ",
"شوو": "شوہ",
"شووں": "شوہ",
"شیخ": "شیخ",
"شیخو": "شیخ",
"شیخوں": "شیخ",
"شیخی": "شیخی",
"شیخیاں": "شیخی",
"شیخیو": "شیخی",
"شیخیوں": "شیخی",
"شیشے": "شیشہ",
"شیشہ": "شیشہ",
"شیشو": "شیشہ",
"شیشوں": "شیشہ",
"شیشی": "شیشی",
"شیشیاں": "شیشی",
"شیشیو": "شیشی",
"شیشیوں": "شیشی",
"شیعے": "شیعہ",
"شیعہ": "شیعہ",
"شیعو": "شیعہ",
"شیعوں": "شیعہ",
"شیر": "شیر",
"شیرے": "شیرہ",
"شیرہ": "شیرہ",
"شیرو": "شیرہ",
"شیروں": "شیرہ",
"شیروانی": "شیروانی",
"شیروانیاں": "شیروانی",
"شیروانیو": "شیروانی",
"شیروانیوں": "شیروانی",
"شیریں": "شیر",
"شیوے": "شیوہ",
"شیوہ": "شیوہ",
"شیوو": "شیوہ",
"شیووں": "شیوہ",
"شیطان": "شیطان",
"شیطانو": "شیطان",
"شیطانوں": "شیطان",
"شیطانی": "شیطانی",
"شیطانیاں": "شیطانی",
"شیطانیو": "شیطانی",
"شیطانیوں": "شیطانی",
"ذخیرے": "ذخیرا",
"ذخیرا": "ذخیرا",
"ذخیرہ": "ذخیرہ",
"ذخیرو": "ذخیرا",
"ذخیروں": "ذخیرا",
"ذات": "ذات",
"ذاتو": "ذات",
"ذاتوں": "ذات",
"ذاتیں": "ذات",
"ذائقے": "ذائقہ",
"ذائقہ": "ذائقہ",
"ذائقو": "ذائقہ",
"ذائقوں": "ذائقہ",
"ذہن": "ذہن",
"ذہنو": "ذہن",
"ذہنوں": "ذہن",
"ذمّے": "ذمّہ",
"ذمّہ": "ذمّہ",
"ذمّو": "ذمّہ",
"ذمّوں": "ذمّہ",
"ذمے": "ذمہ",
"ذمہ": "ذمہ",
"ذمو": "ذمہ",
"ذموں": "ذمہ",
"ذر": "ذر",
"ذرّ": "ذرّ",
"ذرّے": "ذرّہ",
"ذرّہ": "ذرّہ",
"ذرّو": "ذرّہ",
"ذرّوں": "ذرّہ",
"ذرے": "ذرا",
"ذرا": "ذرا",
"ذرع": "ذرع",
"ذرعے": "ذرع",
"ذرعہ": "ذرعہ",
"ذرعو": "ذرع",
"ذرعوں": "ذرع",
"ذرہ": "ذرہ",
"ذرو": "ذرا",
"ذروں": "ذرا",
"ذریع": "ذریع",
"ذریعے": "ذریع",
"ذریعہ": "ذریعہ",
"ذریعو": "ذریع",
"ذریعوں": "ذریع",
"اَے": "اَے",
"اَخْبار": "اَخْبار",
"اَخْبارات": "اَخْبار",
"اَخْبارو": "اَخْبار",
"اَخْباروں": "اَخْبار",
"اَب": "اَب",
"اَمْجَد": "اَمْجَد",
"اَمر": "اَمر",
"اَپْنےآپ": "اَپْنےآپ",
"اَواخِر": "آخَر",
"اَوامِر": "اَمر",
"اَیسے": "اَیسا",
"اَیسا": "اَیسا",
"اَیسی": "اَیسا",
"ا(اo)تر": "اُترنا",
"ا(اo)ترے": "اُترنا",
"ا(اo)ترں": "اُترنا",
"ا(اo)ترا": "اُترنا",
"ا(اo)ترنے": "اُترنا",
"ا(اo)ترنا": "اُترنا",
"ا(اo)ترتے": "اُترنا",
"ا(اo)ترتا": "اُترنا",
"ا(اo)ترتی": "اُترنا",
"ا(اo)ترتیں": "اُترنا",
"ا(اo)ترو": "اُترنا",
"ا(اo)تروں": "اُترنا",
"ا(اo)تری": "اُترنا",
"ا(اo)تریے": "اُترنا",
"ا(اo)تریں": "اُترنا",
"اِحْسان": "اِحْسان",
"اِحْسانات": "اِحْسان",
"اِحْسانو": "اِحْسان",
"اِحْسانوں": "اِحْسان",
"اِخْبار": "اِخْبار",
"اِخْبارات": "اِخْبار",
"اِخْبارو": "اِخْبار",
"اِخْباروں": "اِخْبار",
"اِشْتِہار": "اِشْتِہار",
"اِشْتِہارات": "اِشْتِہار",
"اِشْتِہارو": "اِشْتِہار",
"اِشْتِہاروں": "اِشْتِہار",
"اِدَھر": "اِدَھر",
"اِن": "میں",
"اِس": "میں",
"اِسکے": "میرا",
"اِسکا": "میرا",
"اِسکی": "میرا",
"اِسی": "اِسی",
"اِتنے": "اِتنا",
"اِتنا": "اِتنا",
"اِتنی": "اِتنا",
"اِترا": "اِترانا",
"اِترانے": "اِترانا",
"ا | ِترانا": "اِترانا",
"اِترانی": "اِترانا",
"اِتراتے": "اِترانا",
"اِتراتا": "اِترانا",
"اِتراتی": "اِترانا",
"اِتراتیں": "اِترانا",
"اِتراؤ": "اِترانا",
"اِتراؤں": "اِترانا | ",
"اِترائے": "اِترانا",
"اِترائی": "اِترانا",
"اِترائیے": "اِترانا",
"اِترائیں": "اِترانا",
"اِترایا": "اِترانا",
"اُڑ": "اُڑنا",
"اُڑے": "اُڑنا",
"اُڑں": "اُڑنا",
"اُڑا": "اُڑنا",
"اُڑانے": "اُڑنا",
"اُڑانا": "اُڑنا",
"اُڑاتے": "اُڑنا",
"اُڑاتا": "اُڑنا",
"اُڑاتی": "اُڑنا",
"اُڑاتیں": "اُڑنا",
"اُڑاؤ": "اُڑنا",
"اُڑاؤں": "اُڑنا",
"اُڑائے": "اُڑنا",
"اُڑائی": "اُڑنا",
"اُڑائیے": "اُڑنا",
"اُڑائیں": "اُڑنا",
"اُڑایا": "اُڑنا",
"اُڑنے": "اُڑنا",
"اُڑنا": "اُڑنا",
"اُڑنی": "اُڑنا",
"اُڑس": "اُڑسنا",
"اُڑسے": "اُڑسنا",
"اُڑسں": "اُڑسنا",
"اُڑسا": "اُڑسنا",
"اُڑسانے": "اُڑسنا",
"اُڑسانا": "اُڑسنا",
"اُڑساتے": "اُڑسنا",
"اُڑساتا": "اُڑسنا",
"اُڑساتی": "اُڑسنا",
"اُڑساتیں": "اُڑسنا",
"اُڑساؤ": "اُڑسنا",
"اُڑساؤں": "اُڑسنا",
"اُڑسائے": "اُڑسنا",
"اُڑسائی": "اُڑسنا",
"اُڑسائیے": "اُڑسنا",
"اُڑسائیں": "اُڑسنا",
"اُڑسایا": "اُڑسنا",
"اُڑسنے": "اُڑسنا",
"اُڑسنا": "اُڑسنا",
"اُڑسنی": "اُڑسنا",
"اُڑستے": "اُڑسنا",
"اُڑستا": "اُڑسنا",
"اُڑستی": "اُڑسنا",
"اُڑستیں": "اُڑسنا",
"اُڑسو": "اُڑسنا",
"اُڑسوں": "اُڑسنا",
"اُڑسوا": "اُڑسنا",
"اُڑسوانے": "اُڑسنا",
"اُڑسوانا": "اُڑسنا",
"اُڑسواتے": "اُڑسنا",
"اُڑسواتا": "اُڑسنا",
"اُڑسواتی": "اُڑسنا",
"اُڑسواتیں": "اُڑسنا",
"اُڑسواؤ": "اُڑسنا",
"اُڑسواؤں": "اُڑسنا",
"اُڑسوائے": "اُڑسنا",
"اُڑسوائی": "اُڑسنا",
"اُڑسوائیے": "اُڑسنا",
"اُڑسوائیں": "اُڑسنا",
"اُڑسوایا": "اُڑسنا",
"اُڑسی": "اُڑسنا",
"اُڑسیے": "اُڑسنا",
"اُڑسیں": "اُڑسنا",
"اُڑتے": "اُڑنا",
"اُڑتا": "اُڑنا",
"اُڑتی": "اُڑنا",
"اُڑتیں": "اُڑنا",
"اُڑو": "اُڑنا",
"اُڑوں": "اُڑنا",
"اُڑوا": "اُڑنا",
"اُڑوانے": "اُڑنا",
"اُڑوانا": "اُڑنا",
"اُڑواتے": "اُڑنا",
"اُڑواتا": "اُڑنا",
"اُڑواتی": "اُڑنا",
"اُڑواتیں": "اُڑنا",
"اُڑواؤ": "اُڑنا",
"اُڑواؤں": "اُڑنا",
"اُڑوائے": "اُڑنا",
"اُڑوائی": "اُڑنا",
"اُڑوائیے": "اُڑنا",
"اُڑوائیں": "اُڑنا",
"اُڑوایا": "اُڑنا",
"اُڑی": "اُڑنا",
"اُڑیے": "اُڑنا",
"اُڑیں": "اُڑنا",
"اُٹھ": "اُٹھنا",
"اُٹھے": "اُٹھنا",
"اُٹھں": "اُٹھنا",
"اُٹھا": "اُٹھنا",
"اُٹھانے": "اُٹھنا",
"اُٹھانا": "اُٹھنا",
"اُٹھاتے": "اُٹھنا",
"اُٹھاتا": "اُٹھنا",
"اُٹھاتی": "اُٹھنا",
"اُٹھاتیں": "اُٹھنا",
"اُٹھاؤ": "اُٹھنا",
"اُٹھاؤں": "اُٹھنا",
"اُٹھائے": "اُٹھنا",
"اُٹھائی": "اُٹھنا",
"اُٹھائیے": "اُٹھنا",
"اُٹھائیں": "اُٹھنا",
"اُٹھایا": "اُٹھنا",
"اُٹھنے": "اُٹھنا",
"اُٹھنا": "اُٹھنا",
"اُٹھنی": "اُٹھنا",
"اُٹھتے": "اُٹھنا",
"اُٹھتا": "اُٹھنا",
"اُٹھتی": "اُٹھنا",
"اُٹھتیں": "اُٹھنا",
"اُٹھو": "اُٹھنا",
"اُٹھوں": "اُٹھنا",
"اُٹھوا": "اُٹھنا",
"اُٹھوانے": "اُٹھنا",
"اُٹھوانا": "اُٹھنا",
"اُٹھواتے": "اُٹھنا",
"اُٹھواتا": "اُٹھنا",
"اُٹھواتی": "اُٹھنا",
"اُٹھواتیں": "اُٹھنا",
"اُٹھواؤ": "اُٹھنا",
"اُٹھواؤں": "اُٹھنا",
"اُٹھوائے": "اُٹھنا",
"اُٹھوائی": "اُٹھنا",
"اُٹھوائیے": "اُٹھنا",
"اُٹھوائیں": "اُٹھنا",
"اُٹھوایا": "اُٹھنا",
"اُٹھی": "اُٹھنا",
"اُٹھیے": "اُٹھنا",
"اُٹھیں": "اُٹھنا",
"اُبھار": "اُبھار",
"اُبھارے": "اُبھرنا",
"اُبھارں": "اُبھرنا",
"اُبھارا": "اُبھرنا",
"اُبھارنے": "اُبھرنا",
"اُبھارنا": "اُبھرنا",
"اُبھارتے": "اُبھرنا",
"اُبھارتا": "اُبھرنا",
"اُبھارتی": "اُبھرنا",
"اُبھارتیں": "اُبھرنا",
"اُبھارو": "اُبھار",
"اُبھاروں": "اُبھار",
"اُبھاری": "اُبھرنا",
"اُبھاریے": "اُبھرنا",
"اُبھاریں": "اُبھرنا",
"اُبھر": "اُبھرنا",
"اُبھرے": "اُبھرنا",
"اُبھرں": "اُبھرنا",
"اُبھرا": "اُبھرنا",
"اُبھرنے |
""" Cisco_IOS_XR_patch_panel_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR patch\-panel package configuration.
This module contains definitions
for the following management objects\:
patch\-panel\: patch\-panel service submode
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class PatchPanel(object):
    """
    patch\-panel service submode
    .. attribute:: enable
    	Enable patch\-panel service
    	**type**\: :py:class:`Empty<ydk.types.Empty>`
    	**mandatory**\: True
    .. attribute:: ipv4
    	IP address for patch\-panel
    	**type**\: str
    .. attribute:: password
    	Password name to be used for Authentication with Patch\-Panel
    	**type**\: str
    .. attribute:: user_name
    	User name to be used for Authentication with Patch\-Panel
    	**type**\: str
    .. attribute:: _is_presence
    	Is present if this instance represents presence container else not
    	**type**\: bool
    This class is a :ref:`presence class<presence-class>`
    """

    _prefix = 'patch-panel-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Presence container: its mere existence is configuration data.
        self._is_presence = True
        self.enable = None
        self.ipv4 = None
        self.password = None
        self.user_name = None

    @property
    def _common_path(self):
        # Absolute YANG path of this container.
        return '/Cisco-IOS-XR-patch-panel-cfg:patch-panel'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # A presence container always carries data; otherwise any set leaf counts.
        if self._is_presence:
            return True
        leaves = (self.enable, self.ipv4, self.password, self.user_name)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_patch_panel_cfg as meta
        return meta._meta_table['PatchPanel']['meta_info']
|
eor.graphql.core.utils.reordering import perform_reordering
from saleor.product import models
SortedModel = models.AttributeValue
def _sorted_by_order(items):
return sorted(items, key=lambda o: o[1])
def _get_sorted_map():
    """Return all (pk, sort_order) pairs from the DB, ordered by sort order."""
    pairs = SortedModel.objects.values_list("pk", "sort_order")
    return list(pairs.order_by("sort_order"))
@pytest.fixture
def dummy_attribute():
    """Provide a throwaway Attribute for sorted values to attach to."""
    attribute_model = models.Attribute
    return attribute_model.objects.create(name="Dummy")
@pytest.fixture
def sorted_entries_seq(dummy_attribute):
    """Create six values with sequential sort orders 0..5."""
    created = SortedModel.objects.bulk_create(
        [
            SortedModel(
                attribute=dummy_attribute,
                slug=f"value-{index}",
                name=f"Value-{index}",
                sort_order=index,
            )
            for index in range(6)
        ]
    )
    return list(created)
@pytest.fixture
def sorted_entries_gaps(dummy_attribute):
    """Create six values with gapped sort orders 0, 2, 4, ..., 10."""
    created = SortedModel.objects.bulk_create(
        [
            SortedModel(
                attribute=dummy_attribute,
                slug=f"value-{index}",
                name=f"Value-{index}",
                sort_order=index,
            )
            for index in range(0, 12, 2)
        ]
    )
    return list(created)
def test_reordering_sequential(sorted_entries_seq):
    """Ensure the reordering logic works on sequential sort order values."""
    queryset = SortedModel.objects
    entries = sorted_entries_seq
    operations = {entries[5].pk: -1, entries[2].pk: +3}

    # Moving entry 2 down by three and entry 5 up by one shifts the entries
    # in between by the compensating amounts.
    expected = _sorted_by_order(
        [
            (entries[0].pk, 0),
            (entries[1].pk, 1),
            (entries[2].pk, 2 + 3),
            (entries[3].pk, 3 - 1),
            (entries[4].pk, 4 + 1 - 1),
            (entries[5].pk, 5 - 1 - 1),
        ]
    )

    perform_reordering(queryset, operations)

    assert _get_sorted_map() == expected
def test_reordering_non_sequential(sorted_entries_gaps):
    """Ensure reordering copes with gaps between sort order values.

    Gaps appear when an item gets deleted, leaving non-sequential values.
    """
    queryset = SortedModel.objects
    entries = sorted_entries_gaps
    operations = {entries[5].pk: -1, entries[2].pk: +3}

    expected = _sorted_by_order(
        [
            (entries[0].pk, 0),
            (entries[1].pk, 2),
            (entries[2].pk, 4 + (3 * 2) - 1),
            (entries[3].pk, 6 - 1),
            (entries[4].pk, 8 + 1 - 1),
            (entries[5].pk, 10 - (1 * 2) - 1),
        ]
    )

    perform_reordering(queryset, operations)

    assert _get_sorted_map() == expected
@pytest.mark.parametrize(
    "operation, expected_operations",
    [((0, +5), (+5, -1, -1, -1, -1, -1)), ((5, -5), (+1, +1, +1, +1, +1, -5))],
)
def test_inserting_at_the_edges(sorted_entries_seq, operation, expected_operations):
    """Ensure an item can be moved to the very top and very bottom of the list."""
    queryset = SortedModel.objects
    entries = sorted_entries_seq
    target_pos, relative_move = operation
    operations = {entries[target_pos].pk: relative_move}

    expected = _sorted_by_order(
        [
            (entry.pk, entry.sort_order + delta)
            for entry, delta in zip(entries, expected_operations)
        ]
    )

    perform_reordering(queryset, operations)

    assert _get_sorted_map() == expected
def test_reordering_out_of_bound(sorted_entries_seq):
    """Ensure out-of-range moves are clamped to the list boundaries.

    Users must not be able to create gaps or insert at arbitrary positions
    (e.g. -1000), which could make a mess of the database.
    """
    queryset = SortedModel.objects
    entries = sorted_entries_seq
    operations = {entries[5].pk: -100, entries[0].pk: +100}

    # -100 clamps to the top (move of -5); +100 clamps to the bottom (+5).
    expected = _sorted_by_order(
        [
            (entries[0].pk, 0 + 5),
            (entries[1].pk, 1),
            (entries[2].pk, 2),
            (entries[3].pk, 3),
            (entries[4].pk, 4),
            (entries[5].pk, 5 - 5),
        ]
    )

    perform_reordering(queryset, operations)

    assert _get_sorted_map() == expected
def test_reordering_null_sort_orders(dummy_attribute):
    """
    Ensures null sort orders values are getting properly ordered (by ID sorting).
    """
    queryset = SortedModel.objects

    def _make(pk, order):
        # Every fixture row reuses its pk as both slug and name.
        return SortedModel(
            pk=pk,
            attribute=dummy_attribute,
            slug=str(pk),
            name=str(pk),
            sort_order=order,
        )

    non_null_sorted_entries = list(
        queryset.bulk_create([_make(1, 1), _make(2, 0)])
    )
    null_sorted_entries = list(
        queryset.bulk_create([_make(5, None), _make(4, None), _make(3, None)])
    )
    operations = {null_sorted_entries[0].pk: -2}
    # Null sort orders come after the non-null ones and are ordered by id,
    # so pk=5 moved up by two lands right behind the non-null entries.
    expected = [
        (non_null_sorted_entries[1].pk, 0),
        (non_null_sorted_entries[0].pk, 1),
        (null_sorted_entries[0].pk, 2),
        (null_sorted_entries[2].pk, 3),
        (null_sorted_entries[1].pk, 4),
    ]
    perform_reordering(queryset, operations)
    assert _get_sorted_map() == expected
def test_reordering_nothing(sorted_entries_seq, assert_num_queries):
    """
    Ensures giving operations that does nothing, are skipped. Thus only one query should
    have been made: fetching the nodes.
    """
    no_op_moves = {sorted_entries_seq[0].pk: 0}
    with assert_num_queries(1) as ctx:
        perform_reordering(SortedModel.objects, no_op_moves)
    assert ctx[0]["sql"].startswith("SELECT "), "Should only have done a SELECT"
def test_giving_no_operation_does_no_query(sorted_entries_seq, assert_num_queries):
    """Ensures giving no operations runs no queries at all."""
    with assert_num_queries(0):
        perform_reordering(SortedModel.objects, {})
def test_reordering_concurrently(dummy_attribute, assert_num_queries):
    """
    Ensures users cannot concurrently reorder, they need to wait for the other one
    to achieve.

    This must be the first thing done before doing anything. For that, we ensure
    the first SQL query is acquiring the lock.
    """
    qs = SortedModel.objects
    attribute = dummy_attribute
    entries = list(
        qs.bulk_create(
            [
                SortedModel(
                    pk=1, attribute=attribute, slug="1", name="1", sort_order=0
                ),
                SortedModel(
                    pk=2, attribute=attribute, slug="2", name="2", sort_order=1
                ),
            ]
        )
    )
    operations = {entries[0].pk: +1}
    with assert_num_queries(2) as ctx:
        perform_reordering(qs, operations)
    # Query #1 must take the row locks up front (SELECT ... FOR UPDATE),
    # before any modification happens.
    assert ctx[0]["sql"] == (
        'SELECT "product_attributevalue"."id", "product_attributevalue"."sort_order" '
        'FROM "product_attributevalue" '
        "ORDER BY "
        '"product_attributevalue"."sort_order" ASC NULLS LAST, '
        '"product_attributevalue"."id" ASC FOR UPDATE'
    )
    # Query #2 applies every move in one CASE-driven bulk UPDATE.
    assert ctx[1]["sql"] == (
        'UPDATE "product_attributevalue" '
        'SET "sort_order" = (CASE WHEN ("product_attributevalue"."id" = 1) '
        'THEN 1 WHEN ("product_attributevalue"."id" = 2) '
        "THEN 0 ELSE NULL END)::integer "
        'WHERE "product_attributevalue"."id" IN (1, 2)'
    )
def test_reordering_deleted_node_from_concurrent(dummy_attribute, assert_num_queries):
"""
Ensures if a node was deleted before locking, it just skip it instead of
raising an error.
"""
qs = SortedModel.objects
attribute = dummy_attribute
entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=0
),
SortedModel(
pk=2, attribute= |
# -*- | coding: utf-8 -*-
import unittest
from test.basetestcases import PluginLoadingMixin
class StatisticsLoadingTest (PluginLoadingMixin, unittest.TestCase):
    """Checks that the Statistics plugin can be loaded by PluginsLoader."""

    def getPluginDir(self):
        """
        Return the path to the folder containing the plugin under test.
        """
        return "../plugins/statistics"

    def getPluginName(self):
        """
        Return the plugin name by which it can be
        found in PluginsLoader.
        """
        return "Statistics"
|
able.
"""
try:
workbench = util.check_output('which wb_command')
workbench = workbench.strip()
except:
workbench = None
return workbench
def find_fsl():
    """
    Returns the path of the fsl bin/ folder, or None if unavailable.
    """
    # Check the FSLDIR environment variable first
    shell_val = os.getenv('FSLDIR')
    dir_fsl = os.path.join(shell_val, 'bin') if shell_val else ''
    if os.path.exists(dir_fsl):
        return dir_fsl
    # If the env var method fails, fall back to using which. This method is
    # not used first because sometimes the executable is installed separately
    # from the rest of the fsl package, making it hard (or impossible) to locate
    # fsl data files based on the returned path
    try:
        dir_fsl = util.check_output('which fsl')
        dir_fsl = '/'.join(dir_fsl.split('/')[:-1])
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit / KeyboardInterrupt.
        dir_fsl = None
    return dir_fsl
def find_freesurfer():
    """
    Returns the path of the freesurfer bin/ folder, or None if unavailable.
    """
    try:
        dir_freesurfer = util.check_output('which recon-all')
        # Keep the directory part of the path to recon-all.
        dir_freesurfer = '/'.join(dir_freesurfer.split('/')[:-1])
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit / KeyboardInterrupt.
        dir_freesurfer = None
    return dir_freesurfer
def find_msm():
    """Return the path to the msm executable, or None if unavailable.

    Fixes a crash in the original: when `which msm` failed, ``msm`` was set
    to None and the unconditional ``msm.replace(...)`` raised AttributeError.
    """
    try:
        msm = util.check_output("which msm")
    except Exception:
        # `which` failed (or util is unavailable): msm is not installed.
        return None
    return msm.replace(os.linesep, '')
def find_scene_temp | lates():
"""
Returns the hcp scene templates path. If the shell variable
HCP_SCENE_TEMPLATES is set, uses that. Otherwise returns the defaults
stored in the ciftify/data/scene_templates folder.
"""
dir_hcp_templates = os.getenv('HCP_SCENE_TEMPLATES')
if dir_hcp_templates is None:
ciftify_path = os.path.dirname(__file__)
dir_hcp_templates = os.path.abspath(os.path.join(find_ciftify_global(),
'scene_templates'))
return dir_hcp_templates
def | find_ciftify_global():
"""
Returns the path to ciftify required config and support files. If the
shell variable CIFTIFY_DATA is set, uses that. Otherwise returns the
defaults stored in the ciftify/data folder.
"""
dir_templates = os.getenv('CIFTIFY_DATA')
if dir_templates is None:
ciftify_path = os.path.dirname(__file__)
dir_templates = os.path.abspath(os.path.join(ciftify_path, 'data'))
return dir_templates
def find_HCP_S900_GroupAvg():
    """Return the HCP_S900_GroupAvg path, which should live in ciftify's data dir."""
    return os.path.join(find_ciftify_global(), 'HCP_S900_GroupAvg_v1')
def find_freesurfer_data():
    """
    Returns the freesurfer data path defined in the environment
    (SUBJECTS_DIR), or None if it is not set.
    """
    # os.getenv never raises; the old try/except around it was dead code.
    return os.getenv('SUBJECTS_DIR')
def find_hcp_data():
    """
    Returns the HCP data path defined in the environment (HCP_DATA),
    or None if it is not set. (Docstring fixed: the original said
    "freesurfer data path", copy-pasted from find_freesurfer_data.)
    """
    # os.getenv never raises; the old try/except around it was dead code.
    return os.getenv('HCP_DATA')
def wb_command_version():
    '''
    Returns version info about wb_command.

    Will raise an error if wb_command is not found, since the scripts that use
    this depend heavily on wb_command and should crash anyway in such
    an unexpected situation.
    '''
    wb_path = find_workbench()
    if wb_path is None:
        raise EnvironmentError("wb_command not found. Please check that it is "
                "installed.")
    # Running wb_command with no arguments prints a banner; the first three
    # lines carry the version information.
    wb_help = util.check_output('wb_command')
    wb_version = wb_help.split(os.linesep)[0:3]
    sep = '{} '.format(os.linesep)
    wb_v = sep.join(wb_version)
    all_info = 'wb_command: {}Path: {} {}'.format(sep,wb_path,wb_v)
    return(all_info)
def freesurfer_version():
    '''
    Returns version info for freesurfer.

    Raises EnvironmentError when freesurfer cannot be located at all;
    degrades to a "not found" message when only the build stamp is missing.
    '''
    fs_path = find_freesurfer()
    if fs_path is None:
        raise EnvironmentError("Freesurfer cannot be found. Please check that "
                "it is installed.")
    # The build stamp lives one directory above bin/.
    fs_buildstamp = os.path.join(os.path.dirname(fs_path), 'build-stamp.txt')
    try:
        with open(fs_buildstamp, "r") as text_file:
            bstamp = text_file.read()
    except (IOError, OSError):
        # Narrowed from a bare `except:` — only a missing/unreadable
        # build-stamp file should fall back to this message.
        return "freesurfer build information not found."
    bstamp = bstamp.replace(os.linesep,'')
    info = "freesurfer:{0}Path: {1}{0}Build Stamp: {2}".format(
            '{} '.format(os.linesep),fs_path, bstamp)
    return info
def fsl_version():
    '''
    Returns version info for FSL.

    Raises EnvironmentError when FSL cannot be located at all; degrades to
    a "not found" message when only the version file is missing.
    '''
    fsl_path = find_fsl()
    if fsl_path is None:
        raise EnvironmentError("FSL not found. Please check that it is "
                "installed")
    # The version file lives at <fsl root>/etc/fslversion, one level up
    # from bin/.
    fsl_buildstamp = os.path.join(os.path.dirname(fsl_path), 'etc',
            'fslversion')
    try:
        with open(fsl_buildstamp, "r") as text_file:
            bstamp = text_file.read()
    except (IOError, OSError):
        # Narrowed from a bare `except:` — only a missing/unreadable
        # version file should fall back to this message.
        return "FSL build information not found."
    bstamp = bstamp.replace(os.linesep,'')
    info = "FSL:{0}Path: {1}{0}Version: {2}".format('{} '.format(os.linesep),
            fsl_path, bstamp)
    return info
def msm_version():
    '''
    Returns version info for msm, or "MSM not found." when unavailable.
    '''
    msm_path = find_msm()
    if not msm_path:
        return "MSM not found."
    try:
        version = util.check_output('msm --version').replace(os.linesep, '')
    except Exception:
        # Narrowed from a bare `except:`; older msm builds may not support
        # --version, in which case we report an empty version string.
        version = ''
    info = "MSM:{0}Path: {1}{0}Version: {2}".format('{} '.format(os.linesep),
            msm_path, version)
    return info
def ciftify_version(file_name=None):
    '''
    Returns the path and the latest git commit number and date if working from
    a git repo, or the version number if working with an installed copy.
    '''
    logger = logging.getLogger(__name__)

    # Installed copy: report the package version and stop.
    try:
        version = pkg_resources.get_distribution('ciftify').version
    except pkg_resources.DistributionNotFound:
        # Ciftify not installed, but a git repo, so return commit info
        pass
    else:
        return "Ciftify version {}".format(version)

    # Locate the repo: prefer the directory of the named executable, fall
    # back to this module's own location.
    try:
        dir_ciftify = util.check_output('which {}'.format(file_name))
    except subprocess.CalledProcessError:
        file_name = None
        dir_ciftify = __file__

    ciftify_path = os.path.dirname(dir_ciftify)
    git_log = get_git_log(ciftify_path)

    if not git_log:
        logger.error("Something went wrong while retrieving git log. Returning "
                "ciftify path only.")
        return "Ciftify:{0}Path: {1}".format(os.linesep, ciftify_path)

    commit_num, commit_date = read_commit(git_log)
    info = "Ciftify:{0}Path: {1}{0}{2}{0}{3}".format('{} '.format(os.linesep),
            ciftify_path, commit_num, commit_date)
    if not file_name:
        return info

    ## Try to return the file_name's git commit too, if a file was given
    file_log = get_git_log(ciftify_path, file_name)
    if not file_log:
        # File commit info not found
        return info

    commit_num, commit_date = read_commit(file_log)
    info = "{1}{5}Last commit for {2}:{0}{3}{0}{4}".format('{} '.format(
            os.linesep), info, file_name, commit_num,
            commit_date, os.linesep)
    return info
def get_git_log(git_dir, file_name=None):
    """Return the first lines of `git log` run in git_dir (optionally
    following file_name), or "" when the command fails.

    NOTE(review): the command is assembled as a shell string, so git_dir /
    file_name are interpolated unescaped — acceptable for internal paths,
    but worth confirming callers never pass untrusted values.
    """
    git_cmd = ["cd {}; git log".format(git_dir)]
    if file_name:
        git_cmd.append("--follow {}".format(file_name))
    git_cmd.append("| head")
    git_cmd = " ".join(git_cmd)

    # Silence stderr
    try:
        with open(os.devnull, 'w') as DEVNULL:
            file_log = util.check_output(git_cmd, stderr=DEVNULL)
    except subprocess.CalledProcessError:
        # Fail safe: if the git command returns a non-zero value,
        # fall back to an empty log.
        logger = logging.getLogger(__name__)
        logger.error("Unrecognized command: {} "
                "\nReturning empty git log.".format(git_cmd))
        file_log = ""
    return file_log
def read_commit(git_log):
    """Extract (commit line, date line) from `git log` output.

    The first line ("commit <hash>") is relabelled "Commit: <hash>";
    the third line is the date line.
    """
    lines = git_log.split(os.linesep)
    return lines[0].replace('commit', 'Commit:'), lines[2]
def system_info():
''' return formatted version of the system info'''
sys_info = os.uname()
sep = '{} '.format(os.linesep)
|
#!/usr/bin/env python
#
# Copyright 2020 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest
import random, numpy
from gnuradio import digital, blocks, channels
class qa_linear_equalizer(gr_unittest.TestCase):
    """QA tests for digital.linear_equalizer and the LMS adaptive algorithm."""

    def unpack_values(self, values_in, bits_per_value, bits_per_symbol):
        """Split each bits_per_value-wide value into bits_per_symbol-wide
        symbols, most-significant bits first. Returns [] on a size mismatch."""
        # verify that 8 is divisible by bits_per_symbol
        m = bits_per_value / bits_per_symbol
        # print(m)
        mask = 2**(bits_per_symbol)-1
        if bits_per_value != m*bits_per_symbol:
            print("error - bits per symbols must fit nicely into bits_per_value bit values")
            return []
        num_values = len(values_in)
        num_symbols = int(num_values*( m) )
        cur_byte = 0
        cur_bit = 0
        out = []
        for i in range(num_symbols):
            # Take the next bits_per_symbol bits of the current value,
            # starting from the most significant end.
            s = (values_in[cur_byte] >> (bits_per_value-bits_per_symbol-cur_bit)) & mask
            out.append(s)
            cur_bit += bits_per_symbol
            if cur_bit >= bits_per_value:
                cur_bit = 0
                cur_byte += 1
        return out

    def map_symbols_to_constellation(self, symbols, cons):
        """Map integer symbol indices to their complex constellation points."""
        l = list(map(lambda x: cons.points()[x], symbols))
        return l

    def setUp(self):
        # Fixed seed keeps the random payload deterministic between runs.
        random.seed(987654)
        self.tb = gr.top_block()
        self.num_data = num_data = 10000
        self.sps = sps = 4                 # samples per symbol
        self.eb = eb = 0.35                # excess bandwidth of the RRC filter
        self.preamble = preamble = [0x27,0x2F,0x18,0x5D,0x5B,0x2A,0x3F,0x71,0x63,0x3C,0x17,0x0C,0x0A,0x41,0xD6,0x1F,0x4C,0x23,0x65,0x68,0xED,0x1C,0x77,0xA7,0x0E,0x0A,0x9E,0x47,0x82,0xA4,0x57,0x24,]
        self.payload_size = payload_size = 300 # bytes
        self.data = data = [0]*4+[random.getrandbits(8) for i in range(payload_size)]
        self.gain = gain = .001 # LMS gain
        self.corr_thresh = corr_thresh = 3e6
        self.num_taps = num_taps = 16

    def tearDown(self):
        self.tb = None

    def transform(self, src_data, gain, const):
        """Run src_data through an LMS decision-directed equalizer and
        return the equalized samples."""
        SRC = blocks.vector_source_c(src_data, False)
        EQU = digital.lms_dd_equalizer_cc(4, gain, 1, const.base())
        DST = blocks.vector_sink_c()
        self.tb.connect(SRC, EQU, DST)
        self.tb.run()
        return DST.data()

    def test_001_identity(self):
        # Constant modulus signal so no adjustments
        const = digital.constellation_qpsk()
        src_data = const.points()*1000
        N = 100 # settling time
        expected_data = src_data[N:]
        result = self.transform(src_data, 0.1, const)[N:]

        N = -500
        self.assertComplexTuplesAlmostEqual(expected_data[N:], result[N:], 5)

    def test_qpsk_3tap_lms_training(self):
        # set up fg
        gain = 0.01 # LMS gain
        num_taps = 16
        num_samp = 2000
        num_test = 500
        cons = digital.constellation_qpsk().base()
        rxmod = digital.generic_mod(cons, False, self.sps, True, self.eb, False, False)
        modulated_sync_word_pre = digital.modulate_vector_bc(rxmod.to_basic_block(), self.preamble+self.preamble, [1])
        modulated_sync_word = modulated_sync_word_pre[86:(512+86)] # compensate for the RRC filter delay
        corr_max = numpy.abs(numpy.dot(modulated_sync_word,numpy.conj(modulated_sync_word)))
        corr_calc = self.corr_thresh/(corr_max*corr_max)
        preamble_symbols = self.map_symbols_to_constellation(self.unpack_values(self.preamble, 8, 2), cons)

        alg = digital.adaptive_algorithm_lms(cons, gain).base()
        evm = digital.meas_evm_cc(cons, digital.evm_measurement_t.EVM_PERCENT)
        leq = digital.linear_equalizer(num_taps, self.sps, alg, False, preamble_symbols, 'corr_est')
        correst = digital.corr_est_cc(modulated_sync_word, self.sps, 12, corr_calc, digital.THRESHOLD_ABSOLUTE)
        constmod = digital.generic_mod(
            constellation=cons,
            differential=False,
            samples_per_symbol=4,
            pre_diff_code=True,
            excess_bw=0.35,
            verbose=False,
            log=False)
        # Static 3-tap multipath channel, no noise: equalizer must undo it.
        chan = channels.channel_model(
            noise_voltage=0.0,
            frequency_offset=0.0,
            epsilon=1.0,
            taps=(1.0 + 1.0j, 0.63-.22j, -.1+.07j),
            noise_seed=0,
            block_tags=False)
        vso = blocks.vector_source_b(self.preamble+self.data, True, 1, [])
        head = blocks.head(gr.sizeof_float*1, num_samp)
        vsi = blocks.vector_sink_f()

        self.tb.connect(vso, constmod, chan, correst, leq, evm, head, vsi)
        self.tb.run()

        # look at the last 1000 samples, should converge quickly, below 5% EVM
        upper_bound = list(20.0*numpy.ones((num_test,)))
        lower_bound = list(0.0*numpy.zeros((num_test,)))
        output_data = vsi.data()
        output_data = output_data[-num_test:]
        self.assertLess(output_data, upper_bound)
        self.assertGreater(output_data, lower_bound)
# Run this QA suite under the GNU Radio test harness when invoked directly.
if __name__ == '__main__':
    gr_unittest.run(qa_linear_equalizer)
|
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you | may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# S | ee the License for the specific language governing permissions and
# limitations under the License.
#
# Authenticates user for accessing the ISB-CGC Endpoint APIs.
#
# May be run from the command line or in scripts/ipython.
#
# The credentials file can be copied to any machine from which you want
# to access the API.
#
# 1. Command Line
# python ./isb_auth.py saves the user's credentials;
# OPTIONAL:
# -v for verbose (returns token!)
# -s FILE sets credentials file [default: ~/.isb_credentials]
# -u URL-only: for use over terminal connections;
# gives user a URL to paste into their browser,
# and asks for an auth code in return
#
# 2. Python
# import isb_auth
# isb_auth.get_credentials()
#
# # optional: to store credentials in a different location
# from oauth2client.file import Storage
# import isb_auth
# import os
#
# storage_file = os.path.join(os.path.expanduser("~"), "{USER_CREDENTIALS_FILE_NAME}")
# storage = Storage(storage_file)
# isb_auth.get_credentials(storage=storage)
#
from __future__ import print_function
from argparse import ArgumentParser
import os
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
from oauth2client.file import Storage
# Toggled to True by main() when --verbose is given.
VERBOSE = False

# for native application - same as settings.INSTALLED_APP_CLIENT_ID
CLIENT_ID = '586186890913-atr969tu3lf7u574khjjplb45fgpq1bg.apps.googleusercontent.com'
# NOTE: this is NOT actually a 'secret' -- we're using the 'installed
# application' OAuth pattern here
CLIENT_SECRET = 'XeBxiK7NQ0yvAkAnRIKufkFE'

# OAuth scope: only the user's e-mail address is requested.
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
# Default on-disk location for the cached credentials.
DEFAULT_STORAGE_FILE = os.path.join(os.path.expanduser("~"), '.isb_credentials')
def maybe_print(msg):
    """Print *msg* only when the module-level VERBOSE flag is set."""
    if not VERBOSE:
        return
    print(msg)
def get_credentials(storage=None, oauth_flow_args=None):
    """Return stored OAuth credentials, running the OAuth flow if needed.

    storage: optional oauth2client Storage; defaults to DEFAULT_STORAGE_FILE.
    oauth_flow_args: optional list of extra args for the oauth flow.

    Fix: the original used a mutable default (`oauth_flow_args=[]`) and
    appended to it, mutating the shared default list (and any caller-supplied
    list) across calls.
    """
    oauth_flow_args = list(oauth_flow_args) if oauth_flow_args else []
    noweb = '--noauth_local_webserver'
    # When used as a library (not run as a script), never try to open a
    # local browser for the auth handshake.
    if __name__ != '__main__' and noweb not in oauth_flow_args:
        oauth_flow_args.append(noweb)
    if storage is None:
        storage = Storage(DEFAULT_STORAGE_FILE)
    credentials = storage.get()
    if not credentials or credentials.invalid:
        maybe_print('credentials missing/invalid, kicking off OAuth flow')
        flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, EMAIL_SCOPE)
        flow.auth_uri = flow.auth_uri.rstrip('/') + '?approval_prompt=force'
        credentials = tools.run_flow(flow, storage, tools.argparser.parse_args(oauth_flow_args))
    return credentials
def main():
    """Command-line entry point: run the OAuth flow and cache credentials."""
    global VERBOSE
    args = parse_args()
    flow_args = [args.noauth_local_webserver] if args.noauth_local_webserver else []
    VERBOSE = args.verbose
    maybe_print('--verbose: printing extra information')
    credentials = get_credentials(Storage(args.storage_file), flow_args)
    maybe_print('credentials stored in ' + args.storage_file)
    maybe_print('access_token: ' + credentials.access_token)
    maybe_print('refresh_token: ' + credentials.refresh_token)
def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    cli = ArgumentParser()
    cli.add_argument(
        '--storage_file', '-s', default=DEFAULT_STORAGE_FILE,
        help='storage file to use for the credentials (default is {})'.format(DEFAULT_STORAGE_FILE))
    cli.add_argument(
        '--verbose', '-v', dest='verbose', action='store_true',
        help='display credentials storage location, access token, and refresh token')
    cli.set_defaults(verbose=False)
    # -u stores the literal flag string so it can be forwarded to the oauth flow.
    cli.add_argument(
        '--noauth_local_webserver', '-u', action='store_const',
        const='--noauth_local_webserver')
    return cli.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the 'UserKey' model in table
    'sshkey_userkey' with a unique (user, name) constraint.

    Auto-generated; do not hand-edit the frozen ORM below.
    """

    def forwards(self, orm):
        # Adding model 'UserKey'
        db.create_table('sshkey_userkey', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('key', self.gf('django.db.models.fields.TextField')(max_length=2000)),
            ('fingerprint', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=47, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal('django_sshkey', ['UserKey'])

        # Adding unique constraint on 'UserKey', fields ['user', 'name']
        db.create_unique('sshkey_userkey', ['user_id', 'name'])

    def backwards(self, orm):
        # Removing unique constraint on 'UserKey', fields ['user', 'name']
        db.delete_unique('sshkey_userkey', ['user_id', 'name'])

        # Deleting model 'UserKey'
        db.delete_table('sshkey_userkey')

    # Frozen ORM snapshot used by South while applying this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'django_sshkey.userkey': {
            'Meta': {'unique_together': "[('user', 'name')]", 'object_name': 'UserKey', 'db_table': "'sshkey_userkey'"},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'fingerprint': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '47', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['django_sshkey']
|
lue(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
    """Build the permessage-deflate compressor/decompressor pair from the
    negotiated extension parameters; raises ValueError on unknown keys."""
    # TODO: handle invalid parameters gracefully
    allowed_keys = set(['server_no_context_takeover',
                        'client_no_context_takeover',
                        'server_max_window_bits',
                        'client_max_window_bits'])
    for key in agreed_parameters:
        if key not in allowed_keys:
            raise ValueError("unsupported compression parameter %r" % key)
    # Outgoing data uses this side's options; incoming data uses the peer's.
    other_side = 'client' if (side == 'server') else 'server'
    self._compressor = _PerMessageDeflateCompressor(
        **self._get_compressor_options(side, agreed_parameters))
    self._decompressor = _PerMessageDeflateDecompressor(
        **self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
    """Encode ``data`` as a single websocket frame and write it to the stream.

    ``fin`` marks the final fragment of a message; ``flags`` carries RSV
    bits (e.g. RSV1 for per-message deflate). Aborts the connection if the
    stream is already closed.
    """
    if fin:
        finbit = self.FIN
    else:
        finbit = 0
    # First header byte: FIN flag + RSV flags + opcode.
    frame = struct.pack("B", finbit | opcode | flags)
    l = len(data)
    if self.mask_outgoing:
        mask_bit = 0x80
    else:
        mask_bit = 0
    # Payload length encoding: 7 bits, or 126 + 16 bits, or 127 + 64 bits.
    if l < 126:
        frame += struct.pack("B", l | mask_bit)
    elif l <= 0xFFFF:
        frame += struct.pack("!BH", 126 | mask_bit, l)
    else:
        frame += struct.pack("!BQ", 127 | mask_bit, l)
    if self.mask_outgoing:
        # Client-to-server frames must be masked with 4 random bytes.
        mask = os.urandom(4)
        data = mask + _websocket_mask(mask, data)
    frame += data
    self._wire_bytes_out += len(frame)
    try:
        return self.stream.write(frame)
    except StreamClosedError:
        self._abort()
def write_message(self, message, binary=False):
    """Sends the given message to the client of this Web Socket."""
    # Opcode 0x2 = binary frame, 0x1 = text frame.
    if binary:
        opcode = 0x2
    else:
        opcode = 0x1
    message = tornado.escape.utf8(message)
    assert isinstance(message, bytes)
    self._message_bytes_out += len(message)
    flags = 0
    if self._compressor:
        # Per-message deflate: compress the payload and signal it via RSV1.
        message = self._compressor.compress(message)
        flags |= self.RSV1
    return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
    """Send ping frame."""
    assert isinstance(data, bytes)
    # 0x9 is the ping opcode; pings are always single (final) frames.
    self._write_frame(True, 0x9, data)
def _receive_frame(self):
    """Start reading the next frame: two header bytes, then the callback chain."""
    try:
        self.stream.read_bytes(2, self._on_frame_start)
    except StreamClosedError:
        self._abort()
def _on_frame_start(self, data):
    """Parse the two-byte frame header and schedule the next read
    (masking key, extended length, or payload)."""
    self._wire_bytes_in += len(data)
    header, payloadlen = struct.unpack("BB", data)
    self._final_frame = header & self.FIN
    reserved_bits = header & self.RSV_MASK
    self._frame_opcode = header & self.OPCODE_MASK
    # Opcodes with the 0x8 bit set (close/ping/pong) are control frames.
    self._frame_opcode_is_control = self._frame_opcode & 0x8
    if self._decompressor is not None and self._frame_opcode != 0:
        # Compression state only toggles on the first fragment of a message
        # (opcode 0 marks continuation fragments).
        self._frame_compressed = bool(reserved_bits & self.RSV1)
        reserved_bits &= ~self.RSV1
    if reserved_bits:
        # client is using as-yet-undefined extensions; abort
        self._abort()
        return
    self._masked_frame = bool(payloadlen & 0x80)
    payloadlen = payloadlen & 0x7f
    if self._frame_opcode_is_control and payloadlen >= 126:
        # control frames must have payload < 126
        self._abort()
        return
    try:
        if payloadlen < 126:
            self._frame_length = payloadlen
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self.stream.read_bytes(self._frame_length,
                                       self._on_frame_data)
        elif payloadlen == 126:
            # 16-bit extended payload length follows.
            self.stream.read_bytes(2, self._on_frame_length_16)
        elif payloadlen == 127:
            # 64-bit extended payload length follows.
            self.stream.read_bytes(8, self._on_frame_length_64)
    except StreamClosedError:
        self._abort()
def _on_frame_length_16(self, data):
    """Parse the 16-bit extended payload length, then read mask or payload."""
    self._wire_bytes_in += len(data)
    self._frame_length = struct.unpack("!H", data)[0]
    try:
        if self._masked_frame:
            self.stream.read_bytes(4, self._on_masking_key)
        else:
            self.stream.read_bytes(self._frame_length, self._on_frame_data)
    except StreamClosedError:
        self._abort()
def _on_frame_length_64(self, data):
    """Parse the 64-bit extended payload length, then read mask or payload."""
    self._wire_bytes_in += len(data)
    self._frame_length = struct.unpack("!Q", data)[0]
    try:
        if self._masked_frame:
            self.stream.read_bytes(4, self._on_masking_key)
        else:
            self.stream.read_bytes(self._frame_length, self._on_frame_data)
    except StreamClosedError:
        self._abort()
def _on_masking_key(self, data):
    """Store the 4-byte masking key and schedule the masked payload read."""
    self._wire_bytes_in += len(data)
    self._frame_mask = data
    try:
        self.stream.read_bytes(self._frame_length,
                               self._on_masked_frame_data)
    except StreamClosedError:
        self._abort()
def _on_masked_frame_data(self, data):
    """Unmask the payload and hand it to the common frame-data handler."""
    # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
    self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
    """Handle a complete frame payload: control frames immediately,
    data frames with (de)fragmentation bookkeeping."""
    self._wire_bytes_in += len(data)
    if self._frame_opcode_is_control:
        # control frames may be interleaved with a series of fragmented
        # data frames, so control frames must not interact with
        # self._fragmented_*
        if not self._final_frame:
            # control frames must not be fragmented
            self._abort()
            return
        opcode = self._frame_opcode
    elif self._frame_opcode == 0:  # continuation frame
        if self._fragmented_message_buffer is None:
            # nothing to continue
            self._abort()
            return
        self._fragmented_message_buffer += data
        if self._final_frame:
            # Message complete: recover the original opcode and full payload.
            opcode = self._fragmented_message_opcode
            data = self._fragmented_message_buffer
            self._fragmented_message_buffer = None
    else:  # start of new data message
        if self._fragmented_message_buffer is not None:
            # can't start new message until the old one is finished
            self._abort()
            return
        if self._final_frame:
            opcode = self._frame_opcode
        else:
            # First fragment: remember the opcode and start buffering.
            self._fragmented_message_opcode = self._frame_opcode
            self._fragmented_message_buffer = data
    if self._final_frame:
        self._handle_message(opcode, data)
    if not self.client_terminated:
        self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
|
cable no se agrupan por lotes.
elif isinstance(articulo, (pclases.Pale, pclases.Caja)):
#, pclases.Bolsa)):
lote = articulo.partidaCem
self.rellenar_info_partida_cemento(lote, txtvw)
else:
escribir(txtvw, "¡NO SE ENCONTRÓ INFORMACIÓN!\n"
"Posible inconsistencia de la base de datos. "
"Contacte con el administrador.")
self.logger.error("trazabilidad_articulos.py::"
"rellenar_lote_partida -> "
"No se encontró información acerca del "
"artículo ID %d." % (articulo.id))
def rellenar_albaran(self, articulo):
    """Fill the delivery-note text view with the article's shipping history.

    For pallets, recurses into one box per distinct outgoing delivery note;
    for other articles, walks the traceability history and prints outgoing
    delivery notes, credit notes and load consumptions. Finally reports
    current warehouse presence and, for bigbags, the production lot that
    consumed them. (Python 2 module — note the old `except E, v` syntax.)
    """
    txtvw = self.wids['txt_albaran']
    borrar_texto(txtvw)
    if isinstance(articulo, pclases.Pale):
        pale = articulo
        cajas_a_mostrar = []
        albaranes_tratados = []
        # Show only one representative box per distinct delivery note.
        for caja in pale.cajas:
            articulo_caja = caja.articulo
            alb = articulo_caja.albaranSalida
            if alb not in albaranes_tratados:
                albaranes_tratados.append(alb)
                cajas_a_mostrar.append(caja)
        for caja in cajas_a_mostrar:
            self.rellenar_albaran(caja)
    else:
        try:
            a = articulo.articulos[0]
        except IndexError, msg:
            self.logger.error("ERROR trazabilidad_articulos.py "
                              "(rellenar_albaran): %s" % (msg))
        else:
            for fecha, objeto, almacen in a.get_historial_trazabilidad():
                if isinstance(objeto, pclases.AlbaranSalida):
                    # Outgoing delivery note: number, date, carrier,
                    # customer, origin and destination warehouses.
                    escribir(txtvw, "Albarán número: %s (%s)\n" % (
                                objeto.numalbaran,
                                objeto.get_str_tipo()),
                             ("_rojoclaro", ))
                    escribir(txtvw, "Fecha: %s\n" %
                             utils.str_fecha(objeto.fecha))
                    escribir(txtvw, "Transportista: %s\n" % (
                                objeto.transportista
                                and objeto.transportista.nombre or ''))
                    escribir(txtvw, "Cliente: %s\n" % (
                                objeto.cliente and objeto.cliente.nombre or ''),
                             ("negrita", ))
                    destino = (objeto.almacenDestino and
                               objeto.almacenDestino.nombre or
                               objeto.nombre)
                    escribir(txtvw, "Origen: %s\n" % (
                                objeto.almacenOrigen
                                and objeto.almacenOrigen.nombre
                                or "ERROR - ¡Albarán sin almacén de origen!"))
                    escribir(txtvw, "Destino: %s\n" % (destino))
                elif isinstance(objeto, pclases.Abono):
                    # Credit note: article was returned.
                    escribir(txtvw,
                             "El artículo fue devuelto el %s a %s en el abono"
                             " %s.\n" % (utils.str_fecha(fecha),
                                         almacen.nombre,
                                         objeto.numabono),
                             ("rojo", ))
                    # If it has actually re-entered the warehouse, say so:
                    adeda = None
                    for ldd in objeto.lineasDeDevolucion:
                        if ldd.articulo == a:  # Found the matching line.
                            adeda = ldd.albaranDeEntradaDeAbono
                    if not adeda:
                        escribir(txtvw, "El artículo aún no ha entrado"
                                 " en almacén. El abono no ha generado albarán "
                                 "de entrada de mercancía.\n",
                                 ("negrita", ))
                    else:
                        escribir(txtvw, "El artículo se recibió en "
                                 "el albarán de entrada de abono %s el día "
                                 "%s.\n" % (
                                    adeda.numalbaran,
                                    utils.str_fecha(adeda.fecha)))
                elif isinstance(objeto, pclases.PartidaCarga):
                    # Article was consumed by a load batch.
                    escribir(txtvw,
                             "Se consumió el %s en la partida de carga %s.\n"%(
                                utils.str_fecha(fecha),
                                objeto.codigo),
                             ("_rojoclaro", "cursiva"))
            if articulo.articulo.en_almacen():
                escribir(txtvw,
                         "El artículo está en almacén: %s.\n" % (
                            articulo.articulo.almacen
                            and articulo.articulo.almacen.nombre
                            or "¡Error de coherencia en la BD!"),
                         ("_verdeclaro", ))
    if (hasattr(articulo, "parteDeProduccionID")
            and articulo.parteDeProduccionID):
        # Bigbags can now be consumed by production too.
        pdp = articulo.parteDeProduccion
        if pdp:
            if isinstance(articulo, pclases.Bigbag):
                escribir(txtvw,
                         "\nBigbag consumido el día %s para producir la"
                         " partida de fibra de cemento embolsado %s."%(
                            utils.str_fecha(pdp.fecha),
                            pdp.partidaCem.codigo),
                         ("_rojoclaro", "cursiva"))
def func_orden_ldds_por_albaran_salida(self, ldd1, ldd2):
"""
Devuelve -1, 1 ó 0 dependiendo de la fecha de los albaranes de salida
relacionados con las líneas de devolución. Si las fechas son iguales,
ordena por ID de las LDD.
"""
if ldd1.albaranSalida and (ldd2.albaranSalida == None
or ldd1.albaranSalida.fecha < ldd2.albaranSalida.fecha):
return -1
if ldd2.albaranSalida and (ldd1.albaranSalida == None
or ldd1.albaranSalida.fecha > ldd2.albaranSalida.fecha):
return 1
if ldd1.id < ldd2.id:
return -1
if ldd1.id > ldd2.id:
return 1
return 0
def mostrar_info_abonos(self, articulo):
"""
Muestra la información de los abonos del artículo.
"""
if articulo.lineasDeDevolucion:
txtvw = self.wids['txt_albaran']
ldds = articulo.lineasDeDevolucion[:]
ldds.sort(self.func_orden_ldds_por_albaran_salida)
for ldd in ldds:
try:
escribir(txtvw,
"Salida del almacén el día %s en el albarán "
"%s para %s.\n" % (
utils.str_fecha(ldd.albaranSalida.fecha),
ldd.albaranSalida.numalbaran,
ldd.albaranSalida.cliente
and ldd.albaranSalida.cliente.nombre
or "?"),
("_rojoclaro", "cursiva"))
escribir(txtvw,
"Devuelto el día %s en el albarán de entrada "
"de abono %s.\n" % (
utils.str_fecha(
ldd.albaranDeEntradaDeAbono.fecha),
ldd.albaranDeEntradaDeAbono.numalbaran),
("_verdeclaro", "cursiva"))
except AttributeError, msg:
escribir(txtvw,
"ERROR DE INCONSISTENCIA. Contacte con el "
"administrador de la base de datos.\n",
("negrita", ))
txterror="trazabilidad_artic |
from images import fields
from product | _images.models import ProductImage
class ImagesFormField(fields.ImagesFormField):
    """Images form field pre-bound to the ProductImage model.

    Generalized to forward any extra positional/keyword arguments to the
    base field; called with no arguments the behaviour is unchanged.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(ProductImage, *args, **kwargs)
|
from __future__ import print_function
from __future__ import unicode_literals
import io
import os.path
import pipes
import sys
from pre_commit import output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
# This is used to identify the hook file we install
PRIOR_HASHES = (
    '4d9958c90bc262f47553e2c073f14cfe',
    'd8ee923c46731b42cd95cc869add4062',
    '49fd668cb42069aa1b6048464be5d395',
    '79f09a650522a87b0da915d0d983b2de',
    'e358c9dae00eac5d06b38dfdb1e33a8c',
)
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'


def is_our_script(filename):
    """Return True if *filename* is a hook script written by pre-commit.

    Detection looks for any known identifying hash (current or historical)
    in the file's contents; a missing file is simply "not ours".
    """
    if not os.path.exists(filename):
        return False
    # Fix: close the handle deterministically instead of leaking it to GC.
    with io.open(filename) as f:
        contents = f.read()
    return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
def install(
        runner, overwrite=False, hooks=False, hook_type='pre-commit',
        skip_on_missing_conf=False,
):
    """Install the pre-commit hooks.

    Writes the hook script for *hook_type* into the git hooks directory.
    A pre-existing foreign hook is preserved as ``<hook>.legacy`` (and run
    in migration mode) unless *overwrite* is set.  When *hooks* is true the
    hook environments themselves are installed as well.

    Returns 0 for use as a CLI exit code.
    """
    hook_path = runner.get_hook_path(hook_type)
    legacy_path = hook_path + '.legacy'

    mkdirp(os.path.dirname(hook_path))

    # If we have an existing hook, move it to pre-commit.legacy
    if os.path.lexists(hook_path) and not is_our_script(hook_path):
        os.rename(hook_path, legacy_path)

    # If we specify overwrite, we simply delete the legacy file
    if overwrite and os.path.exists(legacy_path):
        os.remove(legacy_path)
    elif os.path.exists(legacy_path):
        output.write_line(
            'Running in migration mode with existing hooks at {}\n'
            'Use -f to use only pre-commit.'.format(
                legacy_path,
            ),
        )

    with io.open(hook_path, 'w') as pre_commit_file_obj:
        # Each hook type gets its own template snippet spliced into the
        # generic hook template below.
        if hook_type == 'pre-push':
            with io.open(resource_filename('pre-push-tmpl')) as f:
                hook_specific_contents = f.read()
        elif hook_type == 'commit-msg':
            with io.open(resource_filename('commit-msg-tmpl')) as f:
                hook_specific_contents = f.read()
        elif hook_type == 'pre-commit':
            hook_specific_contents = ''
        else:
            raise AssertionError('Unknown hook type: {}'.format(hook_type))

        # The template expects a shell boolean, not a Python one.
        skip_on_missing_conf = 'true' if skip_on_missing_conf else 'false'
        # Fix: close the template handle instead of leaking it until GC.
        with io.open(resource_filename('hook-tmpl')) as template_file:
            contents = template_file.read().format(
                sys_executable=pipes.quote(sys.executable),
                hook_type=hook_type,
                hook_specific=hook_specific_contents,
                config_file=runner.config_file,
                skip_on_missing_conf=skip_on_missing_conf,
            )
        pre_commit_file_obj.write(contents)
    make_executable(hook_path)

    output.write_line('pre-commit installed at {}'.format(hook_path))

    # If they requested we install all of the hooks, do so.
    if hooks:
        install_hooks(runner)

    return 0
def install_hooks(runner):
    """Eagerly install the hook environment of every configured repository."""
    for repo in runner.repositories:
        repo.require_installed()
def uninstall(runner, hook_type='pre-commit'):
    """Uninstall the pre-commit hooks.

    Removes the installed hook script for *hook_type* (only when it was
    written by pre-commit) and restores any ``.legacy`` hook saved at
    install time.  Returns 0 for use as a CLI exit code.
    """
    path = runner.get_hook_path(hook_type)
    previous = path + '.legacy'
    # Leave foreign or absent hooks alone.
    ours = os.path.exists(path) and is_our_script(path)
    if not ours:
        return 0
    os.remove(path)
    output.write_line('{} uninstalled'.format(hook_type))
    if os.path.exists(previous):
        os.rename(previous, path)
        output.write_line('Restored previous hooks to {}'.format(path))
    return 0
|
int | ervals = [[10,20],[6,15],[0,22]]
print(sorted(interval | s)) |
self.Step (M | essage = "Receptionist- | N ->> Klient-N [genvej: fokus-modtagerliste] (måske)")
self.Step (Message = "Receptionist-N ->> Klient-N [retter modtagerlisten]")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.data import flt, nowdate, getdate, cint
class MoneyTransfere(Document):
    """Controller for the "Money Transfere" doctype: moves money between
    accounts, possibly across companies.

    Cross-company transfers are booked as a Payment Entry on the sending
    side plus a Journal Entry on the receiving side, bridged through
    per-company "dummy" accounts resolved in get_dummy_accounts().

    NOTE(review): the misspelling "Transfere" is kept deliberately -- the
    reference_type strings below must match the doctype name exactly.
    """
    def on_submit(self):
        # Booking happens at submit time, not on save.
        self.validate_transfere()
    def validate(self):
        # Resolve the bridge accounts on every save so submit can use them.
        self.get_dummy_accounts()
    def get_dummy_accounts(self):
        """Look up the per-company bridge ("dummy") accounts and cache their
        names on the document.

        NOTE(review): assumes both accounts already exist --
        ``dummy_to[0][0]`` raises IndexError otherwise; confirm they are
        created during company setup.
        """
        dummy_to = frappe.db.get_values("Account", {"name": "حساب استلام من"+" - "+self.from_company + " - "+self.abbr_to,
            "company": self.to_company,
            "parent_account":"حساب استلام من"+" - "+self.abbr_to })
        self.dummy_to=dummy_to[0][0]
        dummy_from = frappe.db.get_values("Account", {"name": "حساب ارسال الي"+" - "+self.to_company + " - "+self.abbr,
            "company": self.from_company,
            "parent_account":"حساب ارسال"+" - "+self.abbr })
        self.dummy_from=dummy_from[0][0]
    def before_cancel(self):
        # Cancel the linked Payment Entry and Journal Entry (if any) first
        # so this document can be cancelled cleanly.
        pe = frappe.get_value("Payment Entry", filters = {"transfere_reference": self.name}, fieldname = "name")
        if pe:
            pe_doc = frappe.get_doc("Payment Entry", pe)
            pe_doc.cancel()
        je = frappe.get_value("Journal Entry Account", filters = {"reference_name": self.name}, fieldname = "parent")
        if je:
            je_doc = frappe.get_doc("Journal Entry", je)
            je_doc.cancel()
    def validate_transfere(self):
        """Book the transfer.

        Cross-company: Payment Entry (from_account -> dummy_from) in the
        sending company plus a Journal Entry (dummy_to vs to_account) in the
        receiving company.  Same-company: one internal-transfer Payment
        Entry.
        """
        if self.from_company != self.to_company:
            # sending_account = "حساب ارسال الى " + self.to_company
            # receiving_account = "حساب استلام من " + self.from_company
            # self.add_account_for_company(sending_account, self.to_company, "Liability")
            # self.add_account_for_company(receiving_account, self.from_company, "Expense")
            self.add_payment_entry(self.from_account, self.dummy_from, self.from_company)
            self.add_journal_entry(self.to_account,self.dummy_to, self.to_company)
        else:
            self.add_payment_entry(self.from_account, self.to_account, self.from_company)
    def add_account_for_company(self, account, company, r_type):
        # Currently a no-op; the commented-out block below shows the
        # intended auto-creation of the bridge accounts.
        pass
        # pacc_name = ""
        # if r_type == "Expense":
        #     pacc_name = "حساب ارسال - E"
        # elif r_type == "Liability":
        #     pacc_name = "حساب استقبال - o"
        # # if not frappe.db.exists("Account", pacc_name):
        # #     pacc = frappe.new_doc("Account")
        # #     pacc.account_name = pacc_name
        # #     pacc.root_type = r_type
        # #     pacc.is_group = 1
        # #     pacc.parent_account = ""
        # #     pacc.company = company
        # #     pacc.flags.ignore_validate = True
        # #     pacc.insert()
        # if not frappe.db.exists("Account", account):
        #     acc = frappe.new_doc("Account")
        #     acc.account_name = account
        #     acc.company = company
        #     acc.parent_account = pacc_name
        #     acc.is_group = 0
        #     acc.insert()
    def add_payment_entry(self, paid_from, paid_to, company):
        """Create and submit an internal-transfer Payment Entry for the
        transferred amount, tagged back to this document."""
        pe = frappe.new_doc("Payment Entry")
        pe.payment_type = "Internal Transfer"
        pe.company = company
        pe.paid_from = paid_from
        pe.paid_to = paid_to
        pe.paid_amount = self.transfered_amount
        pe.received_amount = self.transfered_amount
        pe.posting_date = nowdate()
        pe.mode_of_payment = self.mode_of_payment
        pe.transfere_reference = self.name
        pe.insert()
        pe.submit()
        # pe.setup_party_account_field()
        # pe.set_missing_values()
        # pe.set_exchange_rate()
        # pe.set_amounts()
        # self.assertEquals(pe.difference_amount, 500)
        # pe.append("deductions", {
        #     "account": "_Test Exchange Gain/Loss - _TC",
        #     "cost_center": "_Test Cost Center - _TC",
        #     "amount": 500
        # })
    def add_journal_entry(self, account1, account2, company):
        """Create and submit a Journal Entry crediting *account2* and
        debiting *account1* with the transferred amount in *company*."""
        default_cost = frappe.get_value("Company", filters = {"name":company}, fieldname = "cost_center")
        jv = frappe.new_doc("Journal Entry")
        jv.posting_date = nowdate()
        jv.company = company
        # NOTE(review): voucher_type "Opening Entry" looks odd for a
        # transfer -- confirm this is intentional.
        jv.voucher_type = "Opening Entry"
        jv.set("accounts", [
            {
                "account": account2,
                "credit_in_account_currency": self.transfered_amount,
                "cost_center": default_cost,
                "reference_type": "Money Transfere",
                "reference_name": self.name
            }, {
                "account": account1,
                "debit_in_account_currency": self.transfered_amount,
                "cost_center": default_cost,
                "reference_type": "Money Transfere",
                "reference_name": self.name
            }
        ])
        jv.insert()
        jv.submit()
|
from django.conf.urls import patterns, url, include
from .views import empty_view, empty_view_partial, empty_view_wrapped, absolute_kwargs_view
# Test fixture URLconf: each entry exercises a reverse()/resolve() edge case
# (regex quantifiers, named/unnamed groups, backrefs, inline flags, escaped
# metacharacters, includes, and legacy tuple syntax).  Pattern text and entry
# order are the behavior under test -- do not "clean up" the regexes.
other_patterns = patterns('',
    url(r'non_path_include/$', empty_view, name='non_path_include'),
    url(r'nested_path/$', 'urlpatterns_reverse.views.nested_view'),
)
urlpatterns = patterns('',
    url(r'^places/(\d+)/$', empty_view, name='places'),
    url(r'^places?/$', empty_view, name="places?"),
    url(r'^places+/$', empty_view, name="places+"),
    url(r'^places*/$', empty_view, name="places*"),
    url(r'^(?:places/)?$', empty_view, name="places2?"),
    url(r'^(?:places/)+$', empty_view, name="places2+"),
    url(r'^(?:places/)*$', empty_view, name="places2*"),
    url(r'^places/(\d+|[a-z_]+)/', empty_view, name="places3"),
    url(r'^places/(?P<id>\d+)/$', empty_view, name="places4"),
    url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
    url(r'^people/(?:name/)', empty_view, name="people2"),
    url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
    url(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name="people_backref"),
    url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
    url(r'^hardcoded/$', empty_view, name="hardcoded"),
    url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\d)/$', empty_view, name="people4"),
    url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
    url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
    url(r'^character_set/[\w]/$', empty_view, name="range2"),
    url(r'^price/\$(\d+)/$', empty_view, name="price"),
    url(r'^price/[$](\d+)/$', empty_view, name="price2"),
    url(r'^price/[\$](\d+)/$', empty_view, name="price3"),
    url(r'^product/(?P<product>\w+)\+\(\$(?P<price>\d+(\.\d+)?)\)/$', empty_view, name="product"),
    url(r'^headlines/(?P<year>\d+)\.(?P<month>\d+)\.(?P<day>\d+)/$', empty_view, name="headlines"),
    url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view, name="windows"),
    url(r'^special_chars/(?P<chars>.+)/$', empty_view, name="special"),
    url(r'^(?P<name>.+)/\d+/$', empty_view, name="mixed"),
    url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
    url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
    url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
    url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
    url(r'^test/1/?', empty_view, name="test"),
    url(r'^(?i)test/2/?$', empty_view, name="test2"),
    url(r'^outer/(?P<outer>\d+)/', include('urlpatterns_reverse.included_urls')),
    url(r'^outer-no-kwargs/(\d+)/', include('urlpatterns_reverse.included_no_kwargs_urls')),
    url('', include('urlpatterns_reverse.extra_urls')),
    # This is non-reversible, but we shouldn't blow up when parsing it.
    url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),
    # Partials should be fine.
    url(r'^partial/', empty_view_partial, name="partial"),
    url(r'^partial_wrapped/', empty_view_wrapped, name="partial_wrapped"),
    # Regression views for #9038. See tests for more details
    url(r'arg_view/$', 'kwargs_view'),
    url(r'arg_view/(?P<arg1>\d+)/$', 'kwargs_view'),
    url(r'absolute_arg_view/(?P<arg1>\d+)/$', absolute_kwargs_view),
    url(r'absolute_arg_view/$', absolute_kwargs_view),
    # Tests for #13154. Mixed syntax to test both ways of defining URLs.
    url(r'defaults_view1/(?P<arg1>\d+)/', 'defaults_view', {'arg2': 1}, name='defaults'),
    # Legacy tuple syntax (no url() wrapper) -- deliberately kept.
    (r'defaults_view2/(?P<arg1>\d+)/', 'defaults_view', {'arg2': 2}, 'defaults'),
    url('^includes/', include(other_patterns)),
    # Security tests
    url('(.+)/security/$', empty_view, name='security'),
)
|
# Django settings for hikeplanner project.
# NOTE(review): DEBUG is enabled -- must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Jaime', 'jaime.m.mccandless@gmail.com'),
)
MANAGERS = ADMINS
# NOTE(review): database credentials are committed to version control --
# consider loading PASSWORD (and SECRET_KEY below) from the environment.
DATABASES = {
    'default': {
        #'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'hikeplanner', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'postgres',
        'PASSWORD': 'hiketime',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY is compromised by definition --
# rotate it and load it from the environment.
SECRET_KEY = '^#zyha@)nr_z=#ge2esa0kc1+1f56tfa-nuox5%!^+hqgo%7w*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.messages.context_processors.messages',
    'django.contrib.auth.context_processors.auth',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'src.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'src.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # NOTE(review): "hikes/templates" is relative -- resolution depends on
    # the process working directory; confirm or make absolute.
    "hikes/templates"
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.gis',
    'hikes',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
rom rhn.client import rhnHardware
rhnHardware.updateHardware()
# FIXME (20050415): Proper output method
print "Hardware profile refresh successful"
if action == "showall" or action == "show_available" \
or action == "showall_with_channels" or action == "show_available_with_channels":
# Show the latest of each package in RHN
pkgs = self.getRHNPackages(latest=True)
for pkg in pkgs:
if action.startswith("show_available") and pkg.installed: continue
if action.endswith("_with_channels"):
channelName = ""
for (ldr,info) in pkg.loaders.items():
channel = ldr.getChannel()
if channel.getType() == "solaris-rhn":
channelLabel = info['baseurl'][6:-1]
break
print "%-40s%-30s" % (str(pkg), channelLabel)
else:
print str(pkg)
if action == "show_orphans":
pkgs = self.getPackages()
rhn_pkgs = self.getRHNPackages(reload=False)
for pkg in pkgs:
if pkg not in rhn_pkgs:
print str(pkg)
if action == "get":
import smart.commands.download as download
opts = download.parse_options([])
opts.args = argv
opts.yes = True
result = download.main(self._ctrl, opts)
if action == "show_channels":
serverSettings = ServerSettings()
li = rhnAuth.getLoginInfo()
channels = li.get('X-RHN-Auth-Channels')
for channelInfo in channels:
print channelInfo[0]
return result
    def getProgress(self, obj, hassub=False):
        """Return the shared progress object, configured for *obj*.

        Enables sub-task tracking when *hassub* is true and switches the
        progress display to fetcher mode when *obj* is a Fetcher.
        """
        self._progress.setHasSub(hassub)
        self._progress.setFetcherMode(isinstance(obj, Fetcher))
        return self._progress
    def getSubProgress(self, obj):
        # The same shared progress object also tracks sub-tasks.
        return self._progress
    def showStatus(self, msg):
        """Mark a status message as active; actual output is suppressed
        (the write calls were deliberately commented out)."""
        if self._activestatus:
            pass
            # print
        else:
            self._activestatus = True
            #sys.stdout.write(msg)
            #sys.stdout.flush()
def hideSta | tus(self):
if self._activestatus:
self._activestatus = False
print
    def askYesNo(self, question, default=False):
        """Ask a yes/no *question* on the console.

        Empty input returns *default*; otherwise any prefix of the
        (localized) word "yes" that is not also a prefix of "no" counts as
        yes.
        """
        self.hideStatus()
        # Capitalize the default choice in the prompt.
        mask = default and _("%s (Y/n): ") or _("%s (y/N): ")
        res = raw_input(mask % question).strip().lower()
        print
        if res:
            return (_("yes").startswith(res) and not
                    _("no").startswith(res))
        return default
    def askContCancel(self, question, default=False):
        """Ask a continue/cancel *question*; empty input returns *default*.

        Matching is by prefix of the localized words, as in askYesNo().
        """
        self.hideStatus()
        if default:
            mask = _("%s (Continue/cancel): ")
        else:
            mask = _("%s (continue/Cancel): ")
        res = raw_input(mask % question).strip().lower()
        print
        if res:
            return (_("continue").startswith(res) and not
                    _("cancel").startswith(res))
        return default
    def askOkCancel(self, question, default=False):
        """Ask an ok/cancel *question*; empty input returns *default*."""
        self.hideStatus()
        mask = default and _("%s (Ok/cancel): ") or _("%s (ok/Cancel): ")
        res = raw_input(mask % question).strip().lower()
        print
        if res:
            return (_("ok").startswith(res) and not
                    _("cancel").startswith(res))
        return default
    def confirmChangeSet(self, changeset):
        # Show the full change-set listing with a confirmation prompt.
        return self.showChangeSet(changeset, confirm=True)
    def askInput(self, prompt, message=None, widthchars=None, echo=True):
        """Read one line of input from the console.

        Uses getpass (no echo) when *echo* is false.  *widthchars* is
        accepted for interface compatibility but unused here.  Returns ""
        when interrupted with Ctrl-C.
        """
        print
        if message:
            print message
        prompt += ": "
        try:
            if echo:
                res = raw_input(prompt)
            else:
                res = getpass.getpass(prompt)
        except KeyboardInterrupt:
            res = ""
        print
        return res
    def askPassword(self, location, caching=OPTIONAL):
        """Delegate to Interface.askPassword while the progress display is
        locked, so the prompt is not interleaved with progress output."""
        self._progress.lock()
        passwd = Interface.askPassword(self, location, caching)
        self._progress.unlock()
        return passwd
    def insertRemovableChannels(self, channels):
        """List the removable *channels* the user must insert and ask for
        confirmation.  Returns True when the user accepts (Ok is default)."""
        self.hideStatus()
        print
        print _("Insert one or more of the following removable channels:")
        print
        for channel in channels:
            print "  ", str(channel)
        print
        return self.askOkCancel(_("Continue?"), True)
# Non-standard interface methods:
    def showChangeSet(self, changeset, keep=None, confirm=False):
        """Print a summary of *changeset* grouped by operation.

        Hides package versions for large change sets (overridable via the
        "text-hide-version" sysconf key), prints download/disk totals, and,
        when *confirm* is true, asks the user to confirm.  Returns the
        confirmation result (True when not confirming).
        """
        self.hideStatus()
        report = Report(changeset)
        report.compute()
        screenwidth = getScreenWidth()
        hideversion = sysconf.get("text-hide-version", len(changeset) > 40)
        # cvt() optionally strips versions, leaving only package names.
        if hideversion:
            def cvt(lst):
                return [x.name for x in lst]
        else:
            def cvt(lst):
                return lst
        print
        if keep:
            keep = cvt(keep)
            keep.sort()
            print _("Kept packages (%d):") % len(keep)
            printColumns(keep, indent=2, width=screenwidth)
            print
        pkgs = report.upgrading.keys()
        if pkgs:
            pkgs = cvt(pkgs)
            pkgs.sort()
            print _("Upgrading packages (%d):") % len(pkgs)
            printColumns(pkgs, indent=2, width=screenwidth)
            print
        pkgs = report.downgrading.keys()
        if pkgs:
            pkgs = cvt(pkgs)
            pkgs.sort()
            print _("Downgrading packages (%d):") % len(pkgs)
            printColumns(pkgs, indent=2, width=screenwidth)
            print
        pkgs = report.installing.keys()
        if pkgs:
            pkgs = cvt(pkgs)
            pkgs.sort()
            print _("Installed packages (%d):") % len(pkgs)
            printColumns(pkgs, indent=2, width=screenwidth)
            print
        pkgs = report.removed.keys()
        if pkgs:
            pkgs = cvt(pkgs)
            pkgs.sort()
            print _("Removed packages (%d):") % len(pkgs)
            printColumns(pkgs, indent=2, width=screenwidth)
            print
        # Summarize bytes to download and net disk-space change.
        dsize = report.getDownloadSize()
        size = report.getInstallSize() - report.getRemoveSize()
        if dsize:
            sys.stdout.write(_("%s of package files are needed. ") %
                             sizeToStr(dsize))
        if size > 0:
            sys.stdout.write(_("%s will be used.") % sizeToStr(size))
        elif size < 0:
            size *= -1
            sys.stdout.write(_("%s will be freed.") % sizeToStr(size))
        if dsize or size:
            sys.stdout.write("\n\n")
        if confirm:
            return self.askYesNo(_("Confirm changes?"), True)
        return True
class RHNSolarisPolicyInstall(PolicyInstall):
def getPriorityWeights(self, targetPkg, providingPkgs):
# We first need to determine whether we are dealing with a package
# or a patch. For packages, we'll defer to the standard installation
# policy; we only want special behavior for patches.
#
if not targetPkg.isPatch():
return \
PolicyInstall.getPriorityWeights(self, targetPkg, providingPkgs)
# At this point, we have a list of patches. We'll assign weights based
# on how qualified each providing package is. Here's how:
#
# Let T be the package we wish to find the best provider for.
# Let P be the set of patches which was determined to provide T.
# For each P[i], let X be the the set of patches that provides P[i].
#
# We determine qualification based on count(X) for each P[i]. The
# lower the count(X), the more qualified P[i] is, and the higher it
# will be weighted.
#
# In the SmartPM dep solver, a lower weight indicates a better match.
# Therefore, at the end of this algorithm, the P[i] with the lowest
# count(X) should be the lowest-weighted. In the event of a tie, where
# more than one P[i] is of equally low weight, we allow the "winner" to
# |
y the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
# Copyright (C) 2006 XenSource Inc.
# Copyright (C) 2007 Red Hat Inc., Michael DeHaan <mdehaan@redhat.com>
#============================================================================
"""
An enhanced XML-RPC client/server interface for Python.
"""
import re
import fcntl
from types import *
import os
import errno
import traceback
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import SocketServer
import xmlrpclib, socket, os, stat
#import mkdir
#
# Convert all integers to strings as described in the Xen API
#
def stringify(value):
    """Recursively convert all integers (but not bools) to strings, as the
    Xen API requires.

    Dicts are rebuilt with converted keys and values; tuples and lists both
    come back as lists.  Anything else passes through unchanged.  Python 2
    only: also handles ``long``.
    """
    if isinstance(value, long) or \
       (isinstance(value, int) and not isinstance(value, bool)):
        return str(value)
    elif isinstance(value, dict):
        new_value = {}
        for k, v in value.items():
            new_value[stringify(k)] = stringify(v)
        return new_value
    elif isinstance(value, (tuple, list)):
        return [stringify(v) for v in value]
    else:
        return value
# We're forced to subclass the RequestHandler class so that we can work around
# some bugs in Keep-Alive handling and also enabled it by default
class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """Request handler with HTTP/1.1 (keep-alive) enabled by default."""
    protocol_version = "HTTP/1.1"
    def __init__(self, request, client_address, server):
        SimpleXMLRPCRequestHandler.__init__(self, request, client_address,
                                            server)
    # this is inspired by SimpleXMLRPCRequestHandler's do_POST but differs
    # in a few non-trivial ways
    # 1) we never generate internal server errors.  We let the exception
    #    propagate so that it shows up in the Xend debug logs
    # 2) we don't bother checking for a _dispatch function since we don't
    #    use one
    def do_POST(self):
        addrport = self.client_address
        #if not connection.hostAllowed(addrport, self.hosts_allowed):
        #    self.connection.shutdown(1)
        #    return
        # Read exactly Content-Length bytes and hand the payload to the
        # server's marshaller, which never raises for dispatch errors.
        data = self.rfile.read(int(self.headers["content-length"]))
        rsp = self.server._marshaled_dispatch(data)
        self.send_response(200)
        self.send_header("Content-Type", "text/xml")
        self.send_header("Content-Length", str(len(rsp)))
        self.end_headers()
        self.wfile.write(rsp)
        self.wfile.flush()
        #if self.close_connection == 1:
        #    self.connection.shutdown(1)
def parents(dir, perms, enforcePermissions = False):
    """
    Ensure that the given directory exists, creating it (and any missing
    parents) if necessary, but not complaining if it's already there.
    @param dir The directory name.
    @param perms One of the stat.S_ constants.
    @param enforcePermissions Enforce our ownership and the given permissions,
    even if the directory pre-existed with different ones.
    """
    # Catch the exception here, rather than checking for the directory's
    # existence first, to avoid races.
    try:
        os.makedirs(dir, perms)
    except OSError, exn:
        if exn.args[0] != errno.EEXIST or not os.path.isdir(dir):
            raise
    if enforcePermissions:
        # NOTE(review): enforcement always applies S_IRWXU (0700) rather
        # than the *perms* argument -- confirm this asymmetry is intended.
        os.chown(dir, os.geteuid(), os.getegid())
        os.chmod(dir, stat.S_IRWXU)
# This is a base XML-RPC server for TCP. It sets allow_reuse_address to
# true, and has an improved marshaller that logs and serializes exceptions.
class TCPXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer):
    """Threaded TCP XML-RPC server.

    Sets allow_reuse_address, marks listening and accepted sockets
    close-on-exec, and uses a marshaller that serializes exceptions as
    xmlrpclib Faults instead of crashing the handler.
    """
    allow_reuse_address = True
    def __init__(self, addr, requestHandler=None,
                 logRequests = 1):
        if requestHandler is None:
            requestHandler = XMLRPCRequestHandler
        SimpleXMLRPCServer.__init__(self, addr,
                                    (lambda x, y, z:
                                     requestHandler(x, y, z)),
                                    logRequests)
        # Don't leak the listening socket into forked/exec'd children.
        flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
    def get_request(self):
        (client, addr) = SimpleXMLRPCServer.get_request(self)
        # Mark each accepted client socket close-on-exec as well.
        flags = fcntl.fcntl(client.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(client.fileno(), fcntl.F_SETFD, flags)
        return (client, addr)
    def _marshaled_dispatch(self, data, dispatch_method = None):
        """Dispatch a marshalled request and return the response XML.

        Results that are not a dict containing 'Status' are converted to a
        Failure status; exceptions become xmlrpclib Faults.
        """
        params, method = xmlrpclib.loads(data)
        if False:
            # Enable this block of code to exit immediately without sending
            # a response.  This allows you to test client-side crash handling.
            import sys
            sys.exit(1)
        try:
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            if (response is None or
                not isinstance(response, dict) or
                'Status' not in response):
                #log.exception('Internal error handling %s: Invalid result %s',
                #              method, response)
                response = { "Status": "Failure",
                             "ErrorDescription":
                             ['INTERNAL_ERROR',
                              'Invalid result %s handling %s' %
                              (response, method)]}
            # With either Unicode or normal strings, we can only transmit
            # \t, \n, \r, \u0020-\ud7ff, \ue000-\ufffd, and \u10000-\u10ffff
            # in an XML document.  xmlrpclib does not escape these values
            # properly, and then breaks when it comes to parse the document.
            # To hack around this problem, we use repr here and exec above
            # to transmit the string using Python encoding.
            # Thanks to David Mertz <mertz@gnosis.cx> for the trick (buried
            # in xml_pickle.py).
            if isinstance(response, StringTypes):
                response = repr(response)[1:-1]
            response = (response,)
            response = xmlrpclib.dumps(response,
                                       methodresponse=1,
                                       allow_none=1)
        except Exception, exn:
            try:
                #if self.xenapi:
                #    if _is_not_supported(exn):
                #        errdesc = ['MESSAGE_METHOD_UNKNOWN', method]
                #    else:
                #        #log.exception('Internal error handling %s', method)
                #        errdesc = ['INTERNAL_ERROR', str(exn)]
                #
                #    response = xmlrpclib.dumps(
                #        ({ "Status": "Failure",
                #           "ErrorDescription": errdesc },),
                #        methodresponse = 1)
                #else:
                #    import xen.xend.XendClient
                if isinstance(exn, xmlrpclib.Fault):
                    response = xmlrpclib.dumps(exn)
                else:
                    # log.exception('Internal error handling %s', method)
                    response = xmlrpclib.dumps(
                        xmlrpclib.Fault(101, str(exn)))
            except Exception, exn2:
                # FIXME
                traceback.print_exc()
        return response
# Matches xmlrpclib's "method ... is not supported" fault text.
notSupportedRE = re.compile(r'method "(.*)" is not supported')
def _is_not_supported(exn):
    # True when *exn* carries the "method not supported" message.
    try:
        m = notSupportedRE.search(exn[0])
        return m is not None
    except:
        # Bare except kept: exn may not be indexable (Python 2 exceptions).
        return False
# This is a XML-RPC server that sits on a Unix domain socket.
# It implements proper support for allow_reuse_address by
# unlink()'i |
import os
import zipfile
import csv
from django.core.management.base import BaseCommand
from django.db import transaction
from django.conf import settings
from ...models import Service
def get_service(row):
    """Return a queryset of Service records matching the TNDS-* columns of *row*.

    Regions EA/EM/WM/SE/SW match on a service-code suffix with the last
    '-'-separated part dropped, S/Y/NE match the code exactly, and NW
    matches on a '_'-separated suffix.  Returns None when no TNDS column is
    populated.
    """
    for region in 'EA', 'EM', 'WM', 'SE', 'SW':
        col = 'TNDS-' + region
        if row[col]:
            return Service.objects.filter(region=region, service_code__endswith=''.join(row[col].split('-')[:-1]))
    for region in 'S', 'Y', 'NE':
        col = 'TNDS-' + region
        if row[col]:
            return Service.objects.filter(region=region, service_code=row[col])
    if row['TNDS-NW']:
        # Bug fix: the original reused the stale loop variable `region`
        # (always 'NE' after the loop above) instead of the intended 'NW'.
        return Service.objects.filter(region='NW', service_code__endswith=''.join(row['TNDS-NW'].split('_')[:-1]))
def handle_file(open_file):
    """Apply accessibility fields from *open_file* (a CSV of bytes lines)
    to the matching Service querysets."""
    # 'LF'/'HF' mark low/high floor; anything else means unknown (None).
    floor_codes = {'LF': True, 'HF': False}
    reader = csv.DictReader(line.decode() for line in open_file)
    for row in reader:
        services = get_service(row)
        if not services:
            continue
        services.update(wheelchair=row['Wheelchair Access'] == 'TRUE',
                        low_floor=floor_codes.get(row['HighFloor']),
                        assistance_service=row['Assistance Service'] == 'TRUE',
                        mobility_scooter=row['MobilityScooter'] == 'TRUE')
class Command(BaseCommand):
    """Import accessibility data from ``accessibility-data.zip`` in DATA_DIR."""

    @transaction.atomic
    def handle(self, *args, **options):
        archive_path = os.path.join(settings.DATA_DIR, 'accessibility-data.zip')
        with zipfile.ZipFile(archive_path) as archive:
            # Only the IF145 members carry the accessibility records.
            for member in archive.namelist():
                if 'IF145' not in member:
                    continue
                with archive.open(member, 'r') as open_file:
                    handle_file(open_file)
|
import re
CJDNS_IP_REGEX = re.compile(r'^fc[0-9a-f]{2}(:[0-9a-f]{4}){7}$', re.IGNORECASE)
class Node(object):
    """A cjdns network node, keyed by its fc00::/8 IPv6 address."""

    def __init__(self, ip, version=None, label=None):
        """Validate *ip* and *version* (ValueError on failure).

        *label* defaults to the last four hex characters of the address.
        NOTE(review): *version* must be int-convertible and below 30; the
        default None does not pass validation, so a version is effectively
        required.
        """
        if not valid_cjdns_ip(ip):
            raise ValueError('Invalid IP address')
        if not valid_version(version):
            raise ValueError('Invalid version')
        self.ip = ip
        self.version = int(version)
        # Bug fix: the original wrote `ip[-4:] or label`, which always chose
        # the (non-empty) address suffix and silently ignored an explicit
        # label argument.
        self.label = label or ip[-4:]

    def __lt__(self, b):
        # Order nodes by address so edge endpoints sort deterministically.
        return self.ip < b.ip

    def __repr__(self):
        return 'Node(ip="%s", version=%s, label="%s")' % (
            self.ip,
            self.version,
            self.label)
class Edge(object):
    """An undirected link between two nodes, stored with canonical endpoint order."""
    def __init__(self, a, b):
        # Sort so that Edge(a, b) and Edge(b, a) are indistinguishable.
        lower, higher = sorted([a, b])
        self.a = lower
        self.b = higher
    def __eq__(self, that):
        # The two endpoint IPs fully identify an edge.
        return (self.a.ip, self.b.ip) == (that.a.ip, that.b.ip)
    def __repr__(self):
        return 'Edge(a.ip="{}", b.ip="{}")'.format(self.a.ip, self.b.ip)
def valid_cjdns_ip(ip):
    """Return True if *ip* is a well-formed cjdns IPv6 address (fcxx:...)."""
    # Return a real bool instead of Optional[Match]; callers only test truthiness,
    # so this is backward-compatible and gives a cleaner contract.
    return CJDNS_IP_REGEX.match(ip) is not None
def valid_version(version):
    """Return True if *version* parses as an int below 30, else False."""
    try:
        return int(version) < 30
    except (TypeError, ValueError):
        # BUG FIX: int(None) raises TypeError, which the original `except ValueError`
        # did not catch, so Node's default version=None crashed with a TypeError
        # instead of being rejected as invalid.
        return False
|
from app import | db
class Pets(db.Model):
    """SQLAlchemy model describing a pet record (name, colour, kind of animal)."""
    __tablename__ = 'pets'
    # Surrogate integer primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Pet's name; must be unique across the table.
    name = db.Column(db.String(128), unique=True)
    # Free-text colour description.
    color = db.Column(db.String(30))
    # Kind of animal (presumably e.g. "cat"/"dog" — confirm against callers).
    pet = db.Column(db.String(10))
    def __init__(self, name, color, pet):
        self.name = name
        self.color = color
        self.pet = pet
    def __repr__(self):
        # Only the primary key is shown; rows created but not yet flushed have id=None.
        return '<id {}>'.format(self.id)
|
t.
# import sys
# import os
from pygments.lexers.web import PhpLexer
from sphinx.highlighting import lexers
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration --------------------------------------- | ---------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.ifc | onfig',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Document Repository'
copyright = '2016, Joseph Robinson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1'
# The full version, including alpha/beta/rc tags.
# release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'samples', 'README.rst', 'common/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'document-repository'
# this will change the 'paragraph' character to '#'
html_add_permalinks = '#'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', 'documentation-repository.tex',
'Documentation Repository',
'Joseph Robinson', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'documentation-repository',
'Documentation Repository', ['Joseph Robinson'], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [('index', 'documentation-repository',
'Documentation Repository',
'Joseph Robinson', 'documentation-repository')]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_deta |
lSize/2**20,2))
else:
if totalSize!=-1 :
print ('\r','%.2f'%round(blockNum*blockSize/2**20,2),
'/','%.2f'%round(totalSize/2**20,2),
'MB ','%.2f'%round(blockNum*blockSize/totalSize*100,2),'%',end='')
else:
print ('\r','%.2f'%round(blockNum*blockSize/2**20,2),
'MB Downloaded.',end='')
# end {showProgress}
def recordToFile(record, msg):
    '''Write msg into the binary-mode file `record` as a fixed 50-byte field,
    then rewind so the same slot can be overwritten by a later status update.'''
    if len(msg.encode()) > 50:
        # Shrink until the text fits in 47 bytes, leaving room for the ellipsis.
        while len(msg.encode()) > 47:
            excess = len(msg.encode()) - 47
            msg = msg[0:-int(excess / 2) - 1]
        msg += '...'
    padded = '{: <50}'.format(msg)
    written = record.write(padded.encode())
    # Step back over what was just written so the field is overwritten in place.
    record.seek(-written, 1)
def checkDir(filename):
    '''Ensure the parent directory of filename exists, creating it if needed.
    filename should be a string. Errors are reported but not raised (best effort).'''
    from pathlib import Path
    parent = Path(filename).parent
    try:
        # exist_ok=True removes the race between the original exists() check and
        # mkdir(); parents=True creates any missing intermediate directories.
        parent.mkdir(parents=True, exist_ok=True)
    except OSError:
        # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only filesystem errors should be reported here.
        print ('Error occurred when creating Directory:', str(parent))
def download(url, filename):
    '''Download url to filename with breakpoint-resume support.

    Progress/resume state is kept in a `filename + '.lock'` sidecar file
    (ConfigParser format) holding the server-reported size, URL and
    Last-Modified stamp; if any of these change, the download restarts.
    Retries up to 10 times on connection resets, DNS failures and timeouts.
    Returns 'Success' or a human-readable error string.
    '''
    import errno
    import socket
    import time
    import configparser
    # get url info
    # NOTE(review): assumes the server sends Content-Length; int(None) would raise here.
    urlHandler = urllib.request.urlopen( url,timeout=10 )
    headers = urlHandler.info()
    size = int(headers.get('Content-Length'))
    lastmodified=headers.get('Last-Modified')
    # access download info file
    infoname=filename+'.lock'
    info=configparser.ConfigParser()
    if os.path.exists(infoname):
        info.read(infoname)
        try:
            # If the remote file changed since the partial download, start over.
            if (info.get('FileInfo','size')!=str(size) or
                info.get('FileInfo','url')!=str(url) or
                info.get('FileInfo','lastmodified')!=str(lastmodified)):
                info.remove_section('FileInfo')
                print('File changed, restart download.')
        except:
            info.remove_section('FileInfo')
            print('.lock file damaged, restart download.')
    # end if
    # decide whether to resume or restart
    if not info.has_section('FileInfo'):
        # Fresh download: write a new .lock file and truncate any partial file.
        info.add_section('FileInfo')
        info.set('FileInfo','size',str(size))
        info.set('FileInfo','url',str(url))
        info.set('FileInfo','lastmodified',str(lastmodified))
        with open(infoname,'w') as f:
            info.write(f)
        # delete existing file
        open(filename,'wb').close()
    # rebuild start point
    # Resume from however many bytes are already on disk.
    try:
        downloaded = os.path.getsize(filename )
    except OSError:
        downloaded = 0
    startpoint = downloaded
    # start download
    connectionError=True
    resetCounter=0
    while connectionError and resetCounter<10:
        connectionError=False
        try:
            if startpoint < size:
                oneTimeSize = 65535 #64KB/time
                # Request only the missing byte range so resume picks up mid-file.
                urlHandler = urllib.request.Request(url)
                urlHandler.add_header("Range", "bytes=%d-%d" % (startpoint, size))
                urlHandler = urllib.request.urlopen(urlHandler,timeout=10)
                data = urlHandler.read( oneTimeSize )
                with open( filename, 'ab+' ) as filehandle:
                    while data:
                        filehandle.write( data )
                        downloaded += len( data )
                        showProgress(1, downloaded, size)
                        data = urlHandler.read( oneTimeSize )
            # end if
        except urllib.error.HTTPError as errinfo:
            # HTTP Error
            # NOTE(review): errinfo.code is an HTTP status code (4xx/5xx), but
            # errno.ECONNRESET is an OS errno (104) — this comparison looks wrong
            # and likely never matches; confirm the intended condition.
            if errinfo.code==errno.ECONNRESET:
                # Connection reset by peer, connect again
                connectionError=True
                resetCounter+=1
            else:
                raise
        except urllib.error.URLError as errinfo:
            # URL Error
            if (isinstance(errinfo.reason,socket.gaierror) and
                errinfo.reason.errno==-2):
                # Name or service not known, usually caused by internet break or wrong server address
                connectionError=True
                resetCounter+=1
                time.sleep(10)
            else:
                raise
        except socket.timeout:
            # request timeout
            connectionError=True
            resetCounter+=1
        # end try
    # end while
    # if resetCounter>10 and there is a connectionError then raise it
    if connectionError:
        raise Exception('Connection Error')
    # check if download finished successfully
    try:
        downloaded = os.path.getsize(filename )
    except OSError:
        downloaded = 0
    if downloaded==size:
        # remove info file
        os.remove(infoname)
        return 'Success'
    elif downloaded>size:
        os.remove(infoname)
        return 'The size of file downloaded is bigger than that on server.'
    else:
        return ('Download Not Finished! The size of file downloaded is smaller than that on server.'
            ' If this error continues, please try delete the file downloaded.')
#end {def download}
def simpleDownload(url, filename):
    '''Simple download method; does not support breakpoint resume, but may be
    faster for small files. Uses the module-level showProgress as the
    urlretrieve report hook. Always returns 'Success' (errors propagate).'''
    urllib.request.urlretrieve(url, filename, showProgress)
    return 'Success'
# end {def simpleDownload}
#-----------------------------------------------------------------------------------------------------------------
# main procedure start from here
# Reads 'downloadList.csv' (fields: status;localpath;url separated by ';'),
# downloads every QUEUED/PAUSED/blank entry, and writes a status word back
# into the same file via recordToFile.
# get current working directory
#currentPath=os.path.dirname(performDownload)
#os.chdir(str(currentPath))
currentPath=os.getcwd()
# read download info from 'downloadList.csv'
with open('downloadList.csv') as f:
    downloadlist=f.readlines()
onError=False # error flag
# open 'downloadList.csv' for status maintenance
# NOTE(review): recordToFile writes 50 bytes and then rewinds, and nothing else
# advances `record`'s file position between items — so every status appears to
# land at offset 0. Confirm the intended layout of downloadList.csv.
with open('downloadList.csv','rb+') as record:
    for item in downloadlist:
        if item=='\n': continue
        # parse item
        # NOTE(review): item[:-1] assumes a trailing newline; a final line
        # without one would lose its last character.
        status, localname, url = item[:-1].split(';')
        status = status.strip(' ').upper()
        # check status for downloading
        if status in {'QUEUED','PAUSED',''}:
            # record status
            recordToFile(record,'Downloading')
            # start download
            print ('Begin to download',url)
            print ('Save to:',localname)
            checkDir(currentPath+localname[1:]) # check if parent dir exists
            try:
                # smallFileMode is defined earlier in the file (outside this view).
                if smallFileMode:
                    result=simpleDownload(url,
                        currentPath+localname[1:])
                else:
                    result=download(url,
                        currentPath+localname[1:])
                # if download not success, raise exception
                if result!='Success':
                    raise Exception(result)
            except urllib.error.HTTPError as errinfo:
                # 404 Not Found
                print ('\r'+str(errinfo))
                recordToFile(record,str(errinfo))
            except KeyboardInterrupt as errinfo:
                # Ctrl+C Interrupt
                print ('\rDownload Abort!'+20*' ')
                if smallFileMode:
                    # reset status to 'Queued' since smallFileMode don't support break point resume
                    recordToFile(record,'Queued')
                else:
                    # set status to 'Paused'
                    recordToFile(record,'Paused')
                onError=True
                break
            except Exception as errinfo:
                # Other exceptions
                print ('\rUnexpected Error!('+str(errinfo)+')')
                recordToFile(record,str(errinfo))
            except:
                # Unexpected exceptions
                print ('\rUnexpected Error!'+20*' ')
                recordToFile(record,'Unexpected Error')
            else:
                # Download success, write 'Downloaded' to file
                recordToFile(record,'Downloaded')
|
def do | _something():
for i in range(100):
p | rint(i)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.