prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from kubeflow.kubeflow.crud_backend import api, status
def pvc_status(pvc):
    """Return a status object describing the current state of a PVC.

    Terminating and Bound states are derived from the PVC object itself;
    for a Pending PVC the namespace Events are inspected to explain why.

    :param pvc: a V1PersistentVolumeClaim as returned by the k8s client
    :returns: the dict built by status.create_status
    """
    if pvc.metadata.deletion_timestamp is not None:
        return status.create_status(status.STATUS_PHASE.TERMINATING,
                                    "Deleting Volume...")

    if pvc.status.phase == "Bound":
        return status.create_status(status.STATUS_PHASE.READY, "Bound")

    # The PVC is in Pending state, we check the Events to find out why
    evs = api.v1_core.list_namespaced_event(
        namespace=pvc.metadata.namespace,
        field_selector=api.events_field_selector(
            "PersistentVolumeClaim", pvc.metadata.name
        ),
    ).items

    # If there are no events, then the PVC was just created
    if not evs:
        return status.create_status(status.STATUS_PHASE.WAITING,
                                    "Provisioning Volume...")

    msg = f"Pending: {evs[0].message}"
    state = evs[0].reason
    # FIX: `phase` was previously left unassigned when the newest event's
    # reason/type matched none of the cases below, raising NameError at the
    # final create_status call.  Default to WAITING for unrecognized events.
    phase = status.STATUS_PHASE.WAITING
    if evs[0].reason == "WaitForFirstConsumer":
        phase = status.STATUS_PHASE.UNAVAILABLE
        msg = (
            "Pending: This volume will be bound when its first consumer"
            " is created. E.g., when you first browse its contents, or"
            " attach it to a notebook server"
        )
    elif evs[0].reason == "Provisioning":
        phase = status.STATUS_PHASE.WAITING
    elif evs[0].reason == "FailedBinding":
        phase = status.STATUS_PHASE.WARNING
    elif evs[0].type == "Warning":
        phase = status.STATUS_PHASE.WARNING
    elif evs[0].type == "Normal":
        phase = status.STATUS_PHASE.READY

    return status.create_status(phase, msg, state)
def viewer_status(viewer):
    """
    Return a string representing the status of that viewer. If a deletion
    timestamp is set we want to return a `Terminating` state.

    (Reconstructed: stray dataset separator tokens removed from the
    docstring and the WAITING return line.)
    """
    try:
        ready = viewer["status"]["ready"]
    except KeyError:
        # the controller has not populated status yet
        return status.STATUS_PHASE.UNINITIALIZED

    if "deletionTimestamp" in viewer["metadata"]:
        return status.STATUS_PHASE.TERMINATING

    if not ready:
        return status.STATUS_PHASE.WAITING

    return status.STATUS_PHASE.READY
|
ource, destination) ]
def generate_favicon_resources():
    """Rasterize res/favicon.svg into every favicon/touch-icon PNG plus a
    multi-resolution favicon.ico under static/favicon, and return the
    STATIC_ROUTE entries that serve them.

    Requires the external `inkscape` and ImageMagick `convert` commands
    (invoked through sCall).
    """
    # filename templates for each icon family
    fav_tpl = lambda r: "favicon-{0}x{0}.png".format(r)
    and_tpl = lambda r: "touch-icon-{0}x{0}.png".format(r)   # Android
    app_tpl = lambda r: "apple-touch-icon-{0}x{0}.png".format(r)
    pra_tpl = lambda r: "apple-touch-icon-{0}x{0}-precomposed.png".format(r)
    fav_path = lambda p: normpath(join("static/favicon", p))
    favicon_tpl = normpath(join(SCRIPT_DIR, "res/favicon.svg"))
    ico_res = [ "16", "24", "32", "48", "64", "128", "256" ]  # folded into .ico
    fav_res = [ "16", "32", "96", "160", "196", "300" ]
    android_res = [ "192" ]
    apple_res = [ "57", "76", "120", "152", "180" ] # add to head backwards
    if not isdir("static/favicon"): os.makedirs("static/favicon")
    # generate favicon resources
    for res in (list(set(ico_res) | set(fav_res)) + android_res + apple_res):
        if res in android_res: path = abspath( fav_path(and_tpl(res)) )
        elif res in apple_res: path = abspath( fav_path(app_tpl(res)) )
        else: path = abspath( fav_path(fav_tpl(res)) )
        sCall("inkscape", "-z", "-e", path, "-w", res, "-h", res, favicon_tpl)
    # bundle all ico_res renderings into a single favicon.ico ...
    sCall( *(["convert"] + [fav_path(fav_tpl(r)) for r in ico_res] +
             [fav_path("favicon.ico")]) )
    # ... then drop the PNGs that were only needed for the .ico
    for res in [ r for r in ico_res if r not in fav_res ]:
        os.remove(fav_path(fav_tpl(res)))
    # return routes for generated favicon resources
    fav_route = lambda f: STATIC_ROUTE(f, f, "static/favicon")
    # the 57px apple icon also serves the two unsized apple-touch-icon names
    app_route = lambda p,t: STATIC_ROUTE(p, t("57"), "static/favicon")
    return ([ fav_route("favicon.ico") ] +
            [ fav_route(fav_tpl(r)) for r in fav_res ] +
            [ fav_route(and_tpl(r)) for r in android_res ] +
            [ fav_route(app_tpl(r)) for r in apple_res if r!="57" ] +
            [ fav_route(pra_tpl(r)) for r in apple_res if r!="57" ] +
            [ app_route("apple-touch-icon.png", app_tpl),
              app_route("apple-touch-icon-precomposed.png", pra_tpl) ])
def generate_stylesheets():
    """Compile the Sass sources under dev/sass into static/css and return
    STATIC_ROUTE entries for every generated stylesheet.

    Deploy mode compiles each top-level .scss once (compressed); dev mode
    briefly runs `sass --watch` so the initial CSS files exist, then kills
    it.  Requires the `sass` command line tool.
    """
    dev_path = join( SCRIPT_DIR, "dev/sass" )
    is_sass = lambda f: splitext(f)[-1].lower() in ['.scss', '.sass']
    is_mixin = lambda f: match(r'.*mixins?$', splitext(f)[0].lower())
    # all sass files below dev/sass/<p>, as paths relative to dev/sass
    get_import = lambda p: [ join( relpath(r, dev_path), f )
                             for r, d, fs in os.walk( join(dev_path, p) )
                             for f in fs if is_sass(f) ]
    if not isdir("static/css"): os.makedirs("static/css")
    # generate _all.scss file from existing sass resources
    with open( join( dev_path, '_all.scss' ), 'w') as f:
        f.write('\n'.join( # probably not the most efficient way
            [ '@import "{}";'.format(path.replace('\\', '/')) for path in
                ( # mixins and global variables must be imported first
                    # modules
                    [ f for f in get_import('modules') ]
                    # vendor mixins
                    + [ f for f in get_import('vendor') if is_mixin(f) ]
                    # all other vendor files
                    + [ f for f in get_import('vendor') if not is_mixin(f) ]
                    # partials (comment out this line for manual selection)
                    + [ f for f in get_import('partials') ]
                )
            ] )
        )
    # use sass command line tool to generate stylesheets
    stylesheets = [ splitext(f)[0] for f in os.listdir(dev_path)
                    if is_sass(f) and not f.startswith('_') ]
    sass_path = relpath(dev_path, os.getcwd()).replace('\\', '/')
    if args.deploy:
        for s in stylesheets:
            sCall("sass", sass_path+"/"+s+".scss", "static/css/"+s+".min.css",
                  "-t", "compressed", "--sourcemap=none", "-C")
        os.remove( join(dev_path, "_all.scss") )
    else:
        Template.populate(WATCH_SASS_SCRIPT, '../dev/sass/watch.py')
        command = "sass --watch"
        for s in stylesheets:
            command += " ../dev/sass/{0}.scss:./static/css/{0}.css".format(s)
        p = Popen(command, shell=True)
        #p = sPopen( 'python', '../dev/sass/watch.py', *stylesheets )
        sleep(3) # delay so the stylesheets have time to be created
        p.kill() # note: kill sends SIGKILL
    # return css routes from generated stylesheets
    return [ STATIC_ROUTE(f, f, "static/css") for f in os.listdir("static/css")]
def generate_javascript():
    """Publish dev/js as static/js and return its routes (via migrate_static_files)."""
    return migrate_static_files("dev/js", "static/js")
def get_favicon_head():
    """Build the <link> head markup for all generated favicon resources.

    Scans static/favicon and emits a shortcut-icon link plus size-annotated
    links for the Android, Apple and plain favicon PNG families, largest
    resolution first within each family.

    (Reconstructed: dataset separator tokens removed from the fav_set list.)
    """
    link_tpl = lambda c: ' <link {0}>\n'.format(c)
    all_favs = os.listdir('static/favicon')
    favicons = [ x for x in all_favs if x.startswith('favicon') ]
    apple_favs = [ x for x in all_favs if x.startswith('apple') ]
    android_favs = [ x for x in all_favs if not x in favicons + apple_favs ]
    # favicon.ico is linked explicitly, not via the sized-PNG loop below
    fav_head = link_tpl('rel="shortcut icon" href="favicon.ico"')
    favicons.remove('favicon.ico')

    def gen_head(fav_tpl, fav_set):
        """Yield <link> lines for one icon family, largest resolution first."""
        dic = {}
        for fav in fav_set:
            # pull the pixel size out of names like 'favicon-32x32.png'
            res = int(search(r'([0-9]+)x', fav).group(1))
            dic[res] = fav
        for key in sorted(dic, reverse=True):
            yield link_tpl( fav_tpl.format(key, dic[key]) )

    for fav_set in [
            ('rel="icon" sizes="{0}x{0}" href="/{1}"', android_favs),
            ('rel="apple-touch-icon" sizes="{0}x{0}" href="/{1}"', apple_favs),
            ('rel="icon" type="image/png" sizes="{0}x{0}" href="/{1}"', favicons) ]:
        fav_head += "".join( gen_head(*fav_set) )
    return fav_head
def get_opengraph_head():
    """Return the Open Graph <meta> block for the page head template.

    When the 300x300 favicon has been generated, the `open_graph_image`
    placeholder tag is swapped for the full set of og:image tags
    referencing it; otherwise the placeholder is left in place.
    """
    head = """\
% url = request.environ['HTTP_HOST']
<meta property="og:url" content="http://{{url}}/">
<meta property="og:type" content="website">
<meta property="og:title" content="{{title}}">
<meta property="open_graph_image">
<meta property="og:description" content="{{description}}">"""
    image_block = """<meta property="og:image:type" content="image/png">
<meta property="og:image:width" content="300">
<meta property="og:image:height" content="300">
<meta property="og:image:url" content="http://{{url}}/favicon-300x300.png">
<meta property="og:image" content="http://{{url}}/favicon-300x300.png">"""
    placeholder = '<meta property="open_graph_image">'
    if isfile("static/favicon/favicon-300x300.png"):
        head = head.replace(placeholder, image_block)
    return head
def get_stylesheet_head():
    """Build the <link> head markup for the generated stylesheets.

    The site-wide 'styles' sheet is always linked; every other sheet is
    linked conditionally through an inline bottle `% if` on the page's
    `template` variable (minified names in deploy mode).
    """
    styles_tpl = ' <link rel="stylesheet" type="text/css" href="/{0}">\n'
    stylesheets = os.listdir('static/css')
    styles_head = ''
    # link the shared sheet first and drop it from the per-template list
    for style in stylesheets:
        if style.split('.')[0] == 'styles':
            styles_head += styles_tpl.format(style)
            # mutation during iteration is safe here: we break immediately
            stylesheets.remove(style)
            break
    stylesheets = [ s.split('.')[0] for s in stylesheets ]
    styles_head += " % if template in {}:\n".format(stylesheets)
    tpl_style = '{{template}}.min.css' if args.deploy else '{{template}}.css'
    styles_head += styles_tpl.format(tpl_style)
    styles_head += " % end"
    return styles_head
os.chdir(args.path)
if isdir("www"): rmtree("www")
os.makedirs("www")
os.chdir("www")
### Import Bottle Framework ####################################################
from urllib.error import URLError
bottle_url = "https://raw.githubusercontent.com/bottlepy/bottle/master/bottle.py"
try:
with urlopen(bottle_url) as response, open('bottle.py', 'wb') as f:
copyfileobj(response, f)
except URLError as e:
print(e)
### Generate App.py ############################################################
Template.populate(APP_PY_TEMPLATE, 'app.py',
doc_string="",
main_routes=migrate_views(),
api_routes=get_api_routes(),
static_routes=migrate_static_files("res/static", "static"),
favicon_routes=generate_favicon_resources(),
image_routes=migrate_static_files("res/img", "static/img"),
font_routes=migrate_static_files("res/font", "static/font"),
css_routes=generate_stylesheets(),
js_routes=generate_javascript() )
### Generate Head Template #####################################################
if isfile('views/~head.tpl'): os.remove |
"""Model. We are modeling Person objects with a collection
of Address objects. Each Address has a PostalCode, which
in turn references a City and then a Country:
Person --(1..n)--> Address
Address --(has a)--> PostalCode
PostalCode --(has a)--> City
City --(has a)--> Country
"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from meta import Base, FromCache, Session, RelationshipCache
class Country(Base):
    """A country; the root of the geographic hierarchy."""
    __tablename__ = 'country'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)

    def __init__(self, name):
        self.name = name
class City(Base):
    """A city, belonging to exactly one Country."""
    __tablename__ = 'city'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    country_id = Column(Integer, ForeignKey('country.id'), nullable=False)
    country = relationship(Country)

    def __init__(self, name, country):
        self.name = name
        self.country = country
class PostalCode(Base):
    """A postal code, belonging to exactly one City."""
    __tablename__ = 'postal_code'

    id = Column(Integer, primary_key=True)
    code = Column(String(10), nullable=False)
    city_id = Column(Integer, ForeignKey('city.id'), nullable=False)
    city = relationship(City)

    @property
    def country(self):
        """Convenience shortcut to the city's country."""
        return self.city.country

    def __init__(self, code, city):
        self.code = code
        self.city = city
class Address(Base):
    """A street address of a Person, pointing at a PostalCode."""
    __tablename__ = 'address'

    id = Column(Integer, primary_key=True)
    person_id = Column(Integer, ForeignKey('person.id'), nullable=False)
    street = Column(String(200), nullable=False)
    postal_code_id = Column(Integer, ForeignKey('postal_code.id'))
    postal_code = relationship(PostalCode)

    @property
    def city(self):
        """Convenience shortcut to the postal code's city."""
        return self.postal_code.city

    @property
    def country(self):
        """Convenience shortcut to the postal code's country."""
        return self.postal_code.country

    def __str__(self):
        # tab-separated: street, "<city>, <code>", country
        return "%s\t"\
               "%s, %s\t"\
               "%s" % (self.street, self.city.name,
                       self.postal_code.code, self.country.name)
class Person(Base):
    """A person owning a set of Address objects."""
    __tablename__ = 'person'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    # collection_class=set: addresses are kept as an unordered set
    addresses = relationship(Address, collection_class=set)

    def __init__(self, name, *addresses):
        self.name = name
        self.addresses = set(addresses)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Person(name=%r)" % self.name

    def format_full(self):
        """Return the person's name and all addresses, tab-separated."""
        return "\t".join([str(x) for x in [self] + list(self.addresses)])
# Caching options. A set of three RelationshipCache options
# which can be applied to Query(), causing the "lazy load"
# of these attributes to be loaded from cache.
# (Reconstructed: dataset separator tokens removed from the comment and
# the assignment target.)
cache_address_bits = RelationshipCache("default", "byid", PostalCode.city).\
    and_(
        RelationshipCache("default", "byid", City.country)
    ).and_(
        RelationshipCache("default", "byid", Address.postal_code)
    )
|
return p
if isinstance(p, string_types):
p = p.replace(' ', '')
try:
# we might have a Float
neg_pow, digits, expt = decimal.Decimal(p).as_tuple()
p = [1, -1][neg_pow]*int("".join(str(x) for x in digits))
if expt > 0:
# TODO: this branch needs a test
return Rational(p*Pow(10, expt), 1)
return Rational(p, Pow(10, -expt))
except decimal.InvalidOperation:
f = regex.match('^([-+]?[0-9]+)/([0-9]+)$', p)
if f:
n, d = f.groups()
return Rational(int(n), int(d))
elif p.count('/') == 1:
p, q = p.split('/')
return Rational(Rational(p), Rational(q))
else:
pass # error will raise below
else:
try:
if isinstance(p, fractions.Fraction):
return Rational(p.numerator, p.denominator)
except NameError:
pass # error will raise below
if isinstance(p, (float, Float)):
return Rational(*_as_integer_ratio(p))
if not isinstance(p, SYMPY_INTS + (Rational,)):
raise TypeError('invalid input: %s' % p)
q = S.One
else:
p = Rational(p)
q = Rational(q)
if isinstance(q, Rational):
p *= q.q
q = q.p
if isinstance(p, Rational):
q *= p.q
p = p.p
# p and q are now integers
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
if p < 0:
return S.NegativeInfinity
return S.Infinity
if q < 0:
q = -q
p = -p
n = igcd(abs(p), q)
if n > 1:
p //= n
q //= n
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
#obj._args = (p, q)
return obj
def limit_denominator(self, max_denominator=1000000):
    """Closest Rational to self with denominator at most max_denominator.

    Raises ValueError if max_denominator < 1.

    >>> from sympy import Rational
    >>> Rational('3.141592653589793').limit_denominator(10)
    22/7
    >>> Rational('3.141592653589793').limit_denominator(100)
    311/99

    """
    # Algorithm notes: For any real number x, define a *best upper
    # approximation* to x to be a rational number p/q such that:
    #
    #   (1) p/q >= x, and
    #   (2) if p/q > r/s >= x then s > q, for any rational r/s.
    #
    # Define *best lower approximation* similarly.  Then it can be
    # proved that a rational number is a best upper or lower
    # approximation to x if, and only if, it is a convergent or
    # semiconvergent of the (unique shortest) continued fraction
    # associated to x.
    #
    # To find a best rational approximation with denominator <= M,
    # we find the best upper and lower approximations with
    # denominator <= M and take whichever of these is closer to x.
    # In the event of a tie, the bound with smaller denominator is
    # chosen.  If both denominators are equal (which can happen
    # only when max_denominator == 1 and self is midway between
    # two integers) the lower bound---i.e., the floor of self, is
    # taken.
    if max_denominator < 1:
        raise ValueError("max_denominator should be at least 1")
    if self.q <= max_denominator:
        return self

    # continued-fraction convergents: (p0/q0, p1/q1) walk toward self
    p0, q0, p1, q1 = 0, 1, 1, 0
    n, d = self.p, self.q
    while True:
        a = n//d
        q2 = q0 + a*q1
        if q2 > max_denominator:
            break
        p0, q0, p1, q1 = p1, q1, p0 + a*p1, q2
        n, d = d, n - a*d
    # largest semiconvergent with denominator <= max_denominator
    k = (max_denominator - q0)//q1
    bound1 = Rational(p0 + k*p1, q0 + k*q1)
    bound2 = Rational(p1, q1)
    if abs(bound2 - self) <= abs(bound1 - self):
        return bound2
    else:
        return bound1
def __getnewargs__(self):
    # For pickling: recreate the Rational from its (numerator, denominator).
    return (self.p, self.q)
def _hashable_content(self):
    # Hash/equality identity is the (p, q) pair.
    return (self.p, self.q)
def _eval_is_positive(self):
    # The sign lives in the numerator (q is normalized to be positive
    # by the constructor, see the q < 0 branch in __new__).
    return self.p > 0
def _eval_is_zero(self):
    # Normalized form: zero iff the numerator is zero.
    return self.p == 0
def __neg__(self):
    # Negate the numerator only; the denominator stays positive.
    return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
    """Exact sum with another Rational; Floats delegate to Float addition."""
    if isinstance(other, Rational):
        # p1/q1 + p2/q2 = (p1*q2 + q1*p2)/(q1*q2); the constructor reduces
        return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
    elif isinstance(other, Float):
        return other + self
    else:
        return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
    """Exact difference with another Rational; Floats delegate to Float."""
    if isinstance(other, Rational):
        return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
    elif isinstance(other, Float):
        # a - b == -b + a, using Float's addition
        return -other + self
    else:
        return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
    """Exact product with another Rational; Floats delegate to Float."""
    if isinstance(other, Rational):
        return Rational(self.p*other.p, self.q*other.q)
    elif isinstance(other, Float):
        return other*self
    else:
        return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
    """Exact quotient with another Rational; Floats delegate to Float.

    (Reconstructed: dataset separator token removed from the final
    Number.__div__ call.)
    """
    if isinstance(other, Rational):
        # (p1/q1)/(p2/q2) = (p1*q2)/(q1*p2); zero divisor handled by __new__
        return Rational(self.p*other.q, self.q*other.p)
    elif isinstance(other, Float):
        return self*(1/other)
    else:
        return Number.__div__(self, other)

__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
    """Remainder, consistent with floor division of Rationals.

    (Reconstructed: dataset separator token removed from the
    prec_to_dps call.)
    """
    if isinstance(other, Rational):
        # n = floor(self/other); remainder = self - n*other, over common q
        n = (self.p*other.q) // (other.p*self.q)
        return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
    if isinstance(other, Float):
        # calculate mod with Rationals, *then* round the answer
        return Float(self.__mod__(Rational(other)),
                     prec_to_dps(other._prec))
    return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
    """Reflected mod: computes other % self after sympifying `other`."""
    if isinstance(other, Rational):
        return Rational.__mod__(other, self)
    return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(self.q, -self.p)**ne
else:
return S.NegativeOne**ne*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(self.p**expt.p, self.q**expt.p)
if isinstance(expt, Rational):
if self.p != 1:
# (4/3)**(5/6) -> 4**(5/6)* |
from django.core import serializers
from rest_framework.response import Response
from django.http import JsonResponse
try:
from urllib import quote_plus # python 2
except:
pass
try:
from urllib.parse import quote_plus # python 3
except:
pass
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post
def post_create(request):
    """Create a new Post from submitted form data.

    NOTE(review): the guard 404s unless the user is BOTH staff and
    superuser (`not a or not b` == `not (a and b)`) — confirm that
    "staff OR superuser" was not intended (same pattern in post_update
    and post_delete).
    """
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    form = PostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        instance = form.save(commit=False)
        # attach the author before the actual save
        instance.user = request.user
        instance.save()
        # message success
        messages.success(request, "Successfully Created")
        return HttpResponseRedirect(instance.get_absolute_url())
    context = {
        "form": form,
    }
    return render(request, "post_form.html", context)
def post_detail(request, slug=None):
    """Display a single Post and handle its comment form.

    Draft or future-dated posts are hidden from non-staff users.  A valid
    submission from an authenticated user creates a (possibly threaded)
    Comment and redirects back to the post.
    """
    instance = get_object_or_404(Post, slug=slug)
    if instance.publish > timezone.now().date() or instance.draft:
        # unpublished content is only visible to staff/superusers
        if not request.user.is_staff or not request.user.is_superuser:
            raise Http404
    share_string = quote_plus(instance.content)

    initial_data = {
        "content_type": instance.get_content_type,
        "object_id": instance.id
    }
    form = CommentForm(request.POST or None, initial=initial_data)
    if form.is_valid() and request.user.is_authenticated():
        c_type = form.cleaned_data.get("content_type")
        content_type = ContentType.objects.get(model=c_type)
        obj_id = form.cleaned_data.get('object_id')
        content_data = form.cleaned_data.get("content")
        parent_obj = None
        # FIX: was a bare `except:` that swallowed every exception type;
        # only the conversion errors from a missing (None) or non-numeric
        # parent_id are expected here.
        try:
            parent_id = int(request.POST.get("parent_id"))
        except (TypeError, ValueError):
            parent_id = None
        if parent_id:
            parent_qs = Comment.objects.filter(id=parent_id)
            if parent_qs.exists() and parent_qs.count() == 1:
                parent_obj = parent_qs.first()

        new_comment, created = Comment.objects.get_or_create(
            user=request.user,
            content_type=content_type,
            object_id=obj_id,
            content=content_data,
            parent=parent_obj,
        )
        return HttpResponseRedirect(new_comment.content_object.get_absolute_url())

    comments = instance.comments
    context = {
        "title": instance.title,
        "instance": instance,
        "share_string": share_string,
        "comments": comments,
        "comment_form": form,
    }
    return render(request, "post_detail.html", context)
def post_list(request):
    """List active posts (all posts for staff), with search and pagination.

    (Reconstructed: stray dataset separator after .distinct() and inside
    the EmptyPage comment removed; the "25 contacts" comment corrected to
    match the actual page size of 8.)
    """
    today = timezone.now().date()
    queryset_list = Post.objects.active()  # .order_by("-timestamp")
    if request.user.is_staff or request.user.is_superuser:
        queryset_list = Post.objects.all()

    query = request.GET.get("q")
    if query:
        # search title, body and author name; distinct() because the
        # user join can produce duplicate rows
        queryset_list = queryset_list.filter(
            Q(title__icontains=query) |
            Q(content__icontains=query) |
            Q(user__first_name__icontains=query) |
            Q(user__last_name__icontains=query)
        ).distinct()
    paginator = Paginator(queryset_list, 8)  # Show 8 posts per page
    page_request_var = "page"
    page = request.GET.get(page_request_var)
    try:
        queryset = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        queryset = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        queryset = paginator.page(paginator.num_pages)

    context = {
        "object_list": queryset,
        "title": "List",
        "page_request_var": page_request_var,
        "today": today,
    }
    return render(request, "post_list.html", context)
def post_update(request, slug=None):
    """Edit an existing Post identified by *slug*.

    NOTE(review): as in post_create, the guard requires the user to be
    both staff AND superuser — confirm intended.
    """
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    instance = get_object_or_404(Post, slug=slug)
    form = PostForm(request.POST or None,
                    request.FILES or None, instance=instance)
    if form.is_valid():
        instance = form.save(commit=False)
        instance.save()
        # extra_tags='html_safe' lets the template render the anchor markup
        messages.success(request, "<a href='#'>Item</a> Saved",
                         extra_tags='html_safe')
        return HttpResponseRedirect(instance.get_absolute_url())

    context = {
        "title": instance.title,
        "instance": instance,
        "form": form,
    }
    return render(request, "post_form.html", context)
def post_delete(request, slug=None):
    """Delete the Post identified by *slug* and go back to the post list.

    Requires the user to be both staff and superuser, matching the other
    write views in this module.
    """
    user = request.user
    # equivalent (De Morgan) to: not staff or not superuser -> 404
    if not (user.is_staff and user.is_superuser):
        raise Http404
    post = get_object_or_404(Post, slug=slug)
    post.delete()
    messages.success(request, "Successfully deleted")
    return redirect("posts:list")
|
import eventlet
import gettext
import sys
from staccato.common import config
import staccato.openstack.common.wsgi as os_wsgi
import staccato.openstack.common.pastedeploy as os_pastedeploy
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
gettext.install('staccato', unicode=1)
def fail(returncode, e):
    """Report an error on stderr and terminate the process.

    :param returncode: exit status to terminate with
    :param e: exception or message to report
    """
    message = "ERROR: %s\n" % (e,)
    sys.stderr.write(message)
    sys.exit(returncode)
def main():
    """Load config, build the paste-deploy WSGI app and serve it forever.

    (Reconstructed: dataset separator tokens removed from the
    paste_deploy_app continuation lines.)
    """
    try:
        conf = config.get_config_object()
        paste_file = conf.find_file(conf.paste_deploy.config_file)
        wsgi_app = os_pastedeploy.paste_deploy_app(paste_file,
                                                   'staccato-api',
                                                   conf)
        server = os_wsgi.Service(wsgi_app, conf.bind_port)
        server.start()
        server.wait()  # blocks until the server stops
    except RuntimeError as e:
        fail(1, e)
main()
|
"""Utility methods for handling Entities.
These methods can be shared between entity generation (invoked through
the Entities class) at the start of prod data generation, and between
post processing methods (such as adding edges between family members
and neighbours).
"""
import codecs
import collections
import re
def get_surnames():
    """Retrieves a set of surnames from a provided data file.

    Each line of the file becomes one entry, stripped and lowercased.
    """
    path_surnames = 'prod_generation/surnames.txt'
    with codecs.open(path_surnames, 'r') as f:
        return {line.strip().lower() for line in f}
def get_academic_titles_parser():
    """Returns a regular expression for parsing academic titles.

    The compiled pattern exposes three named groups: `titles_pre`
    (titles before the name), `name_clean` (the bare name) and
    `titles_suffix` (titles after the name).

    NOTE(review): titles read from the data file are interpolated into
    the regex unescaped — assumes they contain no regex metacharacters.
    """
    # Read list of academic titles from the data file.
    path_titles = 'prod_generation/academic_titles.txt'
    with codecs.open(path_titles, 'r') as f:
        titles = set(line.strip() for line in f.readlines())

    # Compile the regular expression.
    re_titles = "|".join(titles)
    re_name = ("^(?P<titles_pre>((%s)\.?( |,))*)"
               "(?P<name_clean>.*?)"
               "(?P<titles_suffix>(( |,)*(%s)\.?)*)$" % (
                   re_titles, re_titles))
    return re.compile(re_name)
# NamedTuple for parsed entity names:
# - `titles_prefix` is a string of academic titles detected before name
# - `firstnames` is a non-empty list of given names
# - `surname` is a string
# - `titles_suffix` is a string of academic titles detected after name
ParsedName = collections.namedtuple(
    "ParsedName",
    ["titles_prefix", "firstnames", "surname", "titles_suffix"]
)
def parse_entity_name(entity_name, titles_parser, surnames,
                      verbose=False):
    """Parses an entity name into a ParsedName, or returns None.

    Args:
        entity_name: raw name, possibly containing newlines, academic
            titles and a trade-name suffix introduced by ' - '.
        titles_parser: compiled regex from get_academic_titles_parser().
        surnames: set of known lowercase surnames.
        verbose: print intermediate parsing steps for debugging.
    """
    if verbose:
        print('entity_name = |%s|' % (entity_name))

    # Remove newlines from `entity_name`:
    entity_name = entity_name.replace("\n", " ")

    # Trim name of Zivnost, followed by first occurrence of (' - ').
    p = entity_name.find(' - ')
    if (p > 0):
        name = entity_name[:p]
    else:
        name = entity_name
    if verbose:
        print('name = |%s|' % (name))

    # Trim academic titles from the start and end of the name.
    match = titles_parser.match(name).groupdict()
    titles_pre = match['titles_pre'] if 'titles_pre' in match else ''
    # FIX: the regex group is named `titles_suffix`; the previous lookup
    # used the key 'titles_suf', which is never present in groupdict(),
    # so suffix titles were silently dropped.
    titles_suf = match['titles_suffix'] if 'titles_suffix' in match else ''
    name_clean = match['name_clean']
    if verbose:
        print('name_clean = |%s|' % (name_clean))

    # Split cleaned name on spaces (it should now be a list of
    # firstnames, followed by a surname).
    names = name_clean.split()

    # Lowercase the names, so that we get case-insensitive matching on
    # both surnames and firstnames downstream.
    names = [name.lower() for name in names]

    # Less conservative than requiring the last token to be a surname:
    # find the last token that is a known surname and take everything
    # before it as given names.
    i = len(names) - 1
    while (i >= 1) and (names[i] not in surnames):
        i -= 1
    if i >= 1:
        return ParsedName(
            titles_prefix=titles_pre,
            firstnames=names[:i],
            surname=names[i],
            titles_suffix=titles_suf
        )
    else:
        if verbose:
            print('Parse failed')
        return None
|
[f[1] for f in fields]
f_string = [ "%s %s" %(f,t) for (f,t) in zip(self.all_fields,_types)]
sql = "CREATE TABLE %s (%s)" %(self.name,
",".join(f_string))
self.cursor.execute(sql)
return self
def open(self):
    """Open an existing database table.

    Sets mode to "open", loads field metadata and returns self.
    Raises IOError (Python 2 raise syntax) when the table is missing.
    """
    if self._table_exists():
        self.mode = "open"
        self._get_table_info()
        return self
    # table not found
    raise IOError,"Table %s doesn't exist" %self.name
def _table_exists(self):
    """Database-specific method to see if the table exists.

    Compares names case-insensitively against SHOW TABLES output.
    """
    self.cursor.execute("SHOW TABLES")
    for table in self.cursor.fetchall():
        if table[0].lower() == self.name.lower():
            return True
    return False
def _get_table_info(self):
    """Database-specific method to get field names.

    `all_fields` keeps every column (including the leading internal
    __id__/__version__ pair); `fields` is the user-defined remainder.
    """
    self.cursor.execute('DESCRIBE %s' %self.name)
    self.all_fields = [ f[0] for f in self.cursor.fetchall() ]
    self.fields = self.all_fields[2:]
def commit(self):
    """No-op kept for API compatibility (original note: "No use here ???")."""
    pass
def insert(self,*args,**kw):
    """Insert a record in the database
    Parameters can be positional or keyword arguments. If positional
    they must be in the same order as in the create() method
    If some of the fields are missing the value is set to None
    Returns the record identifier
    """
    if args:
        # positional args map onto the user-defined fields, in order
        kw = dict([(f,arg) for f,arg in zip(self.all_fields[2:],args)])
    # new records start at version 0 (optimistic-versioning counter)
    kw["__version__"] = 0
    vals = self._make_sql_params(kw)
    sql = "INSERT INTO %s SET %s" %(self.name,",".join(vals))
    res = self.cursor.execute(sql)
    self.cursor.execute("SELECT LAST_INSERT_ID()")
    __id__ = self.cursor.fetchone()[0]
    return __id__
def delete(self,removed):
    """Remove a single record, or the records in an iterable
    Before starting deletion, test if all records are in the base
    and don't have twice the same __id__
    Return the number of deleted items
    """
    if isinstance(removed,dict):
        # remove a single record
        removed = [removed]
    else:
        # convert iterable into a list (to be able to sort it)
        removed = [ r for r in removed ]
    if not removed:
        return 0

    _ids = [ r['__id__'] for r in removed ]
    _ids.sort()
    # single DELETE ... IN (...) statement for all ids at once
    sql = "DELETE FROM %s WHERE __id__ IN (%s)" %(self.name,
        ",".join([str(_id) for _id in _ids]))
    self.cursor.execute(sql)
    return len(removed)
def update(self,record,**kw):
    """Update the record with new keys and values.

    Bumps the record's __version__ counter before writing.
    (Reconstructed: dataset separator token removed from the SQL
    formatting line.)
    """
    # increment version number
    kw["__version__"] = record["__version__"] + 1
    vals = self._make_sql_params(kw)
    sql = "UPDATE %s SET %s WHERE __id__=%s" %(self.name,
        ",".join(vals),record["__id__"])
    self.cursor.execute(sql)
def _make_sql_params(self,kw):
    """Make a list of strings to pass to an SQL statement
    from the dictionary kw with Python types.

    (Reconstructed: stray dataset separator line removed between the
    def line and the docstring.  Uses Python 2 dict.iteritems, matching
    the rest of this module.)
    """
    vals = []
    for k,v in kw.iteritems():
        vals.append('%s=%s' %(k,self._conv(v)))
    return vals
def _conv(self,v):
    """Convert a Python value into an SQL literal string.

    NOTE(review): strings are only protected by doubling double-quotes;
    this is not full SQL escaping — verify inputs are trusted.
    """
    if isinstance(v,str):
        v = v.replace('"','""')
        return '"%s"' %v
    elif isinstance(v,datetime.date):
        # dates are stored as YYYYMMDD
        return v.strftime("%Y%m%d")
    else:
        return v
def _make_record(self,row):
    """Make a record dictionary from the result of a fetch_"""
    # column order in `row` matches self.all_fields (see _get_table_info)
    return dict(zip(self.all_fields,row))
def add_field(self,field,default=None):
    """Add a (name, type) column to the table, with an optional default.

    Raises ValueError (Python 2 raise syntax) if the field exists.
    """
    fname,ftype = field
    if fname in self.all_fields:
        raise ValueError,'Field "%s" already defined' %fname
    sql = "ALTER TABLE %s ADD %s %s" %(self.name,fname,ftype)
    if default is not None:
        sql += " DEFAULT %s" %self._conv(default)
    self.cursor.execute(sql)
    self.commit()
    # refresh the cached field lists
    self._get_table_info()
def drop_field(self,field):
    """Drop a user-defined column from the table.

    The internal __id__/__version__ columns cannot be removed.
    Raises ValueError (Python 2 raise syntax) on bad field names.
    """
    if field in ["__id__","__version__"]:
        raise ValueError,"Can't delete field %s" %field
    if not field in self.fields:
        raise ValueError,"Field %s not found in base" %field
    sql = "ALTER TABLE %s DROP %s" %(self.name,field)
    self.cursor.execute(sql)
    self._get_table_info()
def __call__(self,**kw):
    """Selection by field values
    db(key=value) returns the list of records where r[key] = value

    NOTE(review): multiple conditions are joined with "," in the WHERE
    clause rather than " AND " — verify this is valid for the target
    MySQL version.
    """
    for key in kw:
        if not key in self.all_fields:
            raise ValueError,"Field %s not in the database" %key
    vals = self._make_sql_params(kw)
    sql = "SELECT * FROM %s WHERE %s" %(self.name,",".join(vals))
    self.cursor.execute(sql)
    return [self._make_record(row) for row in self.cursor.fetchall() ]
def __getitem__(self,record_id):
    """Direct access by record id

    Raises IndexError (Python 2 raise syntax) when no record matches.
    """
    sql = "SELECT * FROM %s WHERE __id__=%s" %(self.name,record_id)
    self.cursor.execute(sql)
    res = self.cursor.fetchone()
    if res is None:
        raise IndexError,"No record at index %s" %record_id
    else:
        return self._make_record(res)
def __len__(self):
    # NOTE(review): `self.records` is never assigned in the visible
    # methods of this class — this likely raises AttributeError; a
    # SELECT COUNT(*) would be the expected implementation. Verify.
    return len(self.records)
def __delitem__(self,record_id):
    """Delete by record id"""
    # fetch-then-delete so delete() gets a full record dict
    self.delete(self[record_id])
def __iter__(self):
    """Iteration on the records"""
    # materializes the whole table before iterating
    self.cursor.execute("SELECT * FROM %s" %self.name)
    results = [ self._make_record(r) for r in self.cursor.fetchall() ]
    return iter(results)
if __name__ == '__main__':
connection = MySQLdb.connect("localhost","root","admin")
cursor = connection.cursor()
cursor.execute("USE test")
db = Base("pydbtest",connection).create(("name","TEXT"),("age","INTEGER"),
("size","REAL"),("birth","DATE"),
mode="override")
try:
db.add_field(("name","TEXT"))
except:
pass
import random
import datetime
names = ['pierre','claire','simon','camille','jean',
'florence','marie-anne']
#db = Base('PyDbLite_test')
#db.create('name','age','size','birth',mode="override")
for i in range(1000):
db.insert(name=random.choice(names),
age=random.randint(7,47),size=random.uniform(1.10,1.95),
birth=datetime.date(1990,10,10))
db.commit()
print 'Record #20 :',db[20]
print '\nRecords with age=30 :'
for rec in [ r for r in db if r["age"]==30 ]:
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print "\nSame with __call__"
# same with select
for rec in db(age=30):
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print [ r for r in db if r["age"]==30 ] == db(age=30)
raw_input()
db.insert(name=random.choice(names)) # missing fields
print '\nNumber of records with 30 <= age < 33 :',
print sum([1 for r in db if 33 > r['age'] >= 30])
print db.delete([])
d = db.delete([r for r in db if 32> r['age'] >= 30 and r['name']==u'pierre'])
print "\nDeleting %s records with name == 'pierre' and 30 <= age < 32" %d
print '\nAfter deleting records '
for rec in db(age=30):
print '%-10s | %2s | %s' %(rec['name'],rec['age'],round(rec['size'],2))
print '\n',sum([1 for r in db]),'records in the database'
print '\nMake pierre uppercase for age > 27'
for record in ([r for r in db if r['name']=='pierre' and r['age'] >27]) :
db.update(record,name="Pierre")
print len([r for r in db if r['name']=='Pierre']),'Pierre'
print len([r for r in db if r['name']=='pierre']),'pierre'
print len([r for r in db if r['name'] in ['pierre','Pierre']]),'p/Pierre'
print 'is unicode :',isinstance(db[20]['name'],unicode)
db.commit() |
r\n'
' SCSI ID: IET 00010000\n'
' SCSI SN: beaf10\n'
' Size: 0 MB, Block size: 1\n'
' Online: Yes\n'
' Removable media: No\n'
' Prevent removal: No\n'
' Readonly: No\n'
' SWP: No\n'
' Thin-provisioning: No\n'
' Backing store type: null\n'
' Backing store path: None\n'
' Backing store flags:\n'
' LUN: 1\n'
' Type: disk\n'
' SCSI ID: IET 00010001\n'
' SCSI SN: beaf11\n'
' Size: 1074 MB, Block size: 512\n'
' Online: Yes\n'
' Removable media: No\n'
' Prevent removal: No\n'
' Readonly: No\n'
' SWP: No\n'
' Thin-provisioning: No\n'
' Backing store type: rdwr\n'
' Backing store path: /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' Backing store flags:\n'
' Account information:\n'
' mDVpzk8cZesdahJC9h73\n'
' ACL information:\n'
' ALL"\n')
def fake_safe_get(self, value):
if value == 'volumes_dir':
return self.fake_volumes_dir
elif value == 'iscsi_protocol':
return self.configuration.iscsi_protocol
    def test_iscsi_protocol(self):
        # The tgt target should report plain 'iscsi' as its protocol.
        self.assertEqual(self.target.iscsi_protocol, 'iscsi')
def test_get_target(self):
def _fake_execute(*args, **kwargs):
return self.fake_iscsi_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute)
self.assertEqual('1',
self.target._get_target('iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45'))
def test_verify_backing_lun(self):
def _fake_execute(*args, **kwargs):
return self.fake_iscsi_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute)
self.assertTrue(self.target._verify_backing_lun(
'iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45', '1'))
# Test the failure case
bad_scan = self.fake_iscsi_scan.replace('LUN: 1', 'LUN: 3')
def _fake_execute_bad_lun(*args, **kwargs):
return bad_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute_bad_lun)
self.assertFalse(self.target._verify_backing_lun(
'iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45', '1'))
def test_get_target_chap_auth(self):
persist_file =\
'<target iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45>\n'\
' backing-store /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n'\
' driver iscsi\n'\
' incominguser otzLy2UYbYfnP4zXLG5z 234Zweo38VGBBvrpK9nt\n'\
' write-cache on\n'\
'</target>'
test_vol =\
'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
with open(os.path.join(self.fake_volumes_dir,
test_vol.split(':')[1]),
'wb') as tmp_file:
tmp_file.write(persist_file)
expected = ('otzLy2UYbYfnP4zXLG5z', '234Zweo38VGBBvrpK9nt')
self.assertEqual(expected, self.target._get_target_chap_auth(test_vol))
def test_create_iscsi_target(self):
def _fake_execute(*args, **kwargs):
return '', ''
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
def test_create_iscsi_target_already_exists(self):
def _fake_execute(*args, **kwargs):
if 'update' in args:
raise putils.ProcessExecutionError(
exit_code=1,
stdout='',
stderr='target already exists',
cmd='tgtad --lld iscsi --op show --mode target')
else:
return 'fake out', 'fake err'
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
def test_create_export(self):
def _fake_execute(*args, **kwargs):
return '', ''
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
self.stubs.Set(self.target,
'_get_target_chap_auth',
lambda x: None)
self.stubs.Set(vutils,
'generate_username',
lambda: 'QZJbisGmn9AL954FNF4D')
self.stubs.Set(vutils,
'generate_password',
lambda: 'P68eE7u9eFqDGexd28DQ')
expected_result = {'location': '10.9.8.7:3260,1 '
'iqn.2010-10.org.openstack:testvol 1',
'auth': 'CHAP '
'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
ctxt = context.get_admin_context()
self.assertEqual(expected_result,
self.target.create_export(ctxt,
self.testvol_1,
self.fake_volumes_dir))
self.stubs.Set(self.target,
'_get_target_chap_auth',
lambda x: ('otzLy2UYbYfnP4zXLG5z',
'234Zweo38VGBBvrpK9nt'))
expected_result['auth'] = ('CHAP '
'otzLy2UYbYfnP4zXLG5z 234Zweo38VGBBvrpK9nt')
self.assertEqual(expected_result,
self.target.create_export(ctxt,
| self.testvol_1,
self.fake_volumes_dir))
def test_ensure_export(self):
ctxt = context.get_admin_context()
| with mock.patch.object(self.target, 'create_iscsi_target'):
self.target.ensure_export(ctxt,
self.testvol_1,
self.fake_volumes_dir)
self.target.create_iscsi_target.assert_called_on |
.exp(x1_k)
z = tf.reduce_sum(u_k)
return tf.log(z) + m
def linear(x, out_size, do_bias=True, alpha=1.0, identity_if_possible=False,
           normalized=False, name=None, collections=None):
  r"""Linear (affine) transformation, y = x W + b, for a variety of
  configurations.

  Args:
    x: The input tensor to transform.
    out_size: The integer size of the non-batch output dimension.
    do_bias (optional): Add a learnable bias vector to the operation.
    alpha (optional): A multiplicative scaling for the weight initialization
      of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
    identity_if_possible (optional): just return identity,
      if x.shape[1] == out_size.
    normalized (optional): Option to divide out by the norms of the rows of W.
    name (optional): The name prefix to add to variables.
    collections (optional): List of additional collections. (Placed in
      tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)

  Returns:
    In the equation, y = x W + b, returns the tensorflow op that yields y.
  """
  in_size = int(x.get_shape()[1])  # from Dimension(10) -> 10
  if identity_if_possible and in_size == out_size:
    # Sometimes linear layers are nothing more than size adapters.
    wname = (name + "/W") if name else "/W"
    return tf.identity(x, name=(wname + '_ident'))
  # All weight initialization (alpha scaling, optional row normalization)
  # happens inside init_linear, so nothing is precomputed here. (The old
  # code computed an unused stddev/mat_init pair — removed.)
  W, b = init_linear(in_size, out_size, do_bias=do_bias, alpha=alpha,
                     normalized=normalized, name=name, collections=collections)
  if do_bias:
    return tf.matmul(x, W) + b
  return tf.matmul(x, W)
def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
                bias_init_value=None, alpha=1.0, identity_if_possible=False,
                normalized=False, name=None, collections=None):
  r"""Create the parameters (W, b) of a linear (affine) transformation
  y = x W + b, for a variety of configurations.

  Args:
    in_size: The integer size of the non-batch input dimension. [(x),y]
    out_size: The integer size of non-batch output dimension. [x,(y)]
    do_bias (optional): Add a learnable bias vector to the operation.
    mat_init_value (optional): numpy constant for matrix initialization, if None
      , do random, with additional parameters.
    bias_init_value (optional): numpy constant for bias initialization; if
      None, the bias is initialized to zeros.
    alpha (optional): A multiplicative scaling for the weight initialization
      of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
    identity_if_possible (optional): just return identity,
      if x.shape[1] == out_size.
    normalized (optional): Option to divide out by the norms of the rows of W.
    name (optional): The name prefix to add to variables.
    collections (optional): List of additional collections. (Placed in
      tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)

  Returns:
    In the equation, y = x W + b, returns the pair (W, b).
    b is None when do_bias is False.
  """
  # Validate explicit initial values up front so shape mismatches fail fast.
  if mat_init_value is not None and mat_init_value.shape != (in_size, out_size):
    raise ValueError(
        'Provided mat_init_value must have shape [%d, %d].'%(in_size, out_size))
  if bias_init_value is not None and bias_init_value.shape != (1,out_size):
    raise ValueError(
        'Provided bias_init_value must have shape [1,%d].'%(out_size,))
  if mat_init_value is None:
    # Random init scaled as alpha / sqrt(fan_in).
    stddev = alpha/np.sqrt(float(in_size))
    mat_init = tf.random_normal_initializer(0.0, stddev)
  wname = (name + "/W") if name else "/W"
  if identity_if_possible and in_size == out_size:
    # Identity weights and a zero bias: no trainable variables are created.
    return (tf.constant(np.eye(in_size).astype(np.float32)),
            tf.zeros(in_size))
  # Note the use of get_variable vs. tf.Variable. this is because get_variable
  # does not allow the initialization of the variable with a value.
  if normalized:
    # Also tag the weight in "norm-variables" so it can be re-normalized.
    w_collections = [tf.GraphKeys.GLOBAL_VARIABLES, "norm-variables"]
    if collections:
      w_collections += collections
    if mat_init_value is not None:
      w = tf.Variable(mat_init_value, name=wname, collections=w_collections)
    else:
      w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
                          collections=w_collections)
    w = tf.nn.l2_normalize(w, dim=0) # x W, so xW_j = \sum_i x_bi W_ij
  else:
    w_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
    if collections:
      w_collections += collections
    if mat_init_value is not None:
      w = tf.Variable(mat_init_value, name=wname, collections=w_collections)
    else:
      w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
                          collections=w_collections)
  b = None
  if do_bias:
    b_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
    if collections:
      b_collections += collections
    bname = (name + "/b") if name else "/b"
    if bias_init_value is None:
      b = tf.get_variable(bname, [1, out_size],
                          initializer=tf.zeros_initializer(),
                          collections=b_collections)
    else:
      b = tf.Variable(bias_init_value, name=bname,
                      collections=b_collections)
  return (w, b)
def write_data(data_fname, data_dict, use_json=False, compression=None):
  """Write data in HDF5 (or JSON) format.

  Args:
    data_fname: The filename of the file in which to write the data.
    data_dict: The dictionary of data to write. The keys are strings
      and the values are numpy arrays.
    use_json (optional): human readable format for simple items
    compression (optional): The compression to use for h5py (disabled by
      default because the library borks on scalars, otherwise try 'gzip').
  """
  dir_name = os.path.dirname(data_fname)
  # dir_name is '' for a bare filename; os.makedirs('') would raise.
  if dir_name and not os.path.exists(dir_name):
    os.makedirs(dir_name)
  if use_json:
    with open(data_fname, 'w') as the_file:
      json.dump(data_dict, the_file)
  else:
    try:
      with h5py.File(data_fname, 'w') as hf:
        for k, v in data_dict.items():
          # h5py treats '/' as a group separator, so strip it from keys.
          clean_k = k.replace('/', '_')
          # Was `clean_k is not k`: an identity check that only worked by a
          # CPython implementation detail; compare values instead.
          if clean_k != k:
            print('Warning: saving variable with name: ', k, ' as ', clean_k)
          else:
            print('Saving variable with name: ', clean_k)
          hf.create_dataset(clean_k, data=v, compression=compression)
    except IOError:
      # Was print("...%s...", fname) which printed the literal format string;
      # actually interpolate the filename.
      print("Cannot open %s for writing." % data_fname)
      raise
def read_data(data_fname):
  """Read saved data in HDF5 format.

  Args:
    data_fname: The filename of the file from which to read the data.

  Returns:
    A dictionary whose keys will vary depending on dataset (but should
    always contain the keys 'train_data' and 'valid_data') and whose
    values are numpy arrays.
  """
  try:
    with h5py.File(data_fname, 'r') as hf:
      # Materialize every dataset into an in-memory numpy array.
      return {key: np.array(val) for key, val in hf.items()}
  except IOError:
    print("Cannot open %s for reading." % data_fname)
    raise
def write_datasets(data_path, data_fname_stem, dataset_dict, compression=None):
  """Write datasets in HDF5 format.

  This function assumes the dataset_dict is a mapping ( string ->
  to data_dict ).  It calls write_data for each data dictionary,
  post-fixing the data filename with the key of the dataset.

  Args:
    data_path: The path to the save directory.
    data_fname_stem: The filename stem of the file in which to write the data.
    dataset_dict: The dictionary of datasets. The keys are strings
      and the values data dictionaries (str -> numpy arrays) associations.
    compression (optional): The compression to use for h5py (disabled by
      default because the library borks on scalars, otherwise try 'gzip').
  """
  stem = os.path.join(data_path, data_fname_stem)
  for suffix, ddict in dataset_dict.items():
    # One file per dataset, e.g. "<stem>_train".
    write_data("_".join([stem, suffix]), ddict, compression=compression)
def read_datasets(data_path, data_fname_stem):
"""Read dataset sin HD5F format.
This function assumes the dataset_dict is a mapping ( string ->
to data_dict ). It calls write_data for each data dictionary,
post-fixing the data filename with the key of the dataset.
Args:
data_path: The path to the save directory.
data_fname_stem: The filename stem of the file in which to write the data.
"""
dataset_dict = {}
fnames = os.listdir(d |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark: run pyaf on a synthetic daily series (N=1024) with a
# polynomial trend, no cycle, Anscombe transform, 20 exogenous variables and
# an AR order of 12.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);
from sympy.external import i | mport_module
from sympy.utilities.pytest import warns |
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
    """import_module with fromlist=['cm', 'collections'] must return
    matplotlib's ``collections`` submodule, not the stdlib module."""
    import collections
    mpl = import_module('matplotlib',
                        __import__kwargs={'fromlist': ['cm', 'collections']},
                        min_module_version='1.1.0', catch=(RuntimeError,))
    # Skip silently when matplotlib is absent or too old.
    if mpl:
        assert collections != mpl.collections
def test_no_stdlib_collections2():
    """Same check with fromlist=['collections'] alone: the stdlib
    ``collections`` must not shadow matplotlib's submodule."""
    import collections
    mpl = import_module('matplotlib',
                        __import__kwargs={'fromlist': ['collections']},
                        min_module_version='1.1.0', catch=(RuntimeError,))
    # Skip silently when matplotlib is absent or too old.
    if mpl:
        assert collections != mpl.collections
def test_no_stdlib_collections3():
    """Same check again, but without a ``catch`` argument."""
    import collections
    mpl = import_module('matplotlib',
                        __import__kwargs={'fromlist': ['cm', 'collections']},
                        min_module_version='1.1.0')
    if mpl:
        assert collections != mpl.collections
def test_min_module_version_python3_basestring_error():
    """Requesting an impossibly new mpmath should emit a UserWarning
    rather than raising."""
    with warns(UserWarning):
        import_module('mpmath', min_module_version='1000.0.1')
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PrepaymentBonusResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Model describing a prepayment bonus as returned by the Server API.
    """
    def __init__(self, id=None, name=None, id_product=None, id_product_attribute=None, amount=None, type=None, date_add=None, date_upd=None):
        """
        PrepaymentBonusResponse - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Attribute name -> declared Swagger type (consumed by to_dict()).
        self.swagger_types = {
            'id': 'int',
            'name': 'str',
            'id_product': 'int',
            'id_product_attribute': 'int',
            'amount': 'float',
            'type': 'str',
            'date_add': 'str',
            'date_upd': 'str'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'id_product': 'id_product',
            'id_product_attribute': 'id_product_attribute',
            'amount': 'amount',
            'type': 'type',
            'date_add': 'date_add',
            'date_upd': 'date_upd'
        }
        self._id = id
        self._name = name
        self._id_product = id_product
        self._id_product_attribute = id_product_attribute
        self._amount = amount
        self._type = type
        self._date_add = date_add
        self._date_upd = date_upd

    @property
    def id(self):
        """
        Gets the id of this PrepaymentBonusResponse.

        :return: The id of this PrepaymentBonusResponse.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this PrepaymentBonusResponse.

        :param id: The id of this PrepaymentBonusResponse.
        :type: int
        """
        self._id = id

    @property
    def name(self):
        """
        Gets the name of this PrepaymentBonusResponse.

        :return: The name of this PrepaymentBonusResponse.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this PrepaymentBonusResponse.

        :param name: The name of this PrepaymentBonusResponse.
        :type: str
        """
        self._name = name

    @property
    def id_product(self):
        """
        Gets the id_product of this PrepaymentBonusResponse.

        :return: The id_product of this PrepaymentBonusResponse.
        :rtype: int
        """
        return self._id_product

    @id_product.setter
    def id_product(self, id_product):
        """
        Sets the id_product of this PrepaymentBonusResponse.

        :param id_product: The id_product of this PrepaymentBonusResponse.
        :type: int
        """
        self._id_product = id_product

    @property
    def id_product_attribute(self):
        """
        Gets the id_product_attribute of this PrepaymentBonusResponse.

        :return: The id_product_attribute of this PrepaymentBonusResponse.
        :rtype: int
        """
        return self._id_product_attribute

    @id_product_attribute.setter
    def id_product_attribute(self, id_product_attribute):
        """
        Sets the id_product_attribute of this PrepaymentBonusResponse.

        :param id_product_attribute: The id_product_attribute of this PrepaymentBonusResponse.
        :type: int
        """
        self._id_product_attribute = id_product_attribute

    @property
    def amount(self):
        """
        Gets the amount of this PrepaymentBonusResponse.

        :return: The amount of this PrepaymentBonusResponse.
        :rtype: float
        """
        return self._amount

    @amount.setter
    def amount(self, amount):
        """
        Sets the amount of this PrepaymentBonusResponse.

        :param amount: The amount of this PrepaymentBonusResponse.
        :type: float
        """
        self._amount = amount

    @property
    def type(self):
        """
        Gets the type of this PrepaymentBonusResponse.

        :return: The type of this PrepaymentBonusResponse.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this PrepaymentBonusResponse.

        :param type: The type of this PrepaymentBonusResponse.
        :type: str
        """
        self._type = type

    @property
    def date_add(self):
        """
        Gets the date_add of this PrepaymentBonusResponse.

        :return: The date_add of this PrepaymentBonusResponse.
        :rtype: str
        """
        return self._date_add

    @date_add.setter
    def date_add(self, date_add):
        """
        Sets the date_add of this PrepaymentBonusResponse.

        :param date_add: The date_add of this PrepaymentBonusResponse.
        :type: str
        """
        self._date_add = date_add

    @property
    def date_upd(self):
        """
        Gets the date_upd of this PrepaymentBonusResponse.

        :return: The date_upd of this PrepaymentBonusResponse.
        :rtype: str
        """
        return self._date_upd

    @date_upd.setter
    def date_upd(self, date_upd):
        """
        Sets the date_upd of this PrepaymentBonusResponse.

        :param date_upd: The date_upd of this PrepaymentBonusResponse.
        :type: str
        """
        self._date_upd = date_upd

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Iterate only the declared attributes so bookkeeping dicts
        # (swagger_types, attribute_map) are not serialized.
        # Uses dict.items() directly instead of six.iteritems (works on
        # both Python 2 and 3).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items())
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against unrelated types: the previous implementation read
        # other.__dict__ unconditionally and raised AttributeError when
        # compared with e.g. a string or None.
        if not isinstance(other, PrepaymentBonusResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
import pygame
from pygame.locals import *
import random
import itertools
import state
import block
import tetros
import states
from text import Text
from colors import Colors
from engine import Engine
from playfield import Playfield
from countdown import Countdown
class GameState(state.State):
    """Main play state: spawns tetrominoes, applies input and gravity,
    and lands pieces on the playfield."""

    # Tetromino shapes that can spawn, and the palette to pick colors from.
    tetro_classes = (tetros.Leftsnake, tetros.Rightsnake, tetros.Stick,
                        tetros.Square, tetros.Tee, tetros.Leftgun,
                        tetros.Rightgun)
    tetro_colors = (Colors.ORANGE, Colors.RED, Colors.BLUE, Colors.YELLOW)
    def __init__(self):
        """Build the playfield and anchor it near the bottom of the screen."""
        super(GameState, self).__init__()
        self.falling_tetro = None
        # nrows should be 22
        self.playfield = Playfield(10, 15)
        self.playfield.rect.centerx = Engine.screenrect.centerx
        self.playfield.rect.bottom = Engine.screenrect.bottom - block.SIZE
        self.members.append(self.playfield)
        #
        # self.kill()
        # # start a countdown, and revive ourself when done
        # self.intro = Countdown(3000, 256, self.revive)
        # self.intro.rect.center = Engine.screenrect.center
        # self.members.append(self.intro)
    def update(self):
        """Advance one frame: input, horizontal move, gravity, spawning."""
        # escape back to main menu
        if Engine.is_just_pressed(K_ESCAPE):
            Engine.switch(states.MainMenuState())
        if not self.alive:
            super(GameState, self).update()
            return
        # update falling tetro
        # X movements
        if self.falling_tetro is not None:
            dx = 0
            #
            if Engine.pressed(K_LEFT):
                dx = -block.SIZE
            if Engine.pressed(K_RIGHT):
                dx = block.SIZE
            #
            if dx != 0:
                self.falling_tetro.move(dx, 0)
                # move it back if any of it's block are now outside the
                # playfield
                for tblock in self.falling_tetro.members:
                    if (tblock.rect.x < self.playfield.rect.x
                            or tblock.rect.right > self.playfield.rect.right):
                        self.falling_tetro.move(-dx, 0)
                        break
                else:
                    # not colliding with "walls" check against well blocks
                    well_blocks = self.playfield.get_well_blocks()
                    for tblock, wblock in itertools.product(
                            self.falling_tetro.members, well_blocks):
                        if tblock.rect.colliderect(wblock.rect):
                            # move it back and land
                            self.falling_tetro.move(-dx, 0)
                            break
                    else:
                        # for-else: no collision broke the loop, so the
                        # sideways move is accepted and the column updated.
                        self.falling_tetro.col += 1 if dx > 0 else -1
        # Y movements
        if (self.falling_tetro is not None and self.falling_tetro.dropping):
            self.falling_tetro.drop_delay_counter += Engine.elapsed
            if self.falling_tetro.drop_delay_counter > self.falling_tetro.drop_delay:
                # move and check for collisions
                dy = block.SIZE
                self.falling_tetro.move(0, dy)
                #
                well_blocks = self.playfield.get_well_blocks()
                # collision with well bottom
                for tblock in self.falling_tetro.members:
                    if tblock.rect.bottom > self.playfield.rect.bottom:
                        # move it back and land
                        self.falling_tetro.move(0, -dy)
                        if self.falling_tetro.row < 0:
                            # Landed above the well: game over.
                            self.kill()
                            return
                        self.falling_tetro.land(self.playfield)
                        self.falling_tetro = None
                        break
                else:
                    # collision with blocks in the well
                    for tblock, wblock in itertools.product(
                            self.falling_tetro.members, well_blocks):
                        if tblock.rect.colliderect(wblock.rect):
                            # move it back and land
                            self.falling_tetro.move(0, -dy)
                            if self.falling_tetro.row < 0:
                                self.kill()
                                return
                            self.falling_tetro.land(self.playfield)
                            self.falling_tetro = None
                            break
                    else:
                        # update row
                        self.falling_tetro.row += 1
                # reset counter — only when the piece is still falling: after
                # landing, falling_tetro is None and the old unconditional
                # assignment raised AttributeError.
                if self.falling_tetro is not None:
                    self.falling_tetro.drop_delay_counter = 0
        # new tetro if needed
        if self.falling_tetro is None:
            color = random.choice(self.tetro_colors)
            tetro_cls = random.choice(self.tetro_classes)
            #
            # not giving the startx-y may get the tetromino and playfield out
            # of sync because startx-y default to zero
            startx = self.playfield.rect.x + block.SIZE * 4
            starty = self.playfield.rect.y - block.SIZE * 4
            self.falling_tetro = tetro_cls(color,
                                           startx=startx,
                                           starty=starty,
                                           drop_delay=50)
            #
            self.members.append(self.falling_tetro)
            self.falling_tetro.drop()
        super(GameState, self).update()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from .estestcase import ESTestCase
from pyes.facets import DateHistogramFacet
from pyes.filters import TermFilter, RangeFilter
from pyes.query import FilteredQuery, MatchAllQuery, Search
from pyes.utils import ESRange
import datetime
class FacetSearchTestCase(ESTestCase):
    """Integration tests for term and date-histogram facets against a
    small three-document index built in setUp."""

    def setUp(self):
        super(FacetSearchTestCase, self).setUp()
        mapping = {u'parsedtext': {'boost': 1.0,
                                   'index': 'analyzed',
                                   'store': 'yes',
                                   'type': u'string',
                                   "term_vector": "with_positions_offsets"},
                   u'name': {'boost': 1.0,
                             'index': 'analyzed',
                             'store': 'yes',
                             'type': u'string',
                             "term_vector": "with_positions_offsets"},
                   u'title': {'boost': 1.0,
                              'index': 'analyzed',
                              'store': 'yes',
                              'type': u'string',
                              "term_vector": "with_positions_offsets"},
                   u'position': {'store': 'yes',
                                 'type': u'integer'},
                   u'tag': {'store': 'yes',
                            'type': u'string'},
                   u'date': {'store': 'yes',
                             'type': u'date'},
                   u'uuid': {'boost': 1.0,
                             'index': 'not_analyzed',
                             'store': 'yes',
                             'type': u'string'}}
        self.conn.create_index(self.index_name)
        self.conn.put_mapping(self.document_type, {'properties': mapping}, self.index_name)
        # Three documents: two tagged 'foo', one 'bar'; dates span Apr/May 2011.
        self.conn.index({"name": "Joe Tester",
                         "parsedtext": "Joe Testere nice guy",
                         "uuid": "11111",
                         "position": 1,
                         "tag": "foo",
                         "date": datetime.date(2011, 5, 16)},
                        self.index_name, self.document_type, 1)
        self.conn.index({"name": " Bill Baloney",
                         "parsedtext": "Bill Testere nice guy",
                         "uuid": "22222",
                         "position": 2,
                         "tag": "foo",
                         "date": datetime.date(2011, 4, 16)},
                        self.index_name, self.document_type, 2)
        self.conn.index({"name": "Bill Clinton",
                         "parsedtext": " Bill is not nice guy",
                         "uuid": "33333",
                         "position": 3,
                         "tag": "bar",
                         "date": datetime.date(2011, 4, 28)},
                        self.index_name, self.document_type, 3)
        self.conn.refresh(self.index_name)

    def test_terms_facet(self):
        """A terms facet over 'tag' counts all three documents."""
        q = MatchAllQuery()
        q = q.search()
        q.facet.add_term_facet('tag')
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.facets.tag.terms, [{u'count': 2, u'term': u'foo'},
                                                      {u'count': 1, u'term': u'bar'}])
        # Equality of Search objects depends on the facet field used.
        q2 = MatchAllQuery()
        q2 = q2.search()
        q2.facet.add_term_facet('tag')
        q3 = MatchAllQuery()
        q3 = q3.search()
        q3.facet.add_term_facet('tag')
        self.assertEqual(q2, q3)
        q4 = MatchAllQuery()
        q4 = q4.search()
        q4.facet.add_term_facet('bag')
        self.assertNotEqual(q2, q4)

    def test_terms_facet_filter(self):
        """A term filter restricts both the hits and the facet counts."""
        q = MatchAllQuery()
        q = FilteredQuery(q, TermFilter('tag', 'foo'))
        q = q.search()
        q.facet.add_term_facet('tag')
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 2)
        # Facets are reachable both by item access and attribute access.
        self.assertEqual(resultset.facets['tag']['terms'], [{u'count': 2, u'term': u'foo'}])
        self.assertEqual(resultset.facets.tag.terms, [{u'count': 2, u'term': u'foo'}])
        q2 = MatchAllQuery()
        q2 = FilteredQuery(q2, TermFilter('tag', 'foo'))
        q2 = q2.search()
        q2.facet.add_term_facet('tag')
        q3 = MatchAllQuery()
        q3 = FilteredQuery(q3, TermFilter('tag', 'foo'))
        q3 = q3.search()
        q3.facet.add_term_facet('tag')
        self.assertEqual(q2, q3)
        q4 = MatchAllQuery()
        q4 = FilteredQuery(q4, TermFilter('tag', 'foo'))
        q4 = q4.search()
        q4.facet.add_term_facet('bag')
        self.assertNotEqual(q3, q4)

    def test_date_facet(self):
        """A monthly date histogram buckets the three documents by month."""
        q = MatchAllQuery()
        q = q.search()
        q.facet.facets.append(DateHistogramFacet('date_facet',
                                                 field='date',
                                                 interval='month'))
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.facets.date_facet.entries, [{u'count': 2, u'time': 1301616000000},
                                                               {u'count': 1, u'time': 1304208000000}])
        # Was date(2011, 04, 01) / date(2011, 05, 01): leading-zero integer
        # literals are a SyntaxError on Python 3.
        self.assertEqual(datetime.datetime.fromtimestamp(1301616000000 / 1000.).date(),
                         datetime.date(2011, 4, 1))
        self.assertEqual(datetime.datetime.fromtimestamp(1304208000000 / 1000.).date(),
                         datetime.date(2011, 5, 1))

    def test_date_facet_filter(self):
        """A range filter on April 2011 limits the histogram to one bucket."""
        q = MatchAllQuery()
        q = FilteredQuery(q, RangeFilter(qrange=ESRange('date',
                                                        datetime.date(2011, 4, 1),
                                                        datetime.date(2011, 5, 1),
                                                        include_upper=False)))
        q = q.search()
        q.facet.facets.append(DateHistogramFacet('date_facet',
                                                 field='date',
                                                 interval='month'))
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 2)
        self.assertEqual(resultset.facets['date_facet']['entries'], [{u'count': 2, u'time': 1301616000000}])
# Allow running this test module directly with `python <module>.py`.
if __name__ == "__main__":
    unittest.main()
|
t)
if not default.get('name'):
default['name'] = _("%s (copy)") % current.name
if 'remaining_hours' not in default:
default['remaining_hours'] = current.planned_hours
return super(task, self).copy_data(cr, uid, id, default, context)
_columns = {
'active': fields.boolean('Active'),
'name': fields.char('Task Title', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.html('Description'),
'priority': fields.selection([('0','Normal'), ('1','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'tag_ids': fields.many2many('project.tags', string='Tags', oldname='categ_ids'),
'kanban_state': fields.selection([('normal', 'In Progress'),('done', 'Ready for next stage'),('blocked', 'Blocked')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=True, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_assign': fields.datetime('Assigning Date', select=True, copy=False, readonly=True),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False, readonly=True),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically | by the assignee of the task."),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'manager_id': fields.related('project_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=Tru | e),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'attachment_ids': fields.one2many('ir.attachment', 'res_id', domain=lambda self: [('res_model', '=', self._name)], auto_join=True, string='Attachments'),
# In the domain of displayed_image_id, we couln't use attachment_ids because a one2many is represented as a list of commands so we used res_model & res_id
'displayed_image_id': fields.many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Displayed Image'),
'legend_blocked': fields.related("stage_id", "legend_blocked", type="char", string='Kanban Blocked Explanation'),
'legend_done': fields.related("stage_id", "legend_done", type="char", string='Kanban Valid Explanation'),
'legend_normal': fields.related("stage_id", "legend_normal", type="char", string='Kanban Ongoing Explanation'),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': lambda self, cr, uid, ctx=None: ctx.get('default_project_id') if ctx is not None else False,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
'date_start': fields.datetime.now,
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
    def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
        """Depth-first search for a parent/child cycle reachable from ``id``.

        ``visited_branch`` holds the ids on the current DFS path (cycle
        detection); ``visited_node`` memoizes fully-explored ids.
        Returns False when a cycle is found, True otherwise.
        """
        if id in visited_branch: #Cycle
            return False
        if id in visited_node: #Already tested don't work one more time for nothing
            return True
        visited_branch.add(id)
        visited_node.add(id)
        #visit child using DFS
        task = self.browse(cr, uid, id, context=context)
        for child in task.child_ids:
            res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
            if not res:
                return False
        # Backtrack: drop this id from the current path so sibling branches
        # are not wrongly flagged as cycles (it stays in visited_node).
        visited_branch.remove(id)
        return True
def _check_dates(self, cr, uid, ids, context=None):
if context == None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end :
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task starting date must be lower than its ending date.', ['date_start','date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
res = super(task, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
try:
# using get_object to get translation value
uom_hour = self.pool['ir.model.data'].get_object(cr, uid, 'product', ' |
ls.ForeignKey(UserProfile, related_name='user_target')
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Category(models.Model):
    # Hierarchical tag attached to projects and modules through the
    # ProjectCategory / ModuleCategory join tables below.
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
    parent = models.ForeignKey('self', null=True)  # None for top-level categories
    deleted = models.BooleanField(default=False)  # soft-delete flag; rows are never removed
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)  # set once on insert
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)  # refreshed on every save
class Project(models.Model):
    # Top-level container owned by a Requester; groups Modules (milestones).
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the project name!"})
    start_date = models.DateTimeField(auto_now_add=True, auto_now=False)
    # NOTE(review): auto_now_add freezes end_date at creation time as well —
    # presumably a placeholder; confirm how the real end date is meant to be set.
    end_date = models.DateTimeField(auto_now_add=True, auto_now=False)
    owner = models.ForeignKey(Requester, related_name='project_owner')
    description = models.CharField(max_length=1024, default='')
    collaborators = models.ManyToManyField(Requester, through='ProjectRequester')
    keywords = models.TextField(null=True)
    save_to_drive = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)  # soft-delete flag
    categories = models.ManyToManyField(Category, through='ProjectCategory')
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProjectRequester(models.Model):
    """
    Tracks the list of requesters that collaborate on a specific project
    """
    requester = models.ForeignKey(Requester)
    project = models.ForeignKey(Project)
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        # A requester can collaborate on a given project at most once.
        unique_together = ('requester', 'project')
class Template(models.Model):
    # Reusable task layout built from TemplateItem widgets; can optionally be
    # shared with other users.
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
    owner = models.ForeignKey(UserProfile)
    source_html = models.TextField(default=None, null=True)  # raw HTML the template was built from, if any
    price = models.FloatField(default=0)
    share_with_others = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)  # soft-delete flag
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Module(models.Model):
    """
    A group of similar tasks of the same kind (aka Milestone).

    Fields
    -repetition: number of times a task needs to be performed
    """
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the module name!"})
    description = models.TextField(error_messages={'required': "Please enter the module description!"})
    owner = models.ForeignKey(Requester)
    project = models.ForeignKey(Project, related_name='modules')
    categories = models.ManyToManyField(Category, through='ModuleCategory')
    keywords = models.TextField(null=True)
    # TODO: To be refined
    statuses = ((1, "Created"),
                (2, 'In Review'),
                (3, 'In Progress'),
                (4, 'Completed')
                )
    status = models.IntegerField(choices=statuses, default=1)
    price = models.FloatField()
    repetition = models.IntegerField(default=1)
    module_timeout = models.IntegerField(default=0)
    has_data_set = models.BooleanField(default=False)
    # Location of the attached data set when has_data_set is True.
    data_set_location = models.CharField(max_length=256, default='No data set', null=True)
    task_time = models.FloatField(default=0)  # in minutes
    deleted = models.BooleanField(default=False)  # soft-delete flag
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    template = models.ManyToManyField(Template, through='ModuleTemplate')
    is_micro = models.BooleanField(default=True)
    is_prototype = models.BooleanField(default=False)
class ModuleCategory(models.Model):
    # Join table between Module and Category.
    module = models.ForeignKey(Module)
    category = models.ForeignKey(Category)
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        # Each category can be attached to a module at most once.
        unique_together = ('category', 'module')
class ProjectCategory(models.Model):
    # Join table between Project and Category.
    project = models.ForeignKey(Project)
    category = models.ForeignKey(Category)
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        # Each category can be attached to a project at most once.
        unique_together = ('project', 'category')
class TemplateItem(models.Model):
    # A single widget/field inside a Template, rendered in ``position`` order
    # (see Meta.ordering below).
    name = models.CharField(max_length=128, error_messages={'required': "Please enter the name of the template item!"})
    template = models.ForeignKey(Template, related_name='template_items')
    id_string = models.CharField(max_length=128)
    role = models.CharField(max_length=16)
    icon = models.CharField(max_length=256, null=True)
    data_source = models.CharField(max_length=256, null=True)
    layout = models.CharField(max_length=16, default='column')
    type = models.CharField(max_length=16)
    sub_type = models.CharField(max_length=16)
    values = models.TextField(null=True)
    position = models.IntegerField()  # display order within the template
    deleted = models.BooleanField(default=False)  # soft-delete flag
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        ordering = ['position']
class ModuleTemplate(models.Model):
    # Join table linking a Module to the Template(s) it uses.
    module = models.ForeignKey(Module)
    template = models.ForeignKey(Template)
class TemplateItemProperties(models.Model):
    # Rule attached to a TemplateItem, stored as
    # (attribute, operator, value1, value2).
    # NOTE(review): presumably a validation/conditional-display rule —
    # confirm against the code that evaluates these rows.
    template_item = models.ForeignKey(TemplateItem)
    attribute = models.CharField(max_length=128)
    operator = models.CharField(max_length=128)
    value1 = models.CharField(max_length=128)
    value2 = models.CharField(max_length=128)
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Task(models.Model):
    # One unit of work generated for a Module; ``data`` carries the payload
    # shown to workers.
    module = models.ForeignKey(Module, related_name='module_tasks')
    # TODO: To be refined
    statuses = ((1, "Created"),
                (2, 'Accepted'),
                (3, 'Assigned'),
                (4, 'Finished')
                )
    status = models.IntegerField(choices=statuses, default=1)
    data = models.TextField(null=True)
    deleted = models.BooleanField(default=False)  # soft-delete flag
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    price = models.FloatField(default=0)
class TaskWorker(models.Model):
    # Assignment of a Task to a Worker, with the per-worker task lifecycle.
    task = models.ForeignKey(Task, related_name='task_workers')
    worker = models.ForeignKey(Worker)
    statuses = ((1, 'Created'),
                (2, 'In Progress'),
                (3, 'Accepted'),
                (4, 'Rejected'),
                (5, 'Returned'),
                (6, 'Skipped')
                )
    task_status = models.IntegerField(choices=statuses, default=1)
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class TaskWorkerResult(models.Model):
    # A worker's answer for one TemplateItem of an assigned task.
    task_worker = models.ForeignKey(TaskWorker, related_name='task_worker_results')
    result = models.TextField()
    template_item = models.ForeignKey(TemplateItem)
    # TODO: To be refined
    statuses = ((1, 'Created'),
                (2, 'Accepted'),
                (3, 'Rejected')
                )
    status = models.IntegerField(choices=statuses, default=1)
    created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class WorkerModuleApplication(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import wizzat.testutil
import wizzat.pghelper
class DBTestCase(wizzat.testutil.TestCase):
    """Base test case wired to a small pool of local Postgres connections."""
    # Credentials for the throwaway test database.
    db_info = {
        'host'       : 'localhost',
        'port'       : 5432,
        'user'       : 'wizzat',
        'password'   : 'wizzat',
        'database'   : 'wizzatpy_testdb',
        'autocommit' : False,
    }
    # Shared connection manager, capped at three live connections.
    db_mgr = wizzat.pghelper.ConnMgr(db_info, max_objs=3)
    def conn(self, name='testconn'):
        """Fetch the named managed connection with autocommit enabled."""
        connection = self.db_mgr.name(name)
        connection.autocommit = True
        return connection
|
"""A block Davidson solver for finding a fixed number of eigenvalues.
Adapt | ed from https://joshuagoings.com/2013/08/23/davidsons-method/
"""
import time
from typing import Tuple
import numpy as np
from tqdm import tqdm
def davidson(A: np.ndarray, k: int, eig: int,
             tol: float = 1e-8, mmax=None) -> Tuple[np.ndarray, np.ndarray]:
    """Block Davidson iteration for the lowest eigenvalues of a symmetric matrix.

    Previously ``tol`` and ``mmax`` were read from module-level globals that
    only exist when this file runs as a script, so importing and calling
    ``davidson`` raised ``NameError``. They are now parameters whose defaults
    reproduce the script's values.

    Args:
        A: (n, n) symmetric matrix to (partially) diagonalize.
        k: block size — number of guess vectors added per expansion step.
        eig: number of eigenvalues whose convergence is monitored.
        tol: convergence threshold on the change of the lowest ``eig``
            Ritz values between iterations.
        mmax: maximum subspace dimension; defaults to ``n // 2``.

    Returns:
        Tuple ``(theta, V)`` of the sorted Ritz values and the final
        orthonormalized subspace basis.
    """
    assert len(A.shape) == 2
    assert A.shape[0] == A.shape[1]
    n = A.shape[0]
    if mmax is None:
        mmax = n // 2
    ## set up subspace and trial vectors
    # set of k unit vectors as guess
    t = np.eye(n, k)
    # hold guess vectors
    V = np.zeros((n, n))
    I = np.eye(n)
    for m in tqdm(range(k, mmax, k)):
        if m <= k:
            # First pass: seed the subspace with normalized guess vectors.
            for j in range(k):
                V[:, j] = t[:, j] / np.linalg.norm(t[:, j])
            theta_old = 1
        elif m > k:
            theta_old = theta[:eig]
        # Re-orthonormalize the subspace (R factor is not needed).
        V, _ = np.linalg.qr(V)
        # Rayleigh-Ritz: project A into the current subspace and diagonalize.
        T = V[:, : (m + 1)].T @ A @ V[:, : (m + 1)]
        THETA, S = np.linalg.eig(T)
        idx = THETA.argsort()
        theta = THETA[idx]
        s = S[:, idx]
        # Expand the subspace with preconditioned residual vectors.
        for j in range(k):
            w = (A - theta[j] * I) @ V[:, : (m + 1)] @ s[:, j]
            q = w / (theta[j] - A[j, j])
            V[:, (m + j + 1)] = q
        norm = np.linalg.norm(theta[:eig] - theta_old)
        if norm < tol:
            break
    return theta, V
if __name__ == "__main__":
    # Benchmark the Davidson solver against a full numpy diagonalization.
    # dimension of problem
    n = 1200
    # convergence tolerance
    tol = 1e-8
    # maximum number of iterations
    mmax = n // 2
    ## set up fake Hamiltonian
    sparsity = 1.0e-4
    A = np.zeros((n, n))
    for i in range(0, n):
        A[i, i] = i + 1
    A = A + sparsity * np.random.randn(n, n)
    # Symmetrize so the eigenvalues are real.
    A = (A.T + A) / 2
    # number of initial guess vectors
    k = 8
    # number of eigenvalues to solve
    eig = 4
    start_davidson = time.time()
    theta, V = davidson(A, k, eig)
    end_davidson = time.time()
    print(f"davidson = {theta[:eig]}; {end_davidson - start_davidson} seconds")
    # Dense reference diagonalization for comparison.
    start_numpy = time.time()
    E, Vec = np.linalg.eig(A)
    E = np.sort(E)
    end_numpy = time.time()
    print(f"numpy = {E[:eig]}; {end_numpy - start_numpy} seconds")
|
import tests.units.tournaments
import lib.datalayer
import games
import games.settlers
import tournaments
import hruntime
from tests import *
from tests.units.tournaments import create_and_populate_tournament
class Tests(TestCase):
    """Sanity checks for tournament creation with the randomized engine."""
    @classmethod
    def setup_class(cls):
        super(Tests, cls).setup_class()
        # Give the runtime a fresh data root containing the SYSTEM user
        # that tournament bookkeeping expects to exist.
        root = lib.datalayer.Root()
        root.users['SYSTEM'] = tests.DummyUser('SYSTEM')
        hruntime.dbroot = root
    def test_sanity(self):
        # Exact number of times each event is expected to fire while the
        # tournament is created and populated.
        expected_event_counts = {
            'tournament.Created': 2,
            'tournament.PlayerJoined': 12,
            'game.GameCreated': 8,
            'game.PlayerJoined': 4,
            'game.PlayerInvited': 8
        }
        with EventPatcherWithCounter(expected_event_counts):
            create_and_populate_tournament(engine='randomized')
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from novaideo.content.processes.smart_folder_management.behaviors import (
RemoveSmartFolder)
from novaideo.content.smart_folder import SmartFolder
from novaideo import _
class RemoveSmartFolderViewStudyReport(BasicView):
    """Confirmation alert shown before a smart folder is removed."""
    title = 'Alert for remove'
    name = 'alertforremove'
    template = 'novaideo:views/smart_folder_management/templates/alert_smartfolder_remove.pt'

    def update(self):
        """Render the alert template and place it at this view's coordinates."""
        rendered = self.content(args={'context': self.context},
                                template=self.template)['body']
        item = self.adapt_item(rendered, self.viewid)
        return {'coordinates': {self.coordinates: [item]}}
class RemoveSmartFolderView(FormView):
    """Ajax form that carries the RemoveSmartFolder behavior."""
    title = _('Remove')
    name = 'removesmartfolderform'
    formid = 'formremovesmartfolder'
    behaviors = [RemoveSmartFolder, Cancel]
    validate_behaviors = False

    def before_update(self):
        """Point the form at the novaideo ajax endpoint and style the widget."""
        query = {'op': 'update_action_view',
                 'node_id': RemoveSmartFolder.node_definition.id}
        self.action = self.request.resource_url(
            self.context, 'novaideoapi', query=query)
        self.schema.widget = deform.widget.FormWidget(
            css_class='deform novaideo-ajax-form')
@view_config(
    name='removesmartfolder',
    context=SmartFolder,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class RemoveSmartFolderViewMultipleView(MultipleView):
    # Composite view: the study-report alert first, then the removal form.
    title = _('Remove the topic of interest')
    name = 'removesmartfolder'
    viewid = 'removesmartfolder'
    template = 'pontus:templates/views_templates/simple_multipleview.pt'
    views = (RemoveSmartFolderViewStudyReport, RemoveSmartFolderView)
    validators = [RemoveSmartFolder.get_validator()]
# Route the RemoveSmartFolder behavior to this composite view.
DEFAULTMAPPING_ACTIONS_VIEWS.update(
    {RemoveSmartFolder: RemoveSmartFolderViewMultipleView})
|
from .variables import *
def Cell(node):
    """Translate a cell literal: returns the (prefix, separator, postfix)
    tokens for brace initialisation.

    A cell that is not the right-hand side of an assignment is hoisted into
    an auxiliary variable so it stands on a line of its own.
    """
    standalone = node.parent.cls not in ("Assign", "Assigns")
    if standalone:
        node.auxiliary("cell")
    return "{", ",", "}"
def Assign(node):
    """Translate assignment of a cell: emit a header (va_arg extraction for
    ``varargin``, otherwise a ``clear()``) followed by one ``push_back`` per
    element of the right-hand side."""
    if node.name == 'varargin':
        header = "%(0)s = va_arg(varargin, " + node[0].type + ") ;"
    else:
        header = "%(0)s.clear() ;"
    pushes = ["\n%(0)s.push_back(" + str(elem) + ") ;" for elem in node[1]]
    return header + "".join(pushes)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 09:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, m | odels
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: alters Exercise.author to use
    # on_delete=PROTECT, so deleting a user who authored exercises raises
    # instead of cascading.
    dependencies = [
        ('exercises', '0004_exercise_author'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exercise',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# Copyright (c) 2017 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gio
from eolie.define import PROXY_BUS, PROXY_PATH, PROXY_INTERFACE, El
class DBusHelper:
    """
    Simpler helper for DBus
    """

    def __init__(self):
        # page_id -> (connection, subscription id) for active signal handlers
        self.__signals = {}

    def call(self, call, page_id, dbus_args=None, callback=None, *args):
        """
        Call function
        @param call as str
        @param page_id as int
        @param dbus_args as GLib.Variant()/None
        @param callback as function
        """
        try:
            bus = El().get_dbus_connection()
            proxy_bus = PROXY_BUS % page_id
            Gio.DBusProxy.new(bus, Gio.DBusProxyFlags.NONE, None,
                              proxy_bus,
                              PROXY_PATH,
                              PROXY_INTERFACE, None,
                              self.__on_get_proxy,
                              call, dbus_args, callback, *args)
        except Exception as e:
            print("DBusHelper::call():", e)

    def connect(self, signal, callback, page_id):
        """
        Connect callback to object signals
        @param signal as str
        @param callback as function
        @param page_id as int
        """
        try:
            bus = El().get_dbus_connection()
            proxy_bus = PROXY_BUS % page_id
            subscribe_id = bus.signal_subscribe(None, proxy_bus, signal,
                                                PROXY_PATH, None,
                                                Gio.DBusSignalFlags.NONE,
                                                callback)
            # NOTE(review): a second connect() for the same page_id overwrites
            # the stored subscription without unsubscribing the old one —
            # confirm callers never connect twice per page.
            self.__signals[page_id] = (bus, subscribe_id)
        except Exception as e:
            print("DBusHelper::connect():", e)

    def disconnect(self, page_id):
        """
        Disconnect signal
        @param page_id as int
        """
        # Membership test on the dict directly; the previous ``.keys()``
        # call was redundant.
        if page_id in self.__signals:
            (bus, subscribe_id) = self.__signals[page_id]
            bus.signal_unsubscribe(subscribe_id)
            del self.__signals[page_id]

    #######################
    # PRIVATE             #
    #######################
    def __on_get_proxy(self, source, result, call, dbus_args, callback, *args):
        """
        Launch call and connect it to callback
        @param source as GObject.Object
        @param result as Gio.AsyncResult
        @param call as str
        @param dbus_args as GLib.Variant()/None
        @param callback as function
        """
        try:
            proxy = source.new_finish(result)
            proxy.call(call, dbus_args, Gio.DBusCallFlags.NO_AUTO_START,
                       1000, None, callback, *args)
        except Exception as e:
            print("DBusHelper::__on_get_proxy():", e)
            # Still invoke the callback so callers are not left hanging.
            callback(None, None, *args)
|
from __future__ import print_function, unicode_literals, division, absolute_import
import datetime
import time
import ntplib
from pyotp import utils
from pyotp.otp import OTP
class TOTP(OTP):
    """Time-based one-time password generator."""

    # NTP clock offset in whole seconds, shared by all instances and
    # resolved lazily on first construction.
    systime_offset = None

    def __init__(self, *args, **kwargs):
        """
        @option options [Integer] interval (30) the time interval in seconds
        for OTP This defaults to 30 which is standard.
        """
        self.interval = kwargs.pop('interval', 30)
        if self.systime_offset is None:
            try:
                client = ntplib.NTPClient()
                # Cache on the class so later instances skip the NTP query.
                TOTP.systime_offset = int(client.request(
                    'pool.ntp.org', version=3).offset)
            except Exception:
                # NOTE(review): failure sets only the *instance* attribute,
                # so the next instance retries NTP — presumably intentional.
                self.systime_offset = 0
        super(TOTP, self).__init__(*args, **kwargs)

    def at(self, for_time, counter_offset=0):
        """
        Accepts either a Unix timestamp integer or a Time object.
        Time objects will be adjusted to UTC automatically
        @param [Time/Integer] time the time to generate an OTP for
        @param [Integer] counter_offset an amount of ticks to add to the time counter
        """
        when = for_time
        if not isinstance(when, datetime.datetime):
            when = datetime.datetime.fromtimestamp(int(when))
        return self.generate_otp(self.timecode(when) + counter_offset)

    def now(self):
        """
        Generate the current time OTP
        @return [Integer] the OTP as an integer
        """
        return self.generate_otp(self.timecode(datetime.datetime.now()))

    def verify(self, otp, for_time=None, valid_window=0):
        """
        Verifies the OTP passed in against the current time OTP
        @param [String/Integer] otp the OTP to check against
        @param [Integer] valid_window extends the validity to this many counter ticks before and after the current one
        """
        if for_time is None:
            for_time = datetime.datetime.now()
        if valid_window:
            # Accept any OTP within +/- valid_window ticks of for_time.
            return any(
                utils.strings_equal(str(otp), str(self.at(for_time, offset)))
                for offset in range(-valid_window, valid_window + 1)
            )
        return utils.strings_equal(str(otp), str(self.at(for_time)))

    def provisioning_uri(self, name, issuer_name=None):
        """
        Returns the provisioning URI for the OTP
        This can then be encoded in a QR Code and used
        to provision the Google Authenticator app
        @param [String] name of the account
        @return [String] provisioning uri
        """
        return utils.build_uri(self.secret, name, issuer_name=issuer_name)

    def timecode(self, for_time):
        """Map a datetime onto its interval counter, NTP-corrected."""
        seconds = time.mktime(for_time.timetuple()) + self.systime_offset
        return int(seconds / self.interval)
|
import csv
from | . import WorksheetBase, WorkbookBase, CellMode
class CSVWorksheet(WorksheetBase):
    """Adapter exposing a parsed CSV (a list of rows) as a worksheet."""
    def __init__(self, raw_sheet, ordinal):
        super().__init__(raw_sheet, ordinal)
        # CSV files carry no sheet metadata, so use a fixed display name.
        self.name = "Sheet 1"
        self.nrows = len(self.raw_sheet)
        self.ncols = max(len(row) for row in self.raw_sheet)
    def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
        """Coerce a raw string cell to int, then float; fall back to text."""
        for convert in (int, float):
            try:
                return convert(cell)
            except ValueError:
                pass
        # TODO Check for dates?
        return cell
    def get_row(self, row_index):
        """Return the raw row at ``row_index``."""
        return self.raw_sheet[row_index]
class CSVWorkbook(WorkbookBase):
    """Workbook adapter treating one CSV file as a single sheet."""
    def iterate_sheets(self):
        # A CSV file contains exactly one "sheet": its materialized row list.
        with open(self.filename, "r") as source:
            yield list(csv.reader(source))
    def get_worksheet(self, raw_sheet, index):
        return CSVWorksheet(raw_sheet, index)
|
"""The tests for the Graphite component."""
import socket
import unittest
from unittest import mock
import blumate.core as ha
import blumate.components.graphite as graphite
from blumate.const import (
EVENT_STATE_CHANGED,
EVENT_BLUMATE_START, EVENT_BLUMATE_STOP,
STATE_ON, STATE_OFF)
from tests.common import get_test_home_assistant
class TestGraphite(unittest.TestCase):
    """Test the Graphite component."""
    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.hass.config.latitude = 32.87336
        self.hass.config.longitude = 117.22743
        # Feeder under test: host 'foo', port 123, metric prefix 'bm'.
        self.gf = graphite.GraphiteFeeder(self.hass, 'foo', 123, 'bm')
    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
    @mock.patch('blumate.components.graphite.GraphiteFeeder')
    def test_minimal_config(self, mock_gf):
        """Test setup with minimal configuration."""
        # With an empty config the component falls back to its defaults.
        self.assertTrue(graphite.setup(self.hass, {}))
        mock_gf.assert_called_once_with(self.hass, 'localhost', 2003, 'bm')
    @mock.patch('blumate.components.graphite.GraphiteFeeder')
    def test_full_config(self, mock_gf):
        """Test setup with full configuration."""
        config = {
            'graphite': {
                'host': 'foo',
                'port': 123,
                'prefix': 'me',
            }
        }
        self.assertTrue(graphite.setup(self.hass, config))
        mock_gf.assert_called_once_with(self.hass, 'foo', 123, 'me')
    @mock.patch('blumate.components.graphite.GraphiteFeeder')
    def test_config_bad_port(self, mock_gf):
        """Test setup with invalid port."""
        config = {
            'graphite': {
                'host': 'foo',
                'port': 'wrong',
            }
        }
        # Non-numeric port must abort setup before the feeder is created.
        self.assertFalse(graphite.setup(self.hass, config))
        self.assertFalse(mock_gf.called)
    def test_subscribe(self):
        """Test the subscription."""
        fake_hass = mock.MagicMock()
        gf = graphite.GraphiteFeeder(fake_hass, 'foo', 123, 'bm')
        # NOTE(review): ``has_calls`` is not a Mock assertion method (that is
        # ``assert_has_calls``); as written this line cannot fail — confirm.
        fake_hass.bus.listen_once.has_calls([
            mock.call(EVENT_BLUMATE_START, gf.start_listen),
            mock.call(EVENT_BLUMATE_STOP, gf.shutdown),
        ])
        fake_hass.bus.listen.assert_called_once_with(
            EVENT_STATE_CHANGED, gf.event_listener)
    def test_start(self):
        """Test the start."""
        with mock.patch.object(self.gf, 'start') as mock_start:
            self.gf.start_listen('event')
            mock_start.assert_called_once_with()
    def test_shutdown(self):
        """Test the shutdown."""
        with mock.patch.object(self.gf, '_queue') as mock_queue:
            self.gf.shutdown('event')
            mock_queue.put.assert_called_once_with(self.gf._quit_object)
    def test_event_listener(self):
        """Test the event listener."""
        with mock.patch.object(self.gf, '_queue') as mock_queue:
            self.gf.event_listener('foo')
            mock_queue.put.assert_called_once_with('foo')
    @mock.patch('time.time')
    def test_report_attributes(self, mock_time):
        """Test the reporting with attributes."""
        mock_time.return_value = 12345
        attrs = {'foo': 1,
                 'bar': 2.0,
                 'baz': True,
                 'bat': 'NaN',
                 }
        # 'bat' is not numeric, so it must be absent from the report.
        expected = [
            'bm.entity.state 0.000000 12345',
            'bm.entity.foo 1.000000 12345',
            'bm.entity.bar 2.000000 12345',
            'bm.entity.baz 1.000000 12345',
        ]
        state = mock.MagicMock(state=0, attributes=attrs)
        with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
            self.gf._report_attributes('entity', state)
            actual = mock_send.call_args_list[0][0][0].split('\n')
            self.assertEqual(sorted(expected), sorted(actual))
    @mock.patch('time.time')
    def test_report_with_string_state(self, mock_time):
        """Test the reporting with strings."""
        mock_time.return_value = 12345
        expected = [
            'bm.entity.foo 1.000000 12345',
            'bm.entity.state 1.000000 12345',
        ]
        state = mock.MagicMock(state='above_horizon', attributes={'foo': 1.0})
        with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
            self.gf._report_attributes('entity', state)
            actual = mock_send.call_args_list[0][0][0].split('\n')
            self.assertEqual(sorted(expected), sorted(actual))
    @mock.patch('time.time')
    def test_report_with_binary_state(self, mock_time):
        """Test the reporting with binary state."""
        mock_time.return_value = 12345
        # STATE_ON maps to 1.0, STATE_OFF to 0.0.
        state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
        with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
            self.gf._report_attributes('entity', state)
            expected = ['bm.entity.foo 1.000000 12345',
                        'bm.entity.state 1.000000 12345']
            actual = mock_send.call_args_list[0][0][0].split('\n')
            self.assertEqual(sorted(expected), sorted(actual))
        state.state = STATE_OFF
        with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
            self.gf._report_attributes('entity', state)
            expected = ['bm.entity.foo 1.000000 12345',
                        'bm.entity.state 0.000000 12345']
            actual = mock_send.call_args_list[0][0][0].split('\n')
            self.assertEqual(sorted(expected), sorted(actual))
    @mock.patch('time.time')
    def test_send_to_graphite_errors(self, mock_time):
        """Test the sending with errors."""
        mock_time.return_value = 12345
        state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
        # Socket errors must be swallowed; the report calls must not raise.
        with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
            mock_send.side_effect = socket.error
            self.gf._report_attributes('entity', state)
            mock_send.side_effect = socket.gaierror
            self.gf._report_attributes('entity', state)
    @mock.patch('socket.socket')
    def test_send_to_graphite(self, mock_socket):
        """Test the sending of data."""
        self.gf._send_to_graphite('foo')
        mock_socket.assert_called_once_with(socket.AF_INET,
                                            socket.SOCK_STREAM)
        sock = mock_socket.return_value
        sock.connect.assert_called_once_with(('foo', 123))
        sock.sendall.assert_called_once_with('foo'.encode('ascii'))
        sock.send.assert_called_once_with('\n'.encode('ascii'))
        sock.close.assert_called_once_with()
    def test_run_stops(self):
        """Test the stops."""
        with mock.patch.object(self.gf, '_queue') as mock_queue:
            mock_queue.get.return_value = self.gf._quit_object
            self.assertEqual(None, self.gf.run())
            mock_queue.get.assert_called_once_with()
            mock_queue.task_done.assert_called_once_with()
    def test_run(self):
        """Test the running."""
        runs = []
        event = mock.MagicMock(event_type=EVENT_STATE_CHANGED,
                               data={'entity_id': 'entity',
                                     'new_state': mock.MagicMock()})
        # Queue script: first a state-changed event, then an unrelated event,
        # then the quit sentinel to stop the loop.
        def fake_get():
            if len(runs) >= 2:
                return self.gf._quit_object
            elif runs:
                runs.append(1)
                return mock.MagicMock(event_type='somethingelse',
                                      data={'new_event': None})
            else:
                runs.append(1)
                return event
        with mock.patch.object(self.gf, '_queue') as mock_queue:
            with mock.patch.object(self.gf, '_report_attributes') as mock_r:
                mock_queue.get.side_effect = fake_get
                self.gf.run()
                # Twice for two events, once for the stop
                self.assertEqual(3, mock_queue.task_done.call_count)
                mock_r.assert_called_once_with(
                    'entity',
                    event.data['new_state'])
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import eventlet
from st2common import log as logging
from st2reactor.container.process_container import ProcessSensorContainer
from st2common.services.sensor_watcher import SensorWatcher
from st2common.models.system.common import ResourceReference
LOG = logging.getLogger(__name__)
class SensorContainerManager(object):
def __init__(self, sensors_partitioner):
self._sensor_container = None
self._sensors_watcher = SensorWatcher(create_handler=self._handle_create_sensor,
update_handler=self._handle_update_sensor,
delete_handler=self._handle_delete_sensor,
queue_suffix='sensor_container')
self._container_thread = None
if not sensors_partitioner:
raise ValueError('sensors_partitioner should be non-None.')
self._sensors_partitioner = sensors_partitioner
def run_sensors(self):
"""
Run all sensors as determined by sensors_partitioner.
"""
sensors = self._sensors_partitioner.get_sensors()
if sensors:
LOG.info('Setting up container to run %d sensors.', len(sensors))
LOG.info('\tSensors list - %s.', [self._get_sensor_ref(sensor) for sensor in sensors])
sensors_to_run = []
for sensor in sensors:
# TODO: Directly pass DB object to the ProcessContainer
sensors_to_run.append(self._to_sensor_object(sensor))
LOG.info('(PID:%s) SensorContainer started.', os.getpid())
self._setup_sigterm_handler()
self._spin_container_and_wait(sensors_to_run)
def _spin_container_and_wait(self, sensors):
try:
self._sensor_container = ProcessSensorContainer(sensors=sensors)
self._container_thread = eventlet.spawn(self._sensor_container.run)
LOG.debug('Starting sensor CUD watcher...')
self._sensors_watcher.start()
exit_code = self._container_thread.wait()
LOG.error('Process container quit with exit_code %d.', exit_code)
LOG.error('(PID:%s) SensorContainer stopped.', os.getpid())
except (KeyboardInterrupt, SystemExit):
self._sensor_container.shutdown()
self._sensors_watcher.stop()
LOG.info('(PID:%s) SensorContainer stopped. Reason - %s', os.getpid(),
sys.exc_info()[0].__name__)
eventlet.kill(self._container_thread)
self._container_thread = None
return 0
def _setup_sigterm_handler(self):
def sigterm_handler(signum=None, frame=None):
# This will cause SystemExit to be throw and we call sensor_container.shutdown()
| # there which cleans things up.
sys.exit(0)
# Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to
# be thrown. We catch SystemExit and handle cleanup there.
signal.signal(signal.SIGTERM, | sigterm_handler)
def _to_sensor_object(self, sensor_db):
file_path = sensor_db.artifact_uri.replace('file://', '')
class_name = sensor_db.entry_point.split('.')[-1]
sensor_obj = {
'pack': sensor_db.pack,
'file_path': file_path,
'class_name': class_name,
'trigger_types': sensor_db.trigger_types,
'poll_interval': sensor_db.poll_interval,
'ref': self._get_sensor_ref(sensor_db)
}
return sensor_obj
#################################################
# Event handler methods for the sensor CUD events
#################################################
def _handle_create_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring create.', self._get_sensor_ref(sensor))
return
if not sensor.enabled:
LOG.info('sensor %s is not enabled.', self._get_sensor_ref(sensor))
return
LOG.info('Adding sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.add_sensor(sensor=self._to_sensor_object(sensor))
def _handle_update_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring update.', self._get_sensor_ref(sensor))
return
sensor_ref = self._get_sensor_ref(sensor)
sensor_obj = self._to_sensor_object(sensor)
# Handle disabling sensor
if not sensor.enabled:
LOG.info('Sensor %s disabled. Unloading sensor.', sensor_ref)
self._sensor_container.remove_sensor(sensor=sensor_obj)
return
LOG.info('Sensor %s updated. Reloading sensor.', sensor_ref)
try:
self._sensor_container.remove_sensor(sensor=sensor_obj)
except:
LOG.exception('Failed to reload sensor %s', sensor_ref)
else:
self._sensor_container.add_sensor(sensor=sensor_obj)
LOG.info('Sensor %s reloaded.', sensor_ref)
def _handle_delete_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring delete.', self._get_sensor_ref(sensor))
return
LOG.info('Unloading sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.remove_sensor(sensor=self._to_sensor_object(sensor))
def _get_sensor_ref(self, sensor):
    # Canonical "pack.name" resource-reference string for a sensor model.
    return ResourceReference.to_string_reference(pack=sensor.pack, name=sensor.name)
|
"""Runs fast tests."""
import unittest
from tests.kernel_tests import SwiftKernelTes | ts, OwnKernelTests
from tests.simple_notebook_tests import *
if __name__ == '__main__':
unittest.main()
| |
# Copyright (C) 2014 Robby Zeitfuchs (@robbyFux)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundati | on, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public Li | cense
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Cridex(Signature):
    """Flags the Cridex banking trojan via its dropped files and mutex names."""

    name = "banker_cridex"
    description = "Cridex banking trojan"
    severity = 3
    alert = True
    categories = ["Banking", "Trojan"]
    families = ["Cridex"]
    authors = ["Robby Zeitfuchs", "@robbyFux"]
    minimum = "0.5"
    references = ["http://stopmalvertising.com/rootkits/analysis-of-cridex.html",
                  "http://sempersecurus.blogspot.de/2012/08/cridex-analysis-using-volatility.html",
                  "http://labs.m86security.com/2012/03/the-cridex-trojan-targets-137-financial-organizations-in-one-go/",
                  "https://malwr.com/analysis/NDU2ZWJjZTIwYmRiNGVmNWI3MDUyMGExMGQ0MmVhYTY/",
                  "https://malwr.com/analysis/MTA5YmU4NmIwMjg5NDAxYjlhYzZiZGIwYjZkOTFkOWY/"]

    def run(self):
        # Mutex naming patterns characteristic of Cridex.
        mutex_patterns = [".*Local.QM.*",
                          ".*Local.XM.*"]
        # Both the dropped KB########.exe and the cleanup .tmp.bat must exist.
        dropped_exe = self.check_file(pattern=".*\\KB[0-9]{8}\.exe", regex=True)
        cleanup_bat = self.check_file(pattern=".*\\\\Temp\\\\\S{4}\.tmp\.bat", regex=True)
        if not (dropped_exe and cleanup_bat):
            return False
        self.data.append({"file": dropped_exe})
        self.data.append({"batchfile": cleanup_bat})
        # Only report a detection when a known mutex is present as well.
        for pattern in mutex_patterns:
            mutex = self.check_mutex(pattern=pattern, regex=True)
            if mutex:
                self.data.append({"mutex": mutex})
                return True
        return False
|
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.db.utils import NotSupportedError
from django.utils.functional i | mport cached_property
class BaseSpatialOperations:
    """Mixin base for backend-specific spatial database operations.

    Concrete backends (PostGIS, SpatiaLite, MySQL backend, Oracle) set the
    flags and override the methods defined here. This class is combined with
    a regular DatabaseOperations class, so ``super()`` calls below rely on
    cooperative multiple inheritance.
    """
    # Quick booleans for the type of this spatial backend, and
    # an attribute for the spatial database version tuple (if applicable)
    postgis = False
    spatialite = False
    mysql = False
    oracle = False
    spatial_version = None

    # How the geometry column should be selected.
    select = '%s'

    @cached_property
    def select_extent(self):
        # By default, extents are selected the same way as any other geometry.
        return self.select

    # Does the spatial database have a geometry or geography type?
    geography = False
    geometry = False

    # Aggregates
    disallowed_aggregates = ()

    geom_func_prefix = ''

    # Mapping between Django function names and backend names, when names do not
    # match; used in spatial_function_name().
    function_names = {}

    # Blacklist/set of known unsupported functions of the backend
    unsupported_functions = {
        'Area', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'Azimuth',
        'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
        'GeoHash', 'GeometryDistance', 'Intersection', 'IsValid', 'Length',
        'LineLocatePoint', 'MakeValid', 'MemSize', 'NumGeometries',
        'NumPoints', 'Perimeter', 'PointOnSurface', 'Reverse', 'Scale',
        'SnapToGrid', 'SymDifference', 'Transform', 'Translate', 'Union',
    }

    # Constructors
    from_text = False

    # Default conversion functions for aggregates; will be overridden if implemented
    # for the spatial backend.
    def convert_extent(self, box, srid):
        raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')

    def convert_extent3d(self, box, srid):
        raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')

    # For quoting column values, rather than columns.
    def geo_quote_name(self, name):
        return "'%s'" % name

    # GeometryField operations
    def geo_db_type(self, f):
        """
        Return the database column type for the geometry field on
        the spatial backend.
        """
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')

    def get_distance(self, f, value, lookup_type):
        """
        Return the distance parameters for the given geometry field,
        lookup value, and lookup type.
        """
        raise NotImplementedError('Distance operations not available on this spatial backend.')

    def get_geom_placeholder(self, f, value, compiler):
        """
        Return the placeholder for the given geometry field with the given
        value. Depending on the spatial backend, the placeholder may contain a
        stored procedure call to the transformation function of the spatial
        backend.
        """
        def transform_value(value, field):
            # A transform is needed only when the value carries a SRID that
            # differs from the target field's SRID.
            return value is not None and value.srid != field.srid
        if hasattr(value, 'as_sql'):
            # The value is an SQL expression (e.g. another column/function).
            return (
                '%s(%%s, %s)' % (self.spatial_function_name('Transform'), f.srid)
                if transform_value(value.output_field, f)
                else '%s'
            )
        if transform_value(value, f):
            # Add Transform() to the SQL placeholder.
            return '%s(%s(%%s,%s), %s)' % (
                self.spatial_function_name('Transform'),
                self.from_text, value.srid, f.srid,
            )
        elif self.connection.features.has_spatialrefsys_table:
            return '%s(%%s,%s)' % (self.from_text, f.srid)
        else:
            # For backwards compatibility on MySQL (#27464).
            return '%s(%%s)' % self.from_text

    def check_expression_support(self, expression):
        # Reject aggregates this spatial backend cannot run, then defer to the
        # non-spatial operations class for the remaining checks.
        if isinstance(expression, self.disallowed_aggregates):
            raise NotSupportedError(
                "%s spatial aggregation is not supported by this database backend." % expression.name
            )
        super().check_expression_support(expression)

    def spatial_aggregate_name(self, agg_name):
        raise NotImplementedError('Aggregate support not implemented for this spatial backend.')

    def spatial_function_name(self, func_name):
        # Resolve a Django function name to the backend's own name: explicit
        # overrides in function_names win, otherwise the backend prefix applies.
        if func_name in self.unsupported_functions:
            raise NotSupportedError("This backend doesn't support the %s function." % func_name)
        return self.function_names.get(func_name, self.geom_func_prefix + func_name)

    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')

    def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must a provide spatial_ref_sys() method')

    distance_expr_for_lookup = staticmethod(Distance)

    def get_db_converters(self, expression):
        # Append a geometry converter for geometry-valued expressions.
        converters = super().get_db_converters(expression)
        if isinstance(expression.output_field, GeometryField):
            converters.append(self.get_geometry_converter(expression))
        return converters

    def get_geometry_converter(self, expression):
        raise NotImplementedError(
            'Subclasses of BaseSpatialOperations must provide a '
            'get_geometry_converter() method.'
        )

    def get_area_att_for_field(self, field):
        # Name of the Area measure attribute matching the field's units,
        # or None when the units are unknown.
        if field.geodetic(self.connection):
            if self.connection.features.supports_area_geodetic:
                return 'sq_m'
            raise NotImplementedError('Area on geodetic coordinate systems not supported.')
        else:
            units_name = field.units_name(self.connection)
            if units_name:
                return AreaMeasure.unit_attname(units_name)

    def get_distance_att_for_field(self, field):
        # Name of the Distance measure attribute matching the field's units,
        # or None when distances cannot be expressed for this field.
        dist_att = None
        if field.geodetic(self.connection):
            if self.connection.features.supports_distance_geodetic:
                dist_att = 'm'
        else:
            units = field.units_name(self.connection)
            if units:
                dist_att = DistanceMeasure.unit_attname(units)
        return dist_att
|
# -*- coding: utf- | 8 -*-
# Generated by Django 1.10.3 on 2016-11-26 22:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine PoliticalParty.name_short as CharField(max_length=200)."""

    dependencies = [
        ('parliament', '0002_auto_20161123_1157'),
    ]

    operations = [
        migrations.AlterField(
            model_name='politicalparty',
            name='name_short',
            field=models.CharField(max_length=200),
        ),
    ]
|
import re
import logging
import urllib
import csv
import os
import shutil
from datetime import datetime
import StringIO
from scrapy.spider import BaseSpider
from scrapy import signals
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import CloseSpider
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
# Directory containing this module; used to locate the cached products CSV.
HERE = os.path.abspath(os.path.dirname(__file__))
class TomLeeMusicCaSpider(BaseSpider):
    """Price-monitoring spider for tomleemusic.ca.

    Runs in one of two modes: a full crawl of the whole catalogue, or a
    "simple" run that re-crawls only products already matched by the
    competitormonitor API and replays cached data for the rest.
    """
    name = 'tomleemusic.ca'
    allowed_domains = ['tomleemusic.ca', 'competitormonitor.com']

    def __init__(self, *args, **kwargs):
        super(TomLeeMusicCaSpider, self).__init__(*args, **kwargs)
        # Snapshot this crawl's CSV when the spider finishes (see spider_closed).
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def start_requests(self):
        """Dispatch to a full catalogue crawl or a simple re-check run."""
        if self.full_run_required():
            start_req = self._start_requests_full()
            log.msg('Full run')
        else:
            start_req = self._start_requests_simple()
            log.msg('Simple run')
        for req in start_req:
            yield req

    def spider_closed(self, spider):
        """Cache the finished crawl's CSV so later simple runs can replay it."""
        if spider.name == self.name:
            shutil.copy('data/%s_products.csv' % spider.crawl_id, os.path.join(HERE, 'tomleemusic_products.csv'))

    def _start_requests_full(self):
        yield Request('http://www.tomleemusic.ca/main/products.cfm', callback=self.parse_full)

    def _start_requests_simple(self):
        yield Request('http://competitormonitor.com/login.html?action=get_products_api&website_id=470333&matched=1',
                      callback=self.parse_simple)

    def full_run_required(self):
        """Return True when a full crawl is needed.

        A full run is forced when no cached product list exists yet;
        otherwise full runs happen only on Mondays.
        """
        if not os.path.exists(os.path.join(HERE, 'tomleemusic_products.csv')):
            return True
        # Run full only on Mondays. datetime.weekday() numbers Monday as 0;
        # the previous comparison against 1 actually matched Tuesdays.
        return datetime.now().weekday() == 0

    def parse_full(self, response):
        """Entry page of the catalogue: follow every category link."""
        hxs = HtmlXPathSelector(response)
        for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
            yield Request(url, callback=self.parse_product_list)

    def parse_product_list(self, response):
        """Recurse into sub-categories, product pages, and pagination."""
        hxs = HtmlXPathSelector(response)
        for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
            yield Request(url, callback=self.parse_product_list)
        for url in hxs.select(u'//a[@class="productListLink"]/@href').extract():
            url = urljoin_rfc(get_base_url(response), url)
            yield Request(url, callback=self.parse_product)
        next_page = hxs.select(u'//a[@class="smallPrint" and contains(text(),"Next")]/@href').extract()
        if next_page:
            url = urljoin_rfc(get_base_url(response), next_page[0])
            yield Request(url, callback=self.parse_product_list)

    def parse_product(self, response):
        """Scrape one product detail page into a Product item."""
        hxs = HtmlXPathSelector(response)
        product_loader = ProductLoader(item=Product(), selector=hxs)
        product_loader.add_value('url', response.url)
        product_loader.add_xpath('name', u'//h1[@class="productDetailHeader"]/text()')
        if hxs.select(u'//span[@class="productDetailSelling"]/text()'):
            product_loader.add_xpath('price', u'//span[@class="productDetailSelling"]/text()')
        else:
            # No selling price shown; record an empty price rather than none.
            product_loader.add_value('price', '')
        product_loader.add_xpath('sku', u'//input[@type="hidden" and (@name="hidProductId" or @name="inv")]/@value')
        product_loader.add_xpath('category', u'//td[@class="smallPrint"]/a[position()=2 and contains(text(),"Products")]/../a[3]/text()')
        img = hxs.select(u'//a[@class="smallPrint" and @rel="lightbox"]/@href').extract()
        if img:
            img = urljoin_rfc(get_base_url(response), img[0])
            product_loader.add_value('image_url', img)
        if hxs.select(u'//a[contains(@href,"BrandName")]/@href'):
            product_loader.add_xpath('brand', u'substring-after(//a[contains(@href,"BrandName")]/@href,"=")')
        else:
            # No brand link: try to match a sidebar brand against the product
            # name, falling back to the middle segment of the page title.
            brands = hxs.select(u'//strong[@class="sideBarText"]/text()').extract()
            brands = [b.strip() for b in brands]
            for brand in brands:
                if product_loader.get_output_value('name').startswith(brand):
                    product_loader.add_value('brand', brand)
                    break
            else:
                product_loader.add_xpath('brand', u'normalize-space(substring-before(substring-after(//title/text(), " - "), " - "))')
        # product_loader.add_xpath('shipping_cost', u'//div[@class="DetailRow"]/div[contains(text(),"Shipping")]/../div[2]/text()')
        yield product_loader.load_item()

    def parse_simple(self, response):
        """Re-crawl matched product URLs; replay cached rows for the rest."""
        f = StringIO.StringIO(response.body)
        hxs = HtmlXPathSelector()
        reader = csv.DictReader(f)
        self.matched = set()
        for row in reader:
            self.matched.add(row['url'])
        for url in self.matched:
            yield Request(url, self.parse_product)
        # Emit every cached, unmatched product unchanged from the last crawl.
        with open(os.path.join(HERE, 'tomleemusic_products.csv')) as cached_file:
            reader = csv.DictReader(cached_file)
            for row in reader:
                if row['url'] not in self.matched:
                    loader = ProductLoader(selector=hxs, item=Product())
                    loader.add_value('url', row['url'])
                    loader.add_value('sku', row['sku'])
                    loader.add_value('identifier', row['identifier'])
                    loader.add_value('name', row['name'])
                    loader.add_value('price', row['price'])
                    loader.add_value('category', row['category'])
                    loader.add_value('brand', row['brand'])
                    loader.add_value('image_url', row['image_url'])
                    loader.add_value('shipping_cost', row['shipping_cost'])
                    yield loader.load_item()
|
import matplotlib.pyplot as plt
#stores information about laser structure
#saves refraction and electric field profiles in text and graphic form to HDD
class Laser:
    """Stores information about a laser structure.

    Saves the refraction-index and electric-field profiles to disk, both
    as text files and as PNG plots.
    """

    def __init__(self, params):
        """Validate and store a (wavelength, concentration, thickness) 3-tuple.

        Explicit unpacking replaces the original Python 2 tuple-parameter
        syntax, which is invalid in Python 3; call sites are unchanged.
        """
        wavelength, concentration, thickness = params
        if not isinstance(wavelength, (int, float)):
            raise TypeError("wavelength should be a number")
        if not isinstance(concentration, list):
            raise TypeError("concentration should be a list")
        if not isinstance(thickness, list):
            raise TypeError("thickness should be a list")
        # The structure is described by exactly five layers.
        for i in range(5):
            if not isinstance(concentration[i], (int, float)) or not isinstance(thickness[i], (int, float)):
                raise TypeError("concentration and thickness elements should be numbers")
        # NOTE: the original also checked the arguments against None, but those
        # branches were unreachable (isinstance(None, ...) fails first).
        if wavelength < 0.85 or wavelength > 1.5:
            raise ValueError("wavelength out of range")
        self.wavelength = wavelength
        self.concentration = concentration
        self.thickness = thickness
        # Per-instance profile containers. These used to be class-level lists
        # (with ``field`` even defined twice), which would be shared between
        # all Laser instances.
        self.refraction = []
        self.field = []
        self.gridX = []
        self.gridN = []

    # refraction profile output
    def plotRefraction(self):
        """Plot the refraction profile to refraction.png and dump it to refraction.txt."""
        if not isinstance(self.gridX, list):
            raise TypeError("self.gridX should be a list")
        if not isinstance(self.gridN, list):
            raise TypeError("self.gridN should be a list")
        if len(self.gridX) <= 20:
            raise ValueError("len(self.gridX) out of range")
        if len(self.gridN) <= 20:
            raise ValueError("len(self.gridN) out of range")
        if len(self.gridX) != len(self.gridN):
            raise IndexError("self.gridX should be the same dimension as self.gridN")
        plt.plot(self.gridX, self.gridN)
        plt.xlabel('position, micrometers')
        plt.ylabel('refraction index, arb. units')
        plt.title('Refraction Index Profile')
        plt.savefig('refraction.png', format='png', dpi=100)
        plt.clf()
        # Context manager guarantees the file is closed even on write errors.
        with open("refraction.txt", "w") as refractionFile:
            for x, n in zip(self.gridX, self.gridN):
                refractionFile.write(str(x) + ": " + str(n) + "\n")

    # field profile output
    def plotField(self):
        """Plot the field intensity (|E|^2) to field.png and dump it to field.txt.

        Unlike the original implementation, ``self.field`` is no longer squared
        in place, so calling this twice no longer raises the stored amplitudes
        to ever-higher powers.
        """
        if not isinstance(self.gridX, list):
            raise TypeError("self.gridX should be a list")
        if not isinstance(self.field, list):
            raise TypeError("self.field should be a list")
        if len(self.gridX) <= 20:
            raise ValueError("len(self.gridX) out of range")
        if len(self.field) <= 20:
            raise ValueError("len(self.field) out of range")
        if len(self.gridX) != len(self.field):
            raise TypeError("self.gridX should be the same dimension as self.field")
        intensity = [value ** 2 for value in self.field]
        plt.plot(self.gridX, intensity)
        plt.xlabel('position, micrometers')
        plt.ylabel('electric field, arb. units')
        plt.title('Electric field in laser structure')
        plt.savefig('field.png', format='png', dpi=100)
        plt.clf()
        # Iterate over the field data itself; the original indexed by
        # len(self.gridN), which breaks when gridN has a different length.
        with open("field.txt", "w") as fieldFile:
            for x, value in zip(self.gridX, intensity):
                fieldFile.write(str(x) + ": " + str(value) + "\n")
|
"""
The main script
"""
import argparse
import summaryrank.features
import summaryrank.importers
import summaryrank.tools
# Help-text template; the three {} slots are filled with the formatted
# command lists below.
DESCRIPTION = '''
SummaryRank is a set of tools that help producing machine-learned
summary/sentence rankers. It supports a wide range of functions such
as generating judgments in trec_eval format or creating feature
vectors in the SVMLight format.
corpora tools:
{}
representations and features:
{}
commands:
{}
'''
# (command-name, handler) pairs; the one-line summaries shown in the help
# text are taken from each handler's docstring by _make_command_list().
IMPORTER_FUNCTIONS = [
    ("import_webap", summaryrank.importers.import_webap),
    ("import_trec_novelty", summaryrank.importers.import_trec_novelty),
    ("import_mobileclick", summaryrank.importers.import_mobileclick),
]
# Representation/feature generation and extraction commands.
FEATURE_FUNCTIONS = [
    ("gen_term", summaryrank.features.gen_term),
    ("gen_freqstats", summaryrank.features.gen_freqstats),
    ("gen_esa", summaryrank.features.gen_esa),
    ("gen_tagme", summaryrank.features.gen_tagme),
    ("extract", summaryrank.features.extract),
    ("contextualize", summaryrank.features.contextualize),
]
# General-purpose dataset manipulation commands.
GENERAL_FUNCTIONS = [
    ("describe", summaryrank.tools.describe),
    ("cut", summaryrank.tools.cut),
    ("join", summaryrank.tools.join),
    ("shuffle", summaryrank.tools.shuffle),
    ("split", summaryrank.tools.split),
    ("normalize", summaryrank.tools.normalize),
]
def _make_command_list(functions):
""" Prepare a formatted list of commands. """
return [' {:24}{}\n'.format(name, func.__doc__.strip().splitlines()[0])
for name, func in functions]
if __name__.endswith('__main__'):
    # Build the per-category command listings for the help text.
    importer_commands = ''.join(_make_command_list(IMPORTER_FUNCTIONS))
    feature_commands = ''.join(_make_command_list(FEATURE_FUNCTIONS))
    general_commands = ''.join(_make_command_list(GENERAL_FUNCTIONS))
    parser = argparse.ArgumentParser(
        prog='summaryrank',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage='%(prog)s [options..] command [args..]',
        add_help=False,
        description=DESCRIPTION.format(
            importer_commands, feature_commands, general_commands)
    )
    # First positional is the sub-command; everything after it is passed
    # through untouched so each handler can parse its own arguments.
    parser.add_argument('command', nargs='?', help=argparse.SUPPRESS)
    parser.add_argument('argv', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
    args = parser.parse_args()
    # Dispatch table mapping every registered command name to its handler.
    commands = dict()
    commands.update(IMPORTER_FUNCTIONS)
    commands.update(FEATURE_FUNCTIONS)
    commands.update(GENERAL_FUNCTIONS)
    if args.command in commands:
        commands[args.command](args.argv)
    else:
        if args.command is not None:
            parser.error("invalid command '{}'".format(args.command))
        else:
            # No command given: show the full help text.
            parser.print_help()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for validator.api.middleware.ssl """
from __future__ import unicode_literals
import mock |
from validator.api.middleware.ssl import SSLMiddleware
from validator.tests.base import ValidatorTestCase
class SSLMiddlewareTestCase(ValidatorTestCase):
    """ Tests for class SSLMiddleware """

    def setUp(self):
        """ Create a SSLMiddleware instance """
        super(SSLMiddlewareTestCase, self).setUp()
        self.item = SSLMiddleware()

    def test_process_request(self):
        """ Tests for method process_request """
        # Stub the external collaborator so the middleware result is fixed.
        self.item.external = mock.MagicMock()
        self.item.external.return_value = "OK"
        request = "MyInput"
        observed = self.item.process_request(request)
        self.assertEqual("OK", observed)

    def tearDown(self):
        """ Cleanup the SSLMiddleware instance """
        super(SSLMiddlewareTestCase, self).tearDown()
        self.m.UnsetStubs()
        self.m.ResetAll()
|
# Copyright (c) | 2017 Nick Gashkov
#
# Distributed under MIT License. See LICENSE file for details.
cl | ass ValidationError(Exception):
def __init__(self, *args, **kwargs):
self.error_dict = kwargs.pop('error_dict')
super(ValidationError, self).__init__(*args, **kwargs)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt5.Qt import QWidget, pyqtSignal
import yali.util
import yali.context as ctx
from yali.gui import ScreenWidget
from yali.gui.Ui.bootloaderwidget import Ui_BootLoaderWidget
from yali.storage.bootloader import BOOT_TYPE_NONE, BOOT_TYPE_PARTITION, BOOT_TYPE_MBR, BOOT_TYPE_RAID
class Widget(QWidget, ScreenWidget):
    """Installer screen for choosing where (or whether) to install the bootloader."""
    name = "bootloadersetup"

    def __init__(self):
        QWidget.__init__(self)
        self.ui = Ui_BootLoaderWidget()
        self.ui.setupUi(self)
        # Populated in shown()/activateChoices(); ``device`` is the final
        # install target handed to the bootloader in execute().
        self.bootloader = None
        self.default = None
        self.device = None
        self.boot_disk = None
        self.boot_partition = None
        # Wire UI signals to their handlers.
        self.ui.defaultSettings.toggled[bool].connect(self.showDefaultSettings)
        self.ui.noInstall.toggled[bool].connect(self.deactivateBootloader)
        # self.ui.installPartition.toggled[bool].connect(self.activateInstallPartition)
        self.ui.drives.currentIndexChanged[int].connect(self.currentDeviceChanged)
        self.ui.advancedSettingsBox.show()
        # Checking defaultSettings triggers showDefaultSettings(True), which
        # hides the advanced box again and selects the default device.
        self.ui.defaultSettings.setChecked(True)

    def fillDrives(self):
        # Rebuild the drive combo box from the bootloader's candidate drives;
        # the storage device object is attached as the item's user data.
        self.ui.drives.clear()
        for drive in self.bootloader.drives:
            device = ctx.storage.devicetree.getDeviceByName(drive)
            item = u"%s" % (device.name)
            self.ui.drives.addItem(item, device)

    def shown(self):
        # Called by the installer framework when this screen becomes visible.
        if ctx.flags.install_type == ctx.STEP_RESCUE:
            ctx.mainScreen.disableBack()
        self.bootloader = ctx.bootloader
        self.bootloader.storage = ctx.storage
        self.fillDrives()
        self.activateChoices()

    def backCheck(self):
        # Going back: when auto-partitioning was used, skip a screen and
        # drop the computed storage layout.
        if ctx.storage.doAutoPart:
            ctx.mainScreen.step_increment = 2
            ctx.storage.reset()
        return True

    def execute(self):
        """Record the chosen boot type and target device; True advances the wizard."""
        self.bootloader.stage1Device = self.device
        if self.ui.noInstall.isChecked():
            self.bootloader.bootType = BOOT_TYPE_NONE
        # elif self.ui.installPartition.isChecked():
        #     self.bootloader.bootType = BOOT_TYPE_PARTITION
        elif self.ui.installMBR.isChecked():
            self.bootloader.bootType = BOOT_TYPE_MBR
        # Decide how many wizard steps to skip next.
        if ctx.flags.install_type == ctx.STEP_RESCUE:
            ctx.mainScreen.step_increment = 2
        else:
            if ctx.flags.collection:
                ctx.collections = yali.util.get_collections()
                if len(ctx.collections) <= 1:
                    # Only one collection available: nothing to choose from.
                    ctx.flags.collection = False
                    ctx.mainScreen.step_increment = 2
            else:
                ctx.mainScreen.step_increment = 2
        return True

    def showDefaultSettings(self, state):
        # Toggle between the default target and the advanced device picker.
        if state:
            self.device = self.default
            self.ui.advancedSettingsBox.hide()
        else:
            self.ui.advancedSettingsBox.show()

    def activateChoices(self):
        # Enable the UI options the bootloader actually offers and pick
        # the default install target.
        for choice in self.bootloader.choices.keys():
            if choice == BOOT_TYPE_MBR:
                self.ui.installMBR.setText(_("The first sector of"))
                self.boot_disk = self.bootloader.choices[BOOT_TYPE_MBR][0]
            # elif choice == BOOT_TYPE_RAID:
            #     self.ui.installPartition.setText("The RAID array where Pardus is installed")
            #     self.boot_partition = self.bootloader.choices[BOOT_TYPE_RAID][0]
            # elif choice == BOOT_TYPE_PARTITION:
            #     self.ui.installPartition.setText(_("The partition where Pardus is installed"))
            #     self.boot_partition = self.bootloader.choices[BOOT_TYPE_PARTITION][0]
        if self.boot_disk:
            self.default = self.boot_disk
            self.ui.installMBR.setChecked(True)
        # else:
        #     self.default = self.boot_partition
        #     self.ui.installPartition.setChecked(True)

    def deactivateBootloader(self):
        # "Don't install" selected: no target device.
        self.device = None

    def activateInstallPartition(self, state):
        if state:
            self.device = self.boot_partition

    def currentDeviceChanged(self, index):
        # -1 means the combo box is empty/cleared.
        if index != -1:
            self.device = self.ui.drives.itemData(index).name
|
#!/usr/bin/python
#
# to run an example
# python RunMakeFigures.py -p Demo -i 0 -j 1 -f 3FITC_4PE_004.fcs -h ./projects/Demo
#
import getopt,sys,os
import numpy as np
## important line to fix popup error in mac osx
import matplotlib
matplotlib.use('Agg')
from cytostream import Model
import matplotlib.pyplot as plt
## parse inputs
def bad_input():
    # Print usage to stdout and abort; called on any argument-validation failure.
    print "\nERROR: incorrect args"
    print sys.argv[0] + "-p projectID -i channel1 -j channel2 -f selectedFile -a alternateDirectory -s subset -t modelType -h homeDir"
    print "   projectID (-p) project name"
    print "    channel1 (-i) channel 1 name"
    print "    channel2 (-j) channel 2 name"
    print "     homeDir (-h) home directory of current project"
    print "selectedFile (-f) name of selected file"
    print "      altDir (-a) alternative directory (optional)"
    print "      subset (-s) subsampling number (optional)"
    print "   modelName (-m) model name"
    print "   modelType (-t) model type"
    print "\n"
    sys.exit()
# Parse the short-option command line; abort with usage on malformed input.
try:
    optlist, args = getopt.getopt(sys.argv[1:],'i:j:s:a:p:f:m:t:h:')
except getopt.GetoptError:
    print getopt.GetoptError
    bad_input()
# Defaults for every option; ``run`` is cleared by the later error checks.
projectID = None
channel1 = None
channel2 = None
selectedFile = None
altDir = None
homeDir = None
modelType = None
modelName = None
subset = "all"
run = True
# Map each parsed flag onto its variable.
for o, a in optlist:
    if o == '-i':
        channel1 = a
    if o == '-j':
        channel2 = a
    if o == '-f':
        selectedFile = a
    if o == '-a':
        altDir = a
    if o == '-p':
        projectID = a
    if o == '-s':
        subset = a
    if o == '-m':
        modelName = a
    if o == '-t':
        modelType = a
    if o == '-h':
        homeDir = a
def make_scatter_plot(model,selectedFile,channel1Ind,channel2Ind,subset='all',labels=None,buff=0.02,altDir=None):
    """Render a two-channel scatter plot of one FCS file and save it as PNG.

    model       -- initialized cytostream Model for the project
    channel1Ind -- index (int or int-like string) of the x-axis channel
    channel2Ind -- index of the y-axis channel
    subset      -- "all" or a subsample identifier understood by the model
    labels      -- optional per-event cluster labels; one color per cluster
    buff        -- fractional margin added around the data range
    altDir      -- alternative output directory (default: project figs dir)
    """
    #fig = pyplot.figure(figsize=(7,7))
    markerSize = 5
    alphaVal = 0.5
    fontName = 'arial'
    fontSize = 12
    plotType = 'png'
    ## prepare figure
    fig = plt.figure(figsize=(7,7))
    ax = fig.add_subplot(111)
    ## specify channels
    fileChannels = model.get_file_channel_list(selectedFile)
    index1 = int(channel1Ind)
    index2 = int(channel2Ind)
    channel1 = fileChannels[index1]
    channel2 = fileChannels[index2]
    data = model.pyfcm_load_fcs_file(selectedFile)
    ## subset give an numpy array of indices
    if subset != "all":
        subsampleIndices = model.get_subsample_indices(subset)
        data = data[subsampleIndices,:]
    ## make plot
    totalPoints = 0
    if labels == None:
        ax.scatter([data[:,index1]],[data[:,index2]],color='blue',s=markerSize)
    else:
        if type(np.array([])) != type(labels):
            labels = np.array(labels)
        numLabels = np.unique(labels).size
        maxLabel = np.max(labels)
        # One colorbar entry per label value; each cluster gets its own color.
        cmp = model.get_n_color_colorbar(maxLabel+1)
        for l in np.sort(np.unique(labels)):
            # NOTE(review): scales 0..1 RGB up by 256 before hex conversion —
            # presumably rgb_to_hex expects 0..255-ish values; confirm.
            rgbVal = tuple([val * 256 for val in cmp[l,:3]])
            hexColor = model.rgb_to_hex(rgbVal)[:7]
            x = data[:,index1][np.where(labels==l)[0]]
            y = data[:,index2][np.where(labels==l)[0]]
            totalPoints+=x.size
            if x.size == 0:
                continue
            ax.scatter(x,y,color=hexColor,s=markerSize)
            #ax.scatter(x,y,color=hexColor,s=markerSize)
    ## handle data edge buffers
    bufferX = buff * (data[:,index1].max() - data[:,index1].min())
    bufferY = buff * (data[:,index2].max() - data[:,index2].min())
    ax.set_xlim([data[:,index1].min()-bufferX,data[:,index1].max()+bufferX])
    ax.set_ylim([data[:,index2].min()-bufferY,data[:,index2].max()+bufferY])
    ## save file
    fileName = selectedFile
    ax.set_title("%s_%s_%s"%(channel1,channel2,fileName),fontname=fontName,fontsize=fontSize)
    ax.set_xlabel(channel1,fontname=fontName,fontsize=fontSize)
    ax.set_ylabel(channel2,fontname=fontName,fontsize=fontSize)
    # Output name: <file-stem>_<channel1>_<channel2>.png, either in the
    # project's figs directory or in the supplied alternative directory.
    if altDir == None:
        fileName = os.path.join(model.homeDir,'figs',"%s_%s_%s.%s"%(selectedFile[:-4],channel1,channel2,plotType))
        fig.savefig(fileName,transparent=False,dpi=50)
    else:
        fileName = os.path.join(altDir,"%s_%s_%s.%s"%(selectedFile[:-4],channel1,channel2,plotType))
        fig.savefig(fileName,transparent=False,dpi=50)
## error checking
# The caller passes literal "None" strings for unset options; normalize them.
if altDir == 'None':
    altDir = None
if homeDir == 'None':
    homeDir = None
if modelName == 'None':
    modelName = None
statModel,statModelClasses = None,None
# At least one output location and the core identifiers are required.
if altDir == None and homeDir == None:
    bad_input()
    run = False
    print "WARNING: RunMakeFigures failed errorchecking"
if projectID == None or channel1 == None or channel2 == None or selectedFile == None:
    bad_input()
    run = False
    print "WARNING: RunMakeFigures failed errorchecking"
if os.path.isdir(homeDir) == False:
    print "ERROR: homedir does not exist -- bad project name", projectID, homeDir
    run = False
if altDir != None and os.path.isdir(altDir) == False:
    print "ERROR: specified alternative dir does not exist\n", altDir
    run = False
# All checks passed: plot either the raw data or the model's cluster labels.
if run == True:
    model = Model()
    model.initialize(projectID,homeDir)
    if modelName == None:
        make_scatter_plot(model,selectedFile,channel1,channel2,subset=subset,altDir=altDir)
    else:
        statModel,statModelClasses = model.load_model_results_pickle(modelName,modelType)
        make_scatter_plot(model,selectedFile,channel1,channel2,labels=statModelClasses,subset=subset,altDir=altDir)
|
import json
import os
import socket
import sys
import uuid
import etcd
from tendrl.commons import objects
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import event_utils
from tendrl.commons.utils import log_utils as logger
# Process-wide cache of this node's id, set once discovered or created.
NODE_ID = None
class NodeContext(objects.BaseObject):
def __init__(self, node_id=None, fqdn=None, ipv4_addr=None,
             tags=None, status=None, sync_status=None,
             last_sync=None, pkey=None,
             locked_by=None, *args, **kwargs):
    """Build the node context, merging tags from args, config and etcd.

    node_id falls back to the locally persisted id, or a freshly created one.
    """
    super(NodeContext, self).__init__(*args, **kwargs)
    self.node_id = node_id or self._get_node_id() or self._create_node_id()
    self.fqdn = fqdn
    self.ipv4_addr = ipv4_addr
    if self.fqdn:
        # An FQDN overrides any explicitly supplied address.
        self.ipv4_addr = socket.gethostbyname(self.fqdn)
    self.locked_by = locked_by
    curr_tags = []
    try:
        # Tags already stored for this node in etcd, if any.
        _nc_data = etcd_utils.read(
            "/nodes/%s/NodeContext/data" % self.node_id
        ).value
        curr_tags = json.loads(_nc_data)['tags']
    except etcd.EtcdKeyNotFound:
        pass
    try:
        # The stored tags may themselves be JSON-encoded; decode if so.
        curr_tags = json.loads(curr_tags)
    except (ValueError, TypeError):
        # No existing tags
        pass
    # Merge explicit tags, configured tags (NS is tendrl's injected global
    # namespace — TODO confirm) and the etcd tags, deduplicated.
    self.tags = tags or []
    self.tags += NS.config.data.get('tags', [])
    self.tags += curr_tags
    self.tags = list(set(self.tags))
    self.status = status or "UP"
    self.sync_status = sync_status
    self.last_sync = last_sync
    self.pkey = pkey or self.fqdn
    # etcd key template; the node id is substituted in render().
    self.value = 'nodes/{0}/NodeContext'
def _create_node_id(self):
    """Generate, log, persist and return a fresh UUID4 node id.

    The id is cached in the module-level NODE_ID and written to
    /var/lib/tendrl/node_id so it survives restarts.
    """
    global NODE_ID
    node_id = str(uuid.uuid4())
    try:
        logger.log(
            "debug",
            NS.publisher_id,
            {"message": "Registered Node (%s) with " % node_id}
        )
    except KeyError:
        # Logging infrastructure not fully set up yet; fall back to stdout.
        sys.stdout.write("message: Registered Node (%s) \n" % node_id)
    local_node_id = "/var/lib/tendrl/node_id"
    if not os.path.exists(os.path.dirname(local_node_id)):
        os.makedirs(os.path.dirname(local_node_id))
    # Text mode: node_id is a str, and the original 'wb+' mode raises
    # TypeError on Python 3 when writing str to a binary file.
    with open(local_node_id, 'w') as f:
        f.write(node_id)
    NODE_ID = node_id
    return node_id
def _get_node_id(self):
    """Return the node id from the module cache or the local state file.

    Returns None implicitly when neither source has an id, which makes
    the caller fall through to _create_node_id().
    """
    if NODE_ID:
        return NODE_ID
    local_node_id = "/var/lib/tendrl/node_id"
    if os.path.isfile(local_node_id):
        with open(local_node_id) as f:
            node_id = f.read()
        # Cache for subsequent lookups in this process.
        global NODE_ID
        NODE_ID = node_id
        return node_id
def render(self):
    # Substitute the node id into the etcd key template before rendering.
    self.value = self.value.format(self.node_id or NS.node_context.node_id)
    return super(NodeContext, self).render()
def save(self, update=True, ttl=None):
    """Persist the object; when a TTL is given, refresh it on the status key."""
    super(NodeContext, self).save(update)
    status = self.value + "/status"
    if ttl:
        self._ttl = ttl
        try:
            etcd_utils.refresh(status, ttl)
        except etcd.EtcdKeyNotFound:
            # Status key not written yet; nothing to refresh.
            pass
    def on_change(self, attr, prev_value, current_value):
        """React to a change of a watched attribute.

        Only "status" changes are handled:

        * current_value is None -> the node stopped reporting: mark it
          DOWN, emit warning events, flip the cluster to unhealthy,
          strip a stale provisioner tag (scheduling a monitoring
          re-configuration job), and mark the node's gluster bricks
          Stopped.
        * current_value == "UP" -> emit an informational node-up event.
        """
        if attr == "status":
            _tc = NS.tendrl.objects.TendrlContext(
                node_id=self.node_id
            ).load()
            if current_value is None:
                # Status key expired in etcd: the node stopped reporting.
                self.status = "DOWN"
                self.save()
                msg = "Node {0} is DOWN".format(self.fqdn)
                event_utils.emit_event(
                    "node_status",
                    self.status,
                    msg,
                    "node_{0}".format(self.fqdn),
                    "WARNING",
                    node_id=self.node_id,
                    integration_id=_tc.integration_id
                )
                # Load cluster_node_context will load node_context
                # and it will be updated with latest values
                cluster_node_context = NS.tendrl.objects.ClusterNodeContext(
                    node_id=self.node_id,
                    integration_id=_tc.integration_id
                )
                cluster_node_context.save()
                del cluster_node_context
                # A DOWN node makes a healthy cluster unhealthy (only
                # emit the transition once).
                global_details = NS.tendrl.objects.GlobalDetails(
                    integration_id=_tc.integration_id).load()
                if global_details.status.lower() == "healthy":
                    global_details.status = "unhealthy"
                    global_details.save()
                    _cluster = NS.tendrl.objects.Cluster(
                        integration_id=_tc.integration_id
                    ).load()
                    msg = "Cluster:%s is %s" % (
                        _cluster.short_name, "unhealthy")
                    instance = "cluster_%s" % _tc.integration_id
                    event_utils.emit_event(
                        "cluster_health_status",
                        "unhealthy",
                        msg,
                        instance,
                        'WARNING',
                        integration_id=_tc.integration_id
                    )
                # If this node was the provisioner, drop the stale tag
                # and its index, then queue a job to re-configure
                # monitoring elsewhere.
                _tag = "provisioner/%s" % _tc.integration_id
                if _tag in self.tags:
                    _index_key = "/indexes/tags/%s" % _tag
                    self.tags.remove(_tag)
                    self.save()
                    etcd_utils.delete(_index_key)
                    _msg = "node_sync, STALE provisioner node "\
                        "found! re-configuring monitoring "\
                        "(job-id: %s) on this node"
                    payload = {
                        "tags": ["tendrl/node_%s" % self.node_id],
                        "run": "tendrl.flows.ConfigureMonitoring",
                        "status": "new",
                        "parameters": {
                            'TendrlContext.integration_id': _tc.integration_id
                        },
                        "type": "node"
                    }
                    _job_id = str(uuid.uuid4())
                    NS.tendrl.objects.Job(
                        job_id=_job_id,
                        status="new",
                        payload=payload
                    ).save()
                    logger.log(
                        "debug",
                        NS.publisher_id,
                        {"message": _msg % _job_id}
                    )
                # Gluster clusters: bricks hosted on a DOWN node can no
                # longer be running.
                if _tc.sds_name in ["gluster", "RHGS"]:
                    bricks = etcd_utils.read(
                        "clusters/{0}/Bricks/all/{1}".format(
                            _tc.integration_id,
                            self.fqdn
                        )
                    )
                    for brick in bricks.leaves:
                        try:
                            etcd_utils.write(
                                "{0}/status".format(brick.key),
                                "Stopped"
                            )
                        except (etcd.EtcdAlreadyExist, etcd.EtcdKeyNotFound):
                            pass
            elif current_value == "UP":
                msg = "{0} is UP".format(self.fqdn)
                event_utils.emit_event(
                    "node_status",
                    "UP",
                    msg,
                    "node_{0}".format(self.fqdn),
                    "INFO",
                    node_id=self.node_id,
                    integration_id=_tc.integration_id
                )
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obta | in a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations unde | r the License.
def addpkg(*args, **kwargs):
    """No-op placeholder: accepts any arguments and does nothing."""
    return None
|
#coding=utf8 |
'''
Created on 2012-9-19
@author: senon
'''
from django.conf.urls.defaults import patterns, url
# Route the concerned-about-friends page to its view in friendships.views.
# NOTE(review): the pattern has no trailing '$', so any URL merely
# *starting* with 'concerned_about_friends/' matches -- confirm intended.
urlpatterns = patterns('friendships.views',
    url(r'^concerned_about_friends/', 'concerned_about_friends')
)
import ply.yacc as yacc
from bsi_lexer import tokens
from bsi_object import BsiObject
from bsi_array import BsiArray
def p_object_pairs(p):
    'obj : pairs'
    # Fold the parsed (key, value) pairs into a fresh BsiObject.
    # (The string above is the PLY grammar rule -- do not change it.)
    obj = BsiObject()
    for key, value in p[1]:
        obj.set(key, value)
    p[0] = obj
def p_pairs_pair(p):
    'pairs : pair'
    # A lone pair starts a fresh one-element pair list.
    single = p[1]
    p[0] = [single]
def p_pairs_pair_pairs(p):
    'pairs : pair pairs'
    # Prepend the new pair to the already-collected list.
    collected = list(p[2])
    collected.insert(0, p[1])
    p[0] = collected
def p_pair_key_eq_value(p):
    'pair : KEY EQ val'
    # KEY '=' val  ->  (key, value); the EQ token itself is dropped.
    key, value = p[1], p[3]
    p[0] = (key, value)
def p_val_num(p):
    'val : NUM'
    # Numeric literal: pass the token value through unchanged.
    p[0] = p[1]
def p_val_string(p):
| 'val : STRING'
p[0] = p[1]
def p_val_array(p):
    'val : L_SQ_BR vals R_SQ_BR'
    # Bracketed value list becomes a BsiArray (the brackets are dropped).
    p[0] = BsiArray(p[2])
def p_array_val(p):
    'vals : val'
    # A single value forms a one-element list.
    p[0] = [p[1]]
def p_array_vals(p):
    'vals : val vals'
    # Prepend the new value to the collected tail.
    combined = [p[1]]
    combined.extend(p[2])
    p[0] = combined
def p_val_nested_obj(p):
    'val : L_BRACE obj R_BRACE'
    # A braced object used as a value (the braces are dropped).
    p[0] = p[2]
def p_error(p):
    # PLY error hook: *p* is the offending token, or None at end of input.
    # BUG FIX: the original bare `print` statements are Python 2 only
    # (SyntaxError under Python 3); the parenthesized form below behaves
    # identically on Python 2 and is valid Python 3.
    print(p)
    print("Syntax error in input!")
bsi_parser = yacc.yacc()
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from airflow.configuration import conf
from airflow.exceptions i | mport AirflowException
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
# Name of the task runner implementation configured under [core].
_TASK_RUNNER = conf.get('core', 'TASK_RUNNER')
def get_task_runner(local_task_job):
    """
    Get the task runner that can be used to run the given job.

    :param local_task_job: The LocalTaskJob associated with the TaskInstance
        that needs to be executed.
    :type local_task_job: airflow.jobs.LocalTaskJob
    :return: The task runner to use to run the task.
    :rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
    """
    if _TASK_RUNNER == "StandardTaskRunner":
        return StandardTaskRunner(local_task_job)
    if _TASK_RUNNER == "CgroupTaskRunner":
        # Imported lazily: the cgroup runner pulls in extra dependencies
        # that are only needed when this runner is configured.
        from airflow.task.task_runner.cgroup_task_runner import CgroupTaskRunner
        return CgroupTaskRunner(local_task_job)
    raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
|
##################################################################################################
# $HeadURL$
##################################################################################################
"""Collection of DIRAC useful statistics related modules.
.. warning::
By default on Error they return None.
"""
__RCSID__ = "$Id$"
from math import sqrt # Mathematical functions.
def getMean( numbers ):
  """Returns the arithmetic mean of a numeric list.

  The sample is coerced to floats and summed in ascending order, as in
  the original implementation.  Returns None for an empty sample.

  :param list numbers: data sample
  """
  if not numbers:
    return None
  data = sorted(float(value) for value in numbers)
  return sum(data) / float(len(data))
def getMedian( numbers ):
  """ Return the median of the list of numbers.

  Returns None for an empty sample.

  :param list numbers: data sample
  """
  size = len(numbers)
  if not size:
    return None
  ordered = sorted(float(value) for value in numbers)
  middle = size // 2
  if size % 2:
    # Odd count: the single middle element.
    return ordered[middle]
  # Even count: mean of the two middle elements.
  return 0.5 * (ordered[middle - 1] + ordered[middle])
def getVariance( numbers, posMean='Empty' ):
  """Determine the measure of the spread of the data set about the mean.

  This computes the *population* variance (division by N).  A
  pre-computed mean may be supplied via posMean; otherwise the mean is
  derived from the sample.  Returns None for an empty sample.

  :param list numbers: data sample
  :param mixed posMean: mean of a sample or 'Empty' str
  """
  if not numbers:
    return None
  mean = getMean(numbers) if posMean == 'Empty' else posMean
  data = sorted(float(value) for value in numbers)
  # Sum of squared deviations about the mean, divided by N.
  deviations = [(value - mean) ** 2.0 for value in data]
  return sum(deviations) / len(data)
def getStandardDeviation(numbers, variance='Empty', mean='Empty'):
  """Determine the measure of the dispersion of the data set based on the
  variance.

  Returns None for an empty sample.

  :param list numbers: data sample
  :param mixed variance: variance or str 'Empty'
  :param mixed mean: mean or str 'Empty'
  """
  if not numbers:
    return None
  if variance == 'Empty':
    # Derive the variance, reusing a pre-computed mean when supplied.
    if mean == 'Empty':
      variance = getVariance(numbers)
    else:
      variance = getVariance(numbers, posMean=mean)
  return sqrt(variance)
|
#!/ho | me/jojoriveraa/Dropbox/Capacitación/Platzi/Python-Django/NFCow/venv/bin/python3
from django.core import management
# Entry point: hand the command line off to Django's management CLI.
if __name__ == "__main__":
    management.execute_from_command_line()
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransactionsRuleField(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Enum of the transaction fields a rule may be defined for.
    allowed_values = {
        ('value',): {
            'TRANSACTION_ID': "TRANSACTION_ID",
            'NAME': "NAME",
        },
    }
    validations = {
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {}
    _composed_schemas = None
    # Internal bookkeeping attribute names reserved by the model base.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """TransactionsRuleField - a model defined in OpenAPI
        Note that value can be passed either in args or in kwargs, but not in both.
        Args:
            args[0] (str): Transaction field for which the rule is defined.., must be one of ["TRANSACTION_ID", "NAME", ] # noqa: E501
        Keyword Args:
            value (str): Transaction field for which the rule is defined.., must be one of ["TRANSACTION_ID", "NAME", ] # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # 'value' may arrive positionally or by keyword, never both.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Store the enum value on the model.
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
|
'''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
# Download (and cache) the corpus, lower-casing it to keep the
# character vocabulary small.
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
# Character vocabulary and the two lookup tables (char <-> index).
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
# One-hot encode inputs (X) and next-character targets (y).
# BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin bool is the documented replacement and is accepted by
# every NumPy version.
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
# NOTE(review): `lr` is the legacy Keras argument name (newer releases
# spell it `learning_rate`) -- kept to match the API level used here.
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
    """Draw one class index from `preds` after temperature scaling.

    Lower temperatures sharpen the distribution (more conservative
    samples); higher temperatures flatten it.
    """
    scaled = np.asarray(preds).astype('float64')
    scaled = np.log(scaled) / temperature
    scaled = np.exp(scaled)
    scaled = scaled / np.sum(scaled)
    # One multinomial draw; argmax recovers the drawn index.
    draw = np.random.multinomial(1, scaled, 1)
    return np.argmax(draw)
# train the model, output generated text after each iteration
for iteration in range(1, 60):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    # One pass over the data per outer iteration.
    # NOTE(review): nb_epoch is the legacy Keras 1.x argument name
    # (later renamed to epochs) -- kept to match the API level used here.
    model.fit(X, y, batch_size=128, nb_epoch=1)
    start_index = random.randint(0, len(text) - maxlen - 1)
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print()
        print('----- diversity:', diversity)
        generated = ''
        sentence = text[start_index: start_index + maxlen]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)
        # Generate 400 characters, feeding each prediction back in.
        for i in range(400):
            x = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(sentence):
                x[0, t, char_indices[char]] = 1.
            preds = model.predict(x, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = indices_char[next_index]
            generated += next_char
            # Slide the seed window one character forward.
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
        print()
    # Checkpoint the weights after every iteration.
    model.save_weights('data/nietzsche_simple_TF.h5')
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block-relay-only anchors functionality"""
import os
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import check_node_connections
# Connection counts the test attaches; only the block-relay-only peers
# should end up in anchors.dat.
INBOUND_CONNECTIONS = 5
BLOCK_RELAY_CONNECTIONS = 2
class AnchorsTest(BitcoinTestFramework):
    """Check that anchors.dat records exactly the block-relay-only peers
    and is consumed (deleted) when the node restarts."""
    def set_test_params(self):
        self.num_nodes = 1
        self.disable_autoconnect = False
    def run_test(self):
        node_anchors_path = os.path.join(
            self.nodes[0].datadir, "regtest", "anchors.dat"
        )
        self.log.info("When node starts, check if anchors.dat doesn't exist")
        assert not os.path.exists(node_anchors_path)
        self.log.info(f"Add {BLOCK_RELAY_CONNECTIONS} block-relay-only connections to node")
        for i in range(BLOCK_RELAY_CONNECTIONS):
            self.log.debug(f"block-relay-only: {i}")
            self.nodes[0].add_outbound_p2p_connection(
                P2PInterface(), p2p_idx=i, connection_type="block-relay-only"
            )
        self.log.info(f"Add {INBOUND_CONNECTIONS} inbound connections to node")
        for i in range(INBOUND_CONNECTIONS):
            self.log.debug(f"inbound: {i}")
            self.nodes[0].add_p2p_connection(P2PInterface())
        self.log.info("Check node connections")
        # CONSISTENCY FIX: use the module constants instead of repeating
        # the literals 5 and 2, so changing the constants cannot desync
        # this check from the setup above.
        check_node_connections(node=self.nodes[0],
                               num_in=INBOUND_CONNECTIONS,
                               num_out=BLOCK_RELAY_CONNECTIONS)
        # 127.0.0.1 in the hex form used inside anchors.dat
        ip = "7f000001"
        # Since the ip is always 127.0.0.1 for this case,
        # we store only the port to identify the peers
        block_relay_nodes_port = []
        inbound_nodes_port = []
        for p in self.nodes[0].getpeerinfo():
            addr_split = p["addr"].split(":")
            if p["connection_type"] == "block-relay-only":
                block_relay_nodes_port.append(hex(int(addr_split[1]))[2:])
            else:
                inbound_nodes_port.append(hex(int(addr_split[1]))[2:])
        self.log.info("Stop node 0")
        self.stop_node(0)
        # It should contain only the block-relay-only addresses
        self.log.info("Check the addresses in anchors.dat")
        with open(node_anchors_path, "rb") as file_handler:
            anchors = file_handler.read().hex()
        for port in block_relay_nodes_port:
            ip_port = ip + port
            assert ip_port in anchors
        for port in inbound_nodes_port:
            ip_port = ip + port
            assert ip_port not in anchors
        self.log.info("Start node")
        self.start_node(0)
        # anchors.dat is read and removed during startup.
        self.log.info("When node starts, check if anchors.dat doesn't exist anymore")
        assert not os.path.exists(node_anchors_path)
# Test-framework entry point.
if __name__ == "__main__":
    AnchorsTest().main()
|
import ray
from ray._private.test_utils import run_string_a | s_driver
# This tests the queue transitions for infeasible tasks. This has been an issue
# in the past, e.g., https://github.com/ray-project/ray/issues/3275.
def test_infeasible_tasks(ray_start_cluster):
    """Tasks whose resource demands cannot currently be satisfied must
    stay queued (not be dropped) and run once a matching node joins."""
    cluster = ray_start_cluster
    @ray.remote
    def f():
        return
    cluster.add_node(resources={str(0): 100})
    ray.init(address=cluster.address)
    # Submit an infeasible task.
    x_id = f._remote(args=[], kwargs={}, resources={str(1): 1})
    # Add a node that makes the task feasible and make sure we can get the
    # result.
    cluster.add_node(resources={str(1): 100})
    ray.get(x_id)
    # Start a driver that submits an infeasible task and then let it exit.
    # NOTE: the literal "{str(2): 1}" is spliced in as *source text* of
    # the driver script; it is evaluated inside the driver process.
    driver_script = """
import ray
ray.init(address="{}")
@ray.remote(resources={})
def f():
{}pass # This is a weird hack to insert some blank space.
f.remote()
""".format(
        cluster.address, "{str(2): 1}", " "
    )
    run_string_as_driver(driver_script)
    # Now add a new node that makes the task feasible.
    cluster.add_node(resources={str(2): 100})
    # Make sure we can still run tasks on all nodes.
    ray.get([f._remote(args=[], kwargs={}, resources={str(i): 1}) for i in range(3)])
# Allow running this file directly through pytest.
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Finds revisions from the Thunderbird migration that don't have based_on
set correctly, and are still relavent, and fixes that.
Run this script like `./manage.py runscript fix_tb_basedon`.
"""
import sys
from traceback import print_exc
from django.db.models import Q
from kitsune.wiki.models import Document, Revision
def run():
    """Script entry point (invoked via `./manage.py runscript`): run the
    fix, printing a traceback before re-raising any failure."""
    try:
        run_()
    except Exception:
        print_exc()
        raise
class Progress():
    """Minimal in-place console progress counter ("current / total")."""
    def __init__(self, total):
        self.current = 0
        self.total = total
    def tick(self, incr=1):
        """Advance the counter by *incr* and redraw the line."""
        self.current = self.current + incr
        self.draw()
    def draw(self):
        """Rewrite the 'current / total' line in place (carriage return,
        no newline)."""
        self._wr('{0.current} / {0.total}\r'.format(self))
    def _wr(self, s):
        # Flush immediately so progress shows up without buffering.
        sys.stdout.write(s)
        sys.stdout.flush()
def run_():
    """Find Thunderbird translations whose current revision lacks
    based_on and point it at the oldest revision of the parent doc."""
    to_process = list(Document.objects.filter(
        ~Q(parent=None),
        current_revision__based_on=None,
        products__slug='thunderbird'))
    if len(to_process) == 0:
        # NOTE(review): Python 2 print statement; execution falls
        # through to the (empty) loop below rather than returning early.
        print 'Nothing to do.'
    prog = Progress(len(to_process))
    for doc in to_process:
        prog.tick()
        # Oldest parent revision == lowest primary key.
        oldest_parent_rev = (Revision.objects.filter(document=doc.parent)
                             .order_by('id')[0])
        # It has localizations, clearly it should be localizable.
        if not doc.parent.is_localizable:
            doc.parent.is_localizable = True
            doc.parent.save()
        doc.current_revision.based_on = oldest_parent_rev
        doc.current_revision.save()
|
from pidW | X impor | t *
|
# -*- coding: utf-8 -*-
import os
from future.moves.urllib.parse import quote
import uuid
import ssl
from pymongo import MongoClient
import requests
from django.apps import apps
from addons.wiki import settings as wiki_settings
from addons.wiki.exceptions import InvalidVersionError
from osf.utils.permissions import ADMIN, READ, WRITE
# MongoDB forbids field names that begin with "$" or contain ".". These
# utilities map to and from Mongo field names.
mongo_map = {
    '.': '__!dot!__',
    '$': '__!dollar!__',
}
def to_mongo(item):
    """Escape the Mongo-forbidden characters ('.' and '$') in *item*."""
    escaped = item
    for forbidden, replacement in mongo_map.items():
        escaped = escaped.replace(forbidden, replacement)
    return escaped
def to_mongo_key(item):
    """Escaped, trimmed, lower-cased form of *item* for use as a key."""
    return to_mongo(item).strip().lower()
def generate_private_uuid(node, wname):
    """
    Generate private uuid for internal use in sharejs namespacing.

    Note that this will NEVER be passed to the client or sharejs.
    """
    fresh_uuid = str(uuid.uuid1())
    node.wiki_private_uuids[to_mongo_key(wname)] = fresh_uuid
    node.save()
    return fresh_uuid
def get_sharejs_uuid(node, wname):
    """
    Format private uuid into the form used in mongo and sharejs.

    This includes node's primary ID to prevent fork namespace collision.
    Returns None when no private uuid has been generated for the page.
    """
    private_uuid = node.wiki_private_uuids.get(to_mongo_key(wname))
    if not private_uuid:
        return None
    return str(uuid.uuid5(uuid.UUID(private_uuid), str(node._id)))
def delete_share_doc(node, wname):
    """Deletes share document and removes namespace from model."""
    db = share_db()
    sharejs_uuid = get_sharejs_uuid(node, wname)
    # Drop both the document and its operation log from sharejs.
    # NOTE(review): Collection.remove() is the legacy pymongo API
    # (removed in pymongo 4) -- matches the driver version in use here.
    db['docs'].remove({'_id': sharejs_uuid})
    db['docs_ops'].remove({'name': sharejs_uuid})
    wiki_key = to_mongo_key(wname)
    # Raises KeyError if no private uuid was ever generated for wname.
    del node.wiki_private_uuids[wiki_key]
    node.save()
def migrate_uuid(node, wname):
    """Migrates uuid to new namespace."""
    db = share_db()
    old_sharejs_uuid = get_sharejs_uuid(node, wname)
    # Freeze editors on the old document while it is being re-keyed.
    broadcast_to_sharejs('lock', old_sharejs_uuid)
    generate_private_uuid(node, wname)
    new_sharejs_uuid = get_sharejs_uuid(node, wname)
    # Copy the document under the new id, then drop the old one.
    doc_item = db['docs'].find_one({'_id': old_sharejs_uuid})
    if doc_item:
        doc_item['_id'] = new_sharejs_uuid
        db['docs'].insert(doc_item)
        db['docs'].remove({'_id': old_sharejs_uuid})
    # Re-key the operation-log entries to the new namespace as well.
    ops_items = [item for item in db['docs_ops'].find({'name': old_sharejs_uuid})]
    if ops_items:
        for item in ops_items:
            item['_id'] = item['_id'].replace(old_sharejs_uuid, new_sharejs_uuid)
            item['name'] = new_sharejs_uuid
        db['docs_ops'].insert(ops_items)
        db['docs_ops'].remove({'name': old_sharejs_uuid})
    # Unlock, telling sharejs which contributors may write.
    write_contributors = [
        user._id for user in node.contributors
        if node.has_permission(user, WRITE)
    ]
    broadcast_to_sharejs('unlock', old_sharejs_uuid, data=write_contributors)
def share_db():
    """Generate db client for sharejs db"""
    # Certificate verification is disabled deliberately for this
    # internal connection (ssl.CERT_NONE).
    connection = MongoClient(wiki_settings.SHAREJS_DB_URL, ssl_cert_reqs=ssl.CERT_NONE)
    return connection[wiki_settings.SHAREJS_DB_NAME]
def get_sharejs_content(node, wname):
    """Return the raw sharejs document body for a wiki page, or '' when
    the document does not exist."""
    doc = share_db()['docs'].find_one({'_id': get_sharejs_uuid(node, wname)})
    if doc:
        return doc['_data']
    return ''
def broadcast_to_sharejs(action, sharejs_uuid, node=None, wiki_name='home', data=None):
    """
    Broadcast an action to all documents connected to a wiki.

    Actions include 'lock', 'unlock', 'redirect', and 'delete'
    'redirect' and 'delete' both require a node to be specified
    'unlock' requires data to be a list of contributors with write permission
    """
    url = 'http://{host}:{port}/{action}/{id}/'.format(
        host=wiki_settings.SHAREJS_HOST,
        port=wiki_settings.SHAREJS_PORT,
        action=action,
        id=sharejs_uuid
    )
    if action == 'redirect' or action == 'delete':
        # These actions carry the (url-quoted) wiki page URL as a path
        # suffix so sharejs can point clients at it.
        redirect_url = quote(
            node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True),
            safe='',
        )
        url = os.path.join(url, redirect_url)
    # Best-effort delivery: an unreachable sharejs is not an error.
    try:
        requests.post(url, json=data)
    except requests.ConnectionError:
        pass  # Assume sharejs is not online
def format_wiki_version(version, num_versions, allow_preview):
    """
    Normalize a requested wiki version identifier.

    Numeric versions referring to the newest two revisions are mapped to
    'current'/'previous'; other in-range numbers are returned as ints.
    Falsy input returns None; out-of-range or unknown identifiers raise
    InvalidVersionError.

    :param str version: 'preview', 'current', 'previous', '1', '2', ...
    :param int num_versions:
    :param allow_preview: True if view, False if compare
    """
    if not version:
        return None
    if version.isdigit():
        requested = int(version)
        if requested > num_versions or requested < 1:
            raise InvalidVersionError
        if requested == num_versions:
            return 'current'
        if requested == num_versions - 1:
            return 'previous'
        return requested
    if version not in ('current', 'previous'):
        # Only 'preview' is acceptable here, and only for views.
        if allow_preview and version == 'preview':
            return version
        raise InvalidVersionError
    if version == 'previous' and num_versions == 0:
        raise InvalidVersionError
    return version
def serialize_wiki_settings(user, nodes):
    """ Format wiki data for project settings page

    :param user: modular odm User object
    :param nodes: list of parent project nodes
    :return: treebeard-formatted data
    """
    WikiPage = apps.get_model('addons_wiki.WikiPage')
    items = []
    for node in nodes:
        assert node, '{} is not a valid Node.'.format(node._id)
        can_read = node.has_permission(user, READ)
        is_admin = node.has_permission(user, ADMIN)
        include_wiki_settings = WikiPage.objects.include_wiki_settings(node)
        if not include_wiki_settings:
            continue
        children = node.get_nodes(**{'is_deleted': False, 'is_node_link': False})
        children_tree = []
        # First child entry: the node's own wiki edit-permission level.
        wiki = node.get_addon('wiki')
        if wiki:
            children_tree.append({
                'select': {
                    'title': 'permission',
                    'permission':
                        'public'
                        if wiki.is_publicly_editable
                        else 'private'
                },
            })
        # Recurse into child nodes.
        children_tree.extend(serialize_wiki_settings(user, children))
        # Titles/urls are hidden from users without read access.
        item = {
            'node': {
                'id': node._id,
                'url': node.url if can_read else '',
                'title': node.title if can_read else 'Private Project',
                'is_public': node.is_public
            },
            'children': children_tree,
            'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, READ) else 'node',
            'nodeType': node.project_or_component,
            'category': node.category,
            'permissions': {
                'view': can_read,
                'admin': is_admin,
            },
        }
        items.append(item)
    return items
def serialize_wiki_widget(node):
    """Build the context dict for the wiki widget of *node*."""
    from addons.wiki.models import WikiVersion
    wiki = node.get_addon('wiki')
    wiki_version = WikiVersion.objects.get_for_node(node, 'home')
    # Show "Read more" link if there are multiple pages or has > 400 characters
    more = node.wikis.filter(deleted__isnull=True).count() >= 2
    MAX_DISPLAY_LENGTH = 400
    rendered_before_update = False
    if wiki_version and wiki_version.content:
        if len(wiki_version.content) > MAX_DISPLAY_LENGTH:
            more = True
        rendered_before_update = wiki_version.rendered_before_update
    # Content fetched and rendered by front-end
    wiki_html = None
    wiki_widget_data = {
        'complete': True,
        'wiki_content': wiki_html if wiki_html else None,
        'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
        'rendered_before_update': rendered_before_update,
        'more': more,
        'include': False,
    }
    wiki_widget_data.update(wiki.config.to_json())
    return wiki_widget_data
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Makes deletion behaviour explicit: both relations now declare
    # on_delete=models.CASCADE.
    dependencies = [
        ('cfp', '0004_paperapplication_duration'),
    ]
    operations = [
        migrations.AlterField(
            model_name='applicant',
            name='user',
            field=models.OneToOneField(related_name='applicant', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='paperapplication',
            name='applicant',
            field=models.ForeignKey(related_name='applications', to='cfp.Applicant', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licen | ses/LICENSE-2.0
#
# U | nless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
A trivial MapReduce application that counts the occurrence of each
vowel in a text input stream. It is more structured than would be
necessary because we want to test automatic distribution of a package
rather than a single module.
"""
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import logging
from pyasn1 import __version__
from pyasn1 import error
from pyasn1.compat.octets import octs2ints
__all__ = ['Debug', 'setLogger', 'hexdump']
# Debug-category bit flags, combined into a bitmask by Debug below.
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
# Maps the user-facing flag names (as passed to Debug()) to their bits.
flagMap = {
    'none': flagNone,
    'encoder': flagEncoder,
    'decoder': flagDecoder,
    'all': flagAll
}
class Printer(object):
    """Callable that routes debug messages through the stdlib logging
    machinery (logger 'pyasn1' at DEBUG level by default)."""
    # noinspection PyShadowingNames
    def __init__(self, logger=None, handler=None, formatter=None):
        if logger is None:
            logger = logging.getLogger('pyasn1')
            logger.setLevel(logging.DEBUG)
        if handler is None:
            handler = logging.StreamHandler()
        if formatter is None:
            formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
        # Wire formatter -> handler -> logger.
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        self.__logger = logger
    def __call__(self, msg):
        self.__logger.debug(msg)
    def __str__(self):
        return '<python logging>'
# logging.NullHandler exists on Python 2.7+; provide a fallback shim for
# older interpreters.
if hasattr(logging, 'NullHandler'):
    NullHandler = logging.NullHandler
else:
    # Python 2.6 and older
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
class Debug(object):
    """Debug sink: parses flag names into a bitmask and forwards
    messages to a Printer."""
    # Shared fallback printer; instantiated once at class-definition time.
    defaultPrinter = Printer()
    def __init__(self, *flags, **options):
        self._flags = flagNone
        if 'loggerName' in options:
            # route our logs to parent logger
            self._printer = Printer(
                logger=logging.getLogger(options['loggerName']),
                handler=NullHandler()
            )
        elif 'printer' in options:
            self._printer = options.get('printer')
        else:
            self._printer = self.defaultPrinter
        self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))
        for flag in flags:
            # A leading '!' or '~' negates (clears) the flag.
            inverse = flag and flag[0] in ('!', '~')
            if inverse:
                flag = flag[1:]
            try:
                if inverse:
                    self._flags &= ~flagMap[flag]
                else:
                    self._flags |= flagMap[flag]
            except KeyError:
                raise error.PyAsn1Error('bad debug flag %s' % flag)
            self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))
    def __str__(self):
        return 'logger %s, flags %x' % (self._printer, self._flags)
    def __call__(self, msg):
        self._printer(msg)
    # Support bitmask tests of the form `debug & flagX` / `flagX & debug`.
    def __and__(self, flag):
        return self._flags & flag
    def __rand__(self, flag):
        return flag & self._flags
# Module-level debug hook; 0 (falsy) means debugging is disabled.
logger = 0
def setLogger(userLogger):
    """Install *userLogger* (typically a Debug instance) as the module
    debug hook; any falsy value disables debugging."""
    global logger
    if userLogger:
        logger = userLogger
    else:
        logger = 0
def hexdump(octets):
    """Render *octets* as space-separated hex bytes, 16 per line, each
    line prefixed with its decimal offset."""
    # IDIOM FIX: enumerate() replaces zip(range(len(...)), ...) -- same
    # (index, value) pairs without building an intermediate range.
    return ' '.join(
        ['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
         for n, x in enumerate(octs2ints(octets))]
    )
class Scope(object):
    """Stack of name tokens rendered as a dotted path (e.g. 'a.b.c')."""
    def __init__(self):
        self._list = []
    def __str__(self):
        return '.'.join(self._list)
    def push(self, token):
        """Enter a nested scope named *token*."""
        self._list.append(token)
    def pop(self):
        """Leave the innermost scope, returning its token."""
        return self._list.pop()
scope = Scope()
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_perplex
---------------
This minimal example demonstrates how burnman can be used
to read and interrogate a PerpleX tab file
(as produced by burnman/misc/create_burnman_readable_perplex_table.py
It also demonstrates how we can smooth a given property on a given P-T grid.
*Uses:*
* :doc:`PerplexMaterial`
* :func:`burnman.Material.evaluate`
* :func:`burnman.tools.math.smooth_array`
*Demonstrates:*
* Use of PerplexMaterial
* Smoothing gridded properties
"""
import numpy as np
import matplotlib.pyplot as plt
import burnman
from burnman.tools.math import smooth_array
if __name__ == "__main__":
    # Load a P-T table of pre-computed properties produced by PerpleX.
    rock = burnman.PerplexMaterial('../burnman/data/input_perplex/in23_1.tab')

    # Query a single state and report the density.
    P = 1.e9
    T = 1650.
    rock.set_state(P, T)
    print('P: {0:.1f} GPa, T: {1:.1f} K, density: {2:.1f} kg/m^3'.format(P/1.e9, T, rock.rho))

    # Evaluate density along a 1650 K isotherm and plot it.
    pressures = np.linspace(10.e9, 25.e9, 151)
    temperatures = [T] * len(pressures)
    densities = rock.evaluate(['rho'], pressures, temperatures)[0]
    plt.plot(pressures/1.e9, densities)
    plt.xlabel('Pressure (GPa)')
    plt.ylabel('Density (kg/m^3)')
    plt.show()

    # Smooth entropy along the same isotherm with a Gaussian in pressure.
    # (Redundant re-definitions of `pressures`/`T` and an unused
    # `temperatures` grid that previously sat here were dead code and
    # have been removed; behavior is unchanged.)
    entropies = rock.evaluate(['S'], pressures,
                              np.array([T] * len(pressures)))[0]
    smoothed_entropies = smooth_array(array=entropies,
                                      grid_spacing=np.array([pressures[1]
                                                             - pressures[0]]),
                                      gaussian_rms_widths=np.array([5.e8]))
    plt.plot(pressures/1.e9, entropies, label='entropies')
    plt.plot(pressures/1.e9, smoothed_entropies, label='smoothed entropies')
    plt.xlabel('Pressure (GPa)')
    plt.ylabel('Entropy (J/K/mol)')
    plt.legend(loc='upper right')
    plt.show()
|
assert_array_almost_equal(ivim_fit_multi.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_ivim_errors():
    """
    Exercise the bounded-fit code path and check the reconstruction.

    Bounded least squares requires Scipy >= 0.17; historically,
    initializing an IvimModel with bounds on older Scipy raised an
    error. NOTE(review): despite the original docstring, no error is
    asserted here -- the test only verifies that a bounded fit succeeds
    and reproduces the reference parameters and signal.
    """
    ivim_model_trr = IvimModel(gtab, bounds=([0., 0., 0., 0.],
                                             [np.inf, 1., 1., 1.]),
                               fit_method='trr')
    ivim_fit = ivim_model_trr.fit(data_multi)
    est_signal = ivim_fit.predict(gtab, S0=1.)
    assert_array_equal(est_signal.shape, data_multi.shape)
    assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
    assert_array_almost_equal(est_signal, data_multi)
def test_mask():
    """
    Test that a valid mask fits cleanly and a wrongly-shaped mask raises.
    """
    mask_correct = data_multi[..., 0] > 0.2
    # Ragged rows yield a shape-(2,) object array that cannot match the
    # data volume, which is what the ValueError check below relies on.
    # (`np.bool` was removed in NumPy 1.24, and a ragged list with a bool
    # dtype no longer builds at all, so `dtype=object` is used instead.)
    mask_not_correct = np.array([[False, True, False], [True, False]],
                                dtype=object)
    ivim_fit = ivim_model_trr.fit(data_multi, mask_correct)
    est_signal = ivim_fit.predict(gtab, S0=1.)
    assert_array_equal(est_signal.shape, data_multi.shape)
    assert_array_almost_equal(est_signal, data_multi)
    assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
    assert_raises(ValueError, ivim_model_trr.fit, data_multi,
                  mask=mask_not_correct)
def test_with_higher_S0():
    """
    Check that the fit recovers parameters when the signal scale S0 > 1.
    """
    # Single-voxel ground truth with a large S0.
    s0_big = 1000.
    expected_params = np.array([s0_big, f, D_star, D])
    eigenvalues = np.array(([D_star, D_star, D_star], [D, D, D]))
    # This gives an isotropic two-compartment signal without noise.
    sim_signal = multi_tensor(gtab, eigenvalues, snr=None, S0=s0_big,
                              fractions=[f * 100, 100 * (1 - f)])
    voxel = sim_signal[0]
    fit = ivim_model_trr.fit(voxel)
    predicted = fit.predict(gtab)
    assert_array_equal(predicted.shape, voxel.shape)
    assert_array_almost_equal(predicted, voxel)
    assert_array_almost_equal(fit.model_params, expected_params)
def test_b0_threshold_greater_than0():
    """
    Added test case for default b0_threshold set to 50.
    Checks that the expected ValueError (and its message) is raised.
    """
    bvals_b0t = np.array([50., 10., 20., 30., 40., 60., 80., 100.,
                          120., 140., 160., 180., 200., 300., 400.,
                          500., 600., 700., 800., 900., 1000.])
    N = len(bvals_b0t)
    bvecs = generate_bvecs(N)
    gtab = gradient_table(bvals_b0t, bvecs.T)
    with assert_raises(ValueError) as vae:
        _ = IvimModel(gtab, fit_method='trr')
    b0_s = "The IVIM model requires a measurement at b==0. As of "
    # `vae.exception` is the exception *instance*; substring membership
    # must be tested against its string form (`"..." in exc` raises
    # TypeError, so the previous version could never pass meaningfully).
    assert b0_s in str(vae.exception)
def test_bounds_x0():
    """
    Check the fit converges for a signal whose initial value is higher
    than subsequent values.

    The reference values come from the IVIM dataset (voxel [160, 98, 33]),
    obtainable with `read_ivim` from dipy.data.fetcher:

    .. code-block:: python

       from dipy.data.fetcher import read_ivim
       img, gtab = read_ivim()
       data = load_nifti_data(img)
       signal = data[160, 98, 33, :]
    """
    reference_params = np.array([1., 0.13, 0.001, 0.0001])
    synthetic_signal = ivim_prediction(reference_params, gtab)
    fitted = ivim_model_trr.fit(synthetic_signal)
    reconstructed = fitted.predict(gtab)
    assert_array_equal(reconstructed.shape, synthetic_signal.shape)
def test_predict():
    """
    Test the model prediction API.

    The predict method is already used in previous tests for estimation of
    the signal; here it is tested separately, both on the fit object and
    directly on the model with an explicit parameter array.
    """
    assert_array_almost_equal(ivim_fit_single.predict(gtab),
                              data_single)
    # Model-level predict takes the fitted parameters explicitly.
    assert_array_almost_equal(ivim_model_trr.predict
                              (ivim_fit_single.model_params, gtab),
                              data_single)
    ivim_fit_multi = ivim_model_trr.fit(data_multi)
    assert_array_almost_equal(ivim_fit_multi.predict(gtab),
                              data_multi)
def test_fit_object():
    """
    Test the indexing (`__getitem__`) behavior of the IvimFit class.
    """
    assert_raises(IndexError, ivim_fit_single.__getitem__, (-.1, 0, 0))
    # Check if the S0 called is matching
    assert_array_almost_equal(
        ivim_fit_single.__getitem__(0).model_params, 1000.)
    ivim_fit_multi = ivim_model_trr.fit(data_multi)
    # Should raise a TypeError if the arguments are not passed as tuple
    assert_raises(TypeError, ivim_fit_multi.__getitem__, -.1, 0)
    # Should return IndexError if invalid indices are passed
    # (note: -0 == 0, so these exercise the out-of-range first axis)
    assert_raises(IndexError, ivim_fit_multi.__getitem__, (100, -0))
    assert_raises(IndexError, ivim_fit_multi.__getitem__, (100, -0, 2))
    assert_raises(IndexError, ivim_fit_multi.__getitem__, (-100, 0))
    assert_raises(IndexError, ivim_fit_multi.__getitem__, [-100, 0])
    # Too many indices for the (2, 2, 1) fit volume must also fail.
    assert_raises(IndexError, ivim_fit_multi.__getitem__, (1, 0, 0, 3, 4))
    # Check if the get item returns the S0 value for voxel (1,0,0)
    assert_array_almost_equal(
        ivim_fit_multi.__getitem__((1, 0, 0)).model_params[0],
        data_multi[1, 0, 0][0])
def test_shape():
    """
    Test if `shape` in `IvimFit` class gives the correct output.
    """
    # A single-voxel fit has scalar (empty) shape.
    assert_array_equal(ivim_fit_single.shape, ())
    ivim_fit_multi = ivim_model_trr.fit(data_multi)
    assert_array_equal(ivim_fit_multi.shape, (2, 2, 1))
def test_multiple_b0():
    """Smoke test: fitting with `gtab_with_multiple_b0` (several b=0
    volumes, per its name -- defined in the module fixtures) completes."""
    # Generate a signal with multiple b0
    # This gives an isotropic signal.
    signal = multi_tensor(gtab_with_multiple_b0, mevals, snr=None, S0=S0,
                          fractions=[f * 100, 100 * (1 - f)])
    # Single voxel data
    data_single = signal[0]
    ivim_model_multiple_b0 = IvimModel(gtab_with_multiple_b0, fit_method='trr')
    ivim_model_multiple_b0.fit(data_single)
    # Test if all signals are positive
    # NOTE(review): the comment above has no matching assertion; the test
    # currently only checks that fitting completes without raising.
def test_no_b0():
    """Constructing an IvimModel from `gtab_no_b0` must raise ValueError."""
    assert_raises(ValueError, IvimModel, gtab_no_b0)
def test_noisy_fit():
    """
    Test fitting for noisy signals. This tests whether the threshold condition
    applies correctly and returns the linear fitting parameters.

    For older scipy versions, the returned value of `f` from a linear fit is
    around 135 and D and D_star values are equal. Hence doing a test based on
    Scipy version.
    """
    model_one_stage = IvimModel(gtab, fit_method='trr')
    with warnings.catch_warnings(record=True) as w:
        fit_one_stage = model_one_stage.fit(noisy_single)
    # Exactly three UserWarnings are expected from the staged fitting.
    assert_equal(len(w), 3)
    for l_w in w:
        assert_(issubclass(l_w.category, UserWarning))
    # A vacuous `assert_("" in str(w[0].message))` was removed here: the
    # empty string is a substring of everything, so it could never fail.
    # ("feasibile" below matches the library's own message spelling.)
    assert_("x0 obtained from linear fitting is not feasibile" in
            str(w[0].message))
    assert_("x0 is unfeasible" in str(w[1].message))
    assert_("Bounds are violated for leastsq fitting" in str(w[2].message))
    assert_array_less(fit_one_stage.model_params, [10000., 0.3, .01, 0.001])
def test_S0():
    """
    Test if the `IvimFit` class returns the correct S0
    """
    assert_array_almost_equal(ivim_fit_single.S0_predicted, S0)
    # S0 is stored in parameter column 0 of the fitted model params.
    assert_array_almost_equal(ivim_fit_multi.S0_predicted,
                              ivim_params_trr[..., 0])
def test_perfusion_fraction():
    """
    Test if the `IvimFit` class returns the correct f
    """
    assert_array_almost_equal(ivim_fit_single.perfusion_fraction, f)
    # f is stored in parameter column 1 of the fitted model params.
    assert_array_almost_equal(
        ivim_fit_multi.perfusion_fraction, ivim_params_trr[..., 1])
def test_D_star():
    """
    Test if the `IvimFit` class returns the correct D_star
    """
    assert_array_almost_equal(ivim_fit_single.D_star, D_star)
    # D_star is stored in parameter column 2 of the fitted model params.
    assert_array_almost_equal(ivim_fit_multi.D_star, ivim_params_trr[..., 2])
def test_D():
"""
Test if the `IvimFit` class returns the correct D
"""
assert_array_almost_equal(ivim_fit_single.D, D)
assert_array_almost_equal(ivim_fit_multi.D, ivim_params_trr[.. |
import os
import shutil
from cumulusci.core.exceptions import DependencyResolutionError
from cumulusci.core.github import get_github_api_for_repo
from cumulusci.core.github import find_latest_release
from cumulusci.core.github import find_previous_release
from cumulusci.utils import download_extract_github
class GitHubSource:
    """A remote project source: a GitHub repository pinned to a resolved commit.

    Construction resolves the spec to a concrete commit SHA immediately
    (which performs GitHub API calls); `fetch` downloads and caches the
    repository contents for that commit.
    """

    def __init__(self, project_config, spec):
        self.project_config = project_config
        self.spec = spec
        self.url = spec["github"]
        # Normalize clone-style URLs ("…/repo.git") to the plain repo URL.
        if self.url.endswith(".git"):
            self.url = self.url[:-4]

        repo_owner, repo_name = self.url.split("/")[-2:]
        self.repo_owner = repo_owner
        self.repo_name = repo_name
        self.gh = get_github_api_for_repo(
            project_config.keychain, repo_owner, repo_name
        )
        self.repo = self.gh.repository(self.repo_owner, self.repo_name)
        # Eagerly pin the spec to a specific commit (network call).
        self.resolve()

    def __repr__(self):
        return f"<GitHubSource {str(self)}>"

    def __str__(self):
        s = f"GitHub: {self.repo_owner}/{self.repo_name}"
        if self.description:
            s += f" @ {self.description}"
        if self.commit != self.description:
            s += f" ({self.commit})"
        return s

    # NOTE(review): __hash__ is defined without __eq__, so equality stays
    # identity-based while hashing is value-based -- confirm this is the
    # intended semantics for de-duplicating sources.
    def __hash__(self):
        return hash((self.url, self.commit))

    def resolve(self):
        """Resolve a github source into a specific commit.
        The spec must include:
        - github: the URL of the github repository
        The spec may include one of:
        - commit: a commit hash
        - ref: a git ref
        - branch: a git branch
        - tag: a git tag
        - release: "latest" | "previous" | "latest_beta"
        If none of these are specified, CumulusCI will look for the latest release.
        If there is no release, it will use the default branch.
        """
        ref = None
        if "commit" in self.spec:
            # Already concrete: no API call needed.
            self.commit = self.description = self.spec["commit"]
            return
        elif "ref" in self.spec:
            ref = self.spec["ref"]
        elif "tag" in self.spec:
            ref = "tags/" + self.spec["tag"]
        elif "branch" in self.spec:
            ref = "heads/" + self.spec["branch"]
        elif "release" in self.spec:
            release_spec = self.spec["release"]
            if release_spec == "latest":
                release = find_latest_release(self.repo, include_beta=False)
            elif release_spec == "latest_beta":
                release = find_latest_release(self.repo, include_beta=True)
            elif release_spec == "previous":
                release = find_previous_release(self.repo)
            else:
                raise DependencyResolutionError(f"Unknown release: {release_spec}")
            if release is None:
                raise DependencyResolutionError(
                    f"Could not find release: {release_spec}"
                )
            ref = "tags/" + release.tag_name
        # Fallback: latest non-beta release, else the default branch.
        if ref is None:
            release = find_latest_release(self.repo, include_beta=False)
            if release:
                ref = "tags/" + release.tag_name
            else:
                ref = "heads/" + self.repo.default_branch
        # Strip the "heads/" prefix (6 chars) for a human-readable label.
        self.description = ref[6:] if ref.startswith("heads/") else ref
        self.commit = self.repo.ref(ref).object.sha

    def fetch(self, path=None):
        """Fetch the archive of the specified commit and construct its project config."""
        # To do: copy this from a shared cache
        if path is None:
            # Cache is keyed by repo name and commit SHA.
            path = (
                self.project_config.cache_dir
                / "projects"
                / self.repo_name
                / self.commit
            )
        if not path.exists():
            path.mkdir(parents=True)
            zf = download_extract_github(
                self.gh, self.repo_owner, self.repo_name, ref=self.commit
            )
            try:
                zf.extractall(path)
            except Exception:
                # make sure we don't leave an incomplete cache
                shutil.rmtree(path)
                raise
        assert path.is_dir()
        project_config = self.project_config.construct_subproject_config(
            repo_info={
                "root": os.path.realpath(path),
                "owner": self.repo_owner,
                "name": self.repo_name,
                "url": self.url,
                "commit": self.commit,
            }
        )
        return project_config

    @property
    def frozenspec(self):
        """Return a spec to reconstruct this source at the current commit"""
        return {
            "github": self.url,
            "commit": self.commit,
            "description": self.description,
        }
|
# assign epitope fitness to each node in the phylogeny
import time
from io_util import *
from tree_util import *
from date_util import *
from seq_util import *
import numpy as np
from itertools import izip
from collections import defaultdict
def append_nonepitope_sites(viruses):
    """Attach each virus's non-epitope site string under the 'sites_ne' key."""
    for v in viruses:
        v['sites_ne'] = nonepitope_sites(v['seq'])
def remove_nonepitope_sites(viruses):
    """Delete the 'sites_ne' entry from every virus record, if present."""
    for v in viruses:
        v.pop("sites_ne", None)
def remove_nonepitope_distances(viruses):
    """Delete the 'distance_ne' entry from every virus record, if present."""
    for v in viruses:
        v.pop("distance_ne", None)
def most_frequent(char_list):
    """Return the most common element of char_list.

    Ties are broken by first insertion order (stable sort over counts).
    Uses dict.items() instead of the Python-2-only iteritems(), so the
    function works under both Python 2 and 3; behavior is unchanged.
    """
    d = defaultdict(int)
    for i in char_list:
        d[i] += 1
    return sorted(d.items(), key=lambda x: x[1], reverse=True)[0][0]
def consensus_nonepitope(viruses):
    """Return consensus non-epitope sequence (column-wise majority vote)."""
    length = len(viruses[0]['sites_ne'])
    columns = ([v['sites_ne'][i] for v in viruses] for i in range(0, length))
    return "".join(most_frequent(column) for column in columns)
def distance_to_consensus(virus, consensus_ne):
    """Return the Hamming distance between the virus's non-epitope sites
    and the consensus sequence.

    Uses the builtin zip instead of the Python-2-only itertools.izip,
    so the function works under both Python 2 and 3; the result is
    identical (pairs are still truncated to the shorter sequence).
    """
    virus_ne = virus['sites_ne']
    return sum(a != b for a, b in zip(virus_ne, consensus_ne))
def compute(viruses):
    """Append non-epitope distances to each virus"""
    # Distance is measured against the consensus of all non-epitope sites.
    # NOTE(review): the message says "epitope" but these are non-epitope
    # distances -- likely a copy-paste from the epitope module.
    print "Computing epitope distances"
    consensus = consensus_nonepitope(viruses)
    for virus in viruses:
        distance = distance_to_consensus(virus, consensus)
        virus['distance_ne'] = distance
        print virus['strain'] + ": " + str(virus['distance_ne'])
def normalize(viruses):
    """Normalizing non-epitope distances to give non-epitope fitness"""
    print "Normalizing non-epitope distances"
    distances = [v['distance_ne'] for v in viruses]
    mean = np.mean(distances)
    sd = np.std(distances)
    for virus in viruses:
        # Negated z-score: larger distance from consensus => lower fitness.
        virus['fitness_ne'] = -1 * ( ( virus['distance_ne'] - mean) / sd )
        print virus['strain'] + ": " + str(virus['fitness_ne'])
def main(in_fname = None):
    """Compute non-epitope distances for every virus and write them to JSON.

    Reads `in_fname` (default 'data/virus_epitope.json'), annotates each
    record with 'distance_ne', strips the intermediate 'sites_ne' field,
    and returns the output filename so pipeline steps can be chained.
    """
    print "--- Non-epitope fitness at " + time.strftime("%H:%M:%S") + " ---"
    if in_fname is None: in_fname='data/virus_epitope.json'
    viruses = read_json(in_fname)
    append_nonepitope_sites(viruses)
    compute(viruses)
    # normalize(viruses)
    remove_nonepitope_sites(viruses)
    # remove_nonepitope_distances(viruses)
    out_fname = "data/virus_nonepitope.json"
    write_json(viruses, out_fname)
    return out_fname
# Script entry point: run the non-epitope fitness pipeline with defaults.
if __name__ == "__main__":
    main()
|
from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
    from django.contrib.gis import gdal
except ImportError:
    # A 'dummy' gdal module.
    class GDALInfo(object):
        # Minimal stand-in exposing the same capability flag as the
        # real module, so callers can probe gdal.HAS_GDAL safely.
        HAS_GDAL = False
    gdal = GDALInfo()
# NumPy supported?
try:
    import numpy
except ImportError:
    # Falsy sentinel: code can guard with `if numpy:` before using it.
    numpy = False
class GEOSBase(object):
    """
    Base object for GEOS objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # The pointer starts out NULL; subclasses assign it when a geometry
    # is created.
    _ptr = None

    # Pointers assigned through the property must be of this ctypes type.
    ptr_type = c_void_p

    def _get_ptr(self):
        """Return the C pointer, refusing to hand out NULL.

        Passing NULL pointers into GEOS routines is dangerous, so a
        NULL value raises GEOSException instead of being returned.
        """
        if not self._ptr:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        """Store `ptr` if it is None (NULL) or of the compatible ctypes type."""
        if ptr is not None and not isinstance(ptr, self.ptr_type):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr

    # Property for controlling access to the GEOS object pointers. Using
    # this raises an exception when the pointer is NULL, thus preventing
    # the C library from attempting to access an invalid memory location.
    ptr = property(_get_ptr, _set_ptr)
|
from tensorboardX import SummaryWriter
import unittest
from tensorboardX.record_writer import S3RecordWriter, make_valid_tf_name, GCSRecordWriter
import os
import boto3
from moto import mock_s | 3
# Provide dummy AWS credentials so boto3/moto can initialise without a
# real account; setdefault keeps any credentials already in the env.
os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
class RecordWriterTest(unittest.TestCase):
    """Tests for tensorboardX record writers (S3 is mocked via moto)."""

    @mock_s3
    def test_record_writer_s3(self):
        # The bucket must exist in the mocked S3 before the writer uploads.
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='this')
        writer = S3RecordWriter('s3://this/is/apen')
        bucket, path = writer.bucket_and_path()
        assert bucket == 'this'
        assert path == 'is/apen'
        # NOTE(review): on Python 3, bytes(42) is 42 zero bytes --
        # presumably just an arbitrary payload for the flush check.
        writer.write(bytes(42))
        writer.flush()

    def test_make_valid_tf_name(self):
        # Invalid characters are sanitised; a name starting with an
        # invalid character gains a leading '.'.
        newname = make_valid_tf_name('$ave/&sound')
        assert newname == '._ave/_sound'

    def test_record_writer_gcs(self):
        pass
        # we don't have mock test, so expect error here. However,
        # Travis CI env won't raise exception for the following code,
        # so I commented it out.
        # with self.assertRaises(Exception):
        #     writer = GCSRecordWriter('gs://this/is/apen')
        #     writer.write(bytes(42))
        #     writer.flush()
|
name_lat=latname_dist[i], latpath=self.folder)
self.runlattice(jobname=latname_dist[i], folder=self.folder)
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if all(already):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
if conv[0] == False:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=self.xc)
en_c66.append(en)
# Convert energies into a numpy array
en_c66 = np.asarray(en_cprime)
# Monoclinic distortion for c44 next
jobname_dist = ['_hcpm0_ca0',
'_hcpm1_ca0',
'_hcpm2_ca0',
'_hcpm3_ca0',
'_hcpm4_ca0',
'_hcpm5_ca0']
latname_dist = ['hcpm0_ca0',
'hcpm1_ca0',
'hcpm2_ca0',
'hcpm3_ca0',
'hcpm4_ca0',
'hcpm5_ca0']
self.emto.set_values(ibz=12, nkx=30, nky=20, nkz=20)
en_c44 = []
for i in range(len(jobname_dist)):
# With hcp the structure depends on the c/a ratio. Therefore we also have
# to generate the corresponding structure files.
self.lattice.distortion(lat='hcp', dist='mono', ca=self.ca, index=i,
deltas=self.elastic_constants_deltas)
self.lattice.set_values(
jobname_lat=latname_dist[i], latpath=self.folder)
self.runlattice(jobname=latname_dist[i], folder=self.folder)
###############################################################
# Atconf related arrays need to be modified because we now have
# a four atom basis.
###############################################################
self.atoms = np.array([self.atoms, self.atoms]).flatten()
self.concs = np.array([self.concs, self.concs]).flatten()
self.iqs = np.zeros(len(self.atoms), dtype='int32')
len_div = len(self.iqs) // 4
for i in range(4):
self.iqs[i * len_div:(i + 1) * len_div] = i + 1
self.splts = np.array([self.splts, self.splts]).flatten()
self.itas = np.array([self.itas, self.itas]).flatten()
self.emto.set_values(atoms=self.atoms, iqs=self.iqs, itas=self.itas,
concs=self.concs, splts=self.splts)
already = False
job = self.create_jobname(self.jobname + jobname_dist[i])
self.emto.set_values(
sws=self.sws, jobname=job, latname=latname_dist[i])
# check if calculations are already done
already = self.check_conv(job, folder=self.folder)
if all(already):
conv = (True, True)
else:
if already[0] and not already[1]:
conv = self.runemto(
jobname=job, folder=self.folder, onlyKFCD=True)
else:
conv = self.runemto(jobname=job, folder=self.folder)
# KGRN has crashed, find out why
if conv[0] == False:
self.which_error(job, folder=self.folder)
quit()
en = self.get_energy(job, folder=self.folder, func=self.xc)
en_c44.append(en)
# Convert energies into a numpy array
en_c44 = np.asarray(en_c44)
# All calculations have been done, now it's time to fit the results
popt_c66, c66_rsq = eos.distortion_fit(deltas, en_c66)
popt_c44, c44_rsq = eos.distortion_fit(deltas, en_c44)
volume = 4.0 / 3.0 * np.pi * self.sws**3
c66 = popt_c66[0] / 2.0 / volume * self.RyBohr3_to_GPa
c44 = popt_c44[0] / 2. | 0 / volume * self.RyBohr3_to_GPa
c11 = self.bmod + c66 + self.ec_analyze_cs * \
(2 * self.e | c_analyze_R - 1)**2 / 18.0
c12 = self.bmod - c66 + self.ec_analyze_cs * \
(2 * self.ec_analyze_R - 1)**2 / 18.0
c13 = self.bmod + 1.0 / 9.0 * self.ec_analyze_cs * (
2 * self.ec_analyze_R**2 + self.ec_analyze_R - 1)
c33 = self.bmod + 2.0 / 9.0 * \
self.ec_analyze_cs * (self.ec_analyze_R + 1)**2
c2 = c33 * (c11 + c12) - 2.0 * c13**2
# Polycrystalline elastic constants
#
# B = bulk modulus
# G = shear modulus
# E = Young modulus
# v = Poisson ratio
# Voigt average
BV = (2 * c11 + 2 * c12 + 4 * c13 + c33) / 9.0
GV = (12 * c44 + 12 * c66 + self.ec_analyze_cs) / 30.0
EV = 9 * BV * GV / (3 * BV + GV)
vV = (3 * BV - 2 * GV) / (6 * BV + 2 * GV)
# Reuss average
BR = self.bmod
GR = 5.0 / 2.0 * (c44 * c66 * c2) / \
((c44 + c66) * c2 + 3.0 * BV * c44 * c66)
ER = 9 * BR * GR / (3 * BR + GR)
vR = (3 * BR - 2 * GR) / (6 * BR + 2 * GR)
# Hill average
BH = (BV + BR) / 2.0
#BH = self.bmod
GH = (GV + GR) / 2.0
EH = 9 * BH * GH / (3 * BH + GH)
vH = (3 * BH - 2 * GH) / (6 * BH + 2 * GH)
# Elastic anisotropy
AVR = (GV - GR) / (GV + GR)
print("")
print('***hcp_elastic_constants***')
print("")
print(self.jobname)
print("")
print('c11(GPa) = {0:6.2f}'.format(c11))
print('c12(GPa) = {0:6.2f}'.format(c12))
print('c13(GPa) = {0:6.2f}'.format(c13))
print('c33(GPa) = {0:6.2f}'.format(c33))
print(
'c44(GPa) = {0:6.2f}, R-squared = {1:8.6f}'.format(c44, c44_rsq))
print(
'c66(GPa) = {0:6.2f}, R-squared = {1:8.6f}'.format(c66, c66_rsq))
print('B (GPa) = {0:6.2f}'.format(self.bmod))
print("")
print('Voigt average:')
print("")
print('BV(GPa) = {0:6.2f}'.format(BV))
print('GV(GPa) = {0:6.2f}'.format(GV))
print('EV(GPa) = {0:6.2f}'.format(EV))
print('vV(GPa) = {0:6.2f}'.format(vV))
print("")
print('Reuss average:')
print("")
print('BR(GPa) = {0:6.2f}'.format(BR))
print('GR(GPa) = {0:6.2f}'.format(GR))
print('ER(GPa) = {0:6.2f}'.format(ER))
print('vR(GPa) = {0:6.2f}'.format(vR))
print("")
print('Hill average:')
print("")
print('BH(GPa) = {0:6.2f}'.format(BH))
print('GH(GPa) = {0:6.2f}'.format(GH))
print('EH(GPa) = {0:6.2f}'.format(EH))
print('vH(GPa) = {0:6.2f}'.format(vH))
print("")
print('Elastic anisotropy:')
print("")
print('AVR(GPa) = {0:6.2f}'.format(AVR))
return
##########################################################################
# |
from Module import AbstractModule
class Module(AbstractModule):
    """Plot per-sample classifier predictions as a bar chart."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, antecedents, out_attributes, user_options, num_cores,
        outfile):
        """Read the prediction table from `antecedents` and save a bar plot.

        Plots the 'Confidence' column when it is populated; otherwise
        (all confidences empty, or a 'Correct?' column present) plots
        the predicted class labels instead.
        """
        from genomicode import mplgraph
        from genomicode import filelib
        in_data = antecedents
        matrix = [x for x in filelib.read_cols(in_data.identifier)]
        header = matrix[0]
        index = header.index('Confidence')
        matrix = matrix[1:]
        # Keep the raw strings first: the previous version converted to
        # float eagerly, which crashed on empty confidence cells and made
        # the all-empty check below unreachable (a list of floats can
        # never equal a list of '' strings).
        raw_confidence = [i[index] for i in matrix]
        sample = [i[0] for i in matrix]
        if raw_confidence == [''] * len(matrix) or 'Correct?' in header:
            index = header.index('Predicted_class')
            class_value = [i[index] for i in matrix]
            # Map each distinct label to a small integer in order of
            # first appearance, for use as y positions.
            label_dict = {}
            label_list = []
            for label in class_value:
                if label not in label_dict:
                    label_dict[label] = len(label_dict)
                label_list.append(label_dict[label])
            yticks = list(label_dict)
            ytick_pos = [label_dict[k] for k in yticks]
            fig = mplgraph.barplot(label_list,
                                   box_label=sample,
                                   ylim=(-0.5, 1.5),
                                   ytick_pos=ytick_pos,
                                   yticks=yticks,
                                   xtick_rotation='vertical',
                                   ylabel='Prediction',
                                   xlabel='Sample')
            fig.savefig(outfile)
        else:
            # Confidences are populated: convert and plot them directly.
            confidence = [float(c) for c in raw_confidence]
            fig = mplgraph.barplot(confidence,
                                   box_label=sample,
                                   ylim=(-1.5, 1.5),
                                   xtick_rotation='vertical',
                                   ylabel='Prediction',
                                   xlabel='Sample')
            fig.savefig(outfile)
        assert filelib.exists_nz(outfile), (
            'the output file %s for plot_prediction_bar fails' % outfile
        )

    def name_outfile(self, antecedents, user_options):
        """Build the output filename from the input id, algorithm and loocv flag."""
        from Betsy import module_utils
        original_file = module_utils.get_inputid(antecedents.identifier)
        loocv = ''
        if antecedents.data.attributes['loocv'] == 'yes':
            loocv = 'loocv'
        filename = ('prediction_' + original_file + '_' +
                    antecedents.data.attributes['classify_alg'] + loocv + '.png')
        return filename
|
# Copyr | ight 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project | Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlConfigGeneral(PerlPackage):
    """Config::General - Generic Config Module"""

    # Canonical documentation and source tarball locations on CPAN.
    homepage = "https://metacpan.org/pod/Config::General"
    url = "https://cpan.metacpan.org/authors/id/T/TL/TLINDEN/Config-General-2.63.tar.gz"

    # The sha256 pins the release tarball for reproducible installs.
    version('2.63', sha256='0a9bf977b8aabe76343e88095d2296c8a422410fd2a05a1901f2b20e2e1f6fad')
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'openPathTool.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 exists only under PyQt4's API v1; fall back to the
# identity function when strings are already native unicode (API v2).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Older PyQt4 exposes UnicodeUTF8 as an extra translate() argument; newer
# versions drop it, so probe once and define _translate accordingly.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """Generated UI: a path-conversion window with input/output line edits
    and Open-In-Explorer / Convert / Close buttons.

    NOTE: this class was generated by pyuic4 from 'openPathTool.ui';
    prefer editing the .ui file and regenerating over hand edits.
    """

    def setupUi(self, MainWindow):
        # Build the widget tree and layouts on the given QMainWindow.
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(457, 95)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.formLayout = QtGui.QFormLayout(self.centralwidget)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        # Row 0: editable input path.
        self.pathInLineEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathInLineEdit.setObjectName(_fromUtf8("pathInLineEdit"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.pathInLineEdit)
        # Row 1: read-only converted output path.
        self.pathOutLineEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathOutLineEdit.setReadOnly(True)
        self.pathOutLineEdit.setObjectName(_fromUtf8("pathOutLineEdit"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.pathOutLineEdit)
        # Row 2: button row with expanding spacers between the buttons.
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
        self.explorerButton = QtGui.QPushButton(self.centralwidget)
        self.explorerButton.setObjectName(_fromUtf8("explorerButton"))
        self.buttonLayout.addWidget(self.explorerButton)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.buttonLayout.addItem(spacerItem)
        self.convertButton = QtGui.QPushButton(self.centralwidget)
        self.convertButton.setObjectName(_fromUtf8("convertButton"))
        self.buttonLayout.addWidget(self.convertButton)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.buttonLayout.addItem(spacerItem1)
        self.closeButton = QtGui.QPushButton(self.centralwidget)
        self.closeButton.setObjectName(_fromUtf8("closeButton"))
        self.buttonLayout.addWidget(self.closeButton)
        self.formLayout.setLayout(2, QtGui.QFormLayout.SpanningRole, self.buttonLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Apply the translatable window title, placeholders and button labels.
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.pathInLineEdit.setPlaceholderText(_translate("MainWindow", "Input Path", None))
        self.pathOutLineEdit.setPlaceholderText(_translate("MainWindow", "Output Path", None))
        self.explorerButton.setText(_translate("MainWindow", "Open In Explorer", None))
        self.convertButton.setText(_translate("MainWindow", "Convert", None))
        self.closeButton.setText(_translate("MainWindow", "Close", None))
# Manual preview entry point: show the generated window standalone.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive State Space Model Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel
from tensorflow_probability.python.sts import LocalLevelStateSpaceModel
def ar_explicit_logp(y, coefs, level_scale):
  """Manual log-prob computation for an autoregressive process."""
  order = len(coefs)
  reversed_coefs = coefs[::-1]
  total = 0.
  # Early timesteps: pad the unavailable history with zeros, so the first
  # `order` observations are modeled around a (partially) zero mean.
  for t in range(order):
    history = np.concatenate([np.zeros(order - t), y[:t]])
    predicted = np.dot(history, reversed_coefs)
    total += tfd.Normal(predicted, level_scale).log_prob(y[t])
  # Remaining timesteps condition on the full lag window.
  for t in range(order, len(y)):
    predicted = np.dot(y[t - order:t], reversed_coefs)
    total += tfd.Normal(predicted, level_scale).log_prob(y[t])
  return total
class _AutoregressiveStateSpaceModelTest(test_util.TestCase):
  def testEqualsLocalLevel(self):
    """AR(1) with unit coefficient must match a local level model's log-prob."""
    # An AR1 process with coef 1 is just a random walk, equivalent to a local
    # level model. Test that both models define the same distribution
    # (log-prob).
    num_timesteps = 10
    observed_time_series = self._build_placeholder(
        np.random.randn(num_timesteps, 1))
    level_scale = self._build_placeholder(0.1)
    # We'll test an AR1 process, and also (just for kicks) that the trivial
    # embedding as an AR2 process gives the same model.
    coefficients_order1 = np.array([1.]).astype(self.dtype)
    coefficients_order2 = np.array([1., 0.]).astype(self.dtype)
    ar1_ssm = AutoregressiveStateSpaceModel(
        num_timesteps=num_timesteps,
        coefficients=coefficients_order1,
        level_scale=level_scale,
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=[level_scale]))
    ar2_ssm = AutoregressiveStateSpaceModel(
        num_timesteps=num_timesteps,
        coefficients=coefficients_order2,
        level_scale=level_scale,
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=[level_scale, 1.]))
    local_level_ssm = LocalLevelStateSpaceModel(
        num_timesteps=num_timesteps,
        level_scale=level_scale,
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=[level_scale]))
    ar1_lp, ar2_lp, ll_lp = self.evaluate(
        (ar1_ssm.log_prob(observed_time_series),
         ar2_ssm.log_prob(observed_time_series),
         local_level_ssm.log_prob(observed_time_series)))
    self.assertAllClose(ar1_lp, ll_lp)
    self.assertAllClose(ar2_lp, ll_lp)
def testLogprobCorrectness(self):
# Compare the state-space model's log-prob to an explicit implementation.
num_timesteps = 10
observed_time_series_ = np.random.randn(num_timesteps)
coefficients_ = np.array([.7, -.1]).astype(self.dtype)
level_scale_ = 1.0
observed_time_series = self._build_placeholder(observed_time_series_)
level_scale = self._build_placeholder(level_scale_)
expected_logp = ar_explicit_logp(
observed_time_series_, coefficients_, level_scale_)
ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 0.]))
lp = ssm.log_prob(observed_time_series[..., tf.newaxis])
self.assertAllClose(self.evaluate(lp), expected_logp)
def testBatchShape(self):
seed = test_util.test_seed(sampler_type='stateless')
# Check that the model builds with batches of parameters.
order = 3
batch_shape = [4, 2]
# No `_build_placeholder`, because coefficients must have static shape.
coefficients = np.random.randn(*(batch_shape + [order])).astype(self.dtype)
level_scale = self._build_placeholder(
np.exp(np.random.randn(*batch_shape)))
ssm = AutoregressiveStateSpaceModel(
num_timesteps=10,
coefficients=coefficients,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=self._build_placeholder(np.ones([order]))))
if self.use_static_shape:
self.assertAllEqual(
tensorshape_util.as_list(ssm.batch_shape), batch_shape)
else:
self.assertAllEqual(self.evaluate(ssm.batch_shape_tensor()), batch_shape)
y = ssm.sample(seed=seed)
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(y.shape)[:-2], batch_shape)
else:
self.assertAllEqual(self.evaluate(tf.shape(y))[:-2], batch_shape)
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestStaticShape32(
    _AutoregressiveStateSpaceModelTest):
  """Runs the shared AR state-space tests with float32, static shapes."""
  use_static_shape = True
  dtype = np.float32
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestDynamicShape32(
    _AutoregressiveStateSpaceModelTest):
  """Runs the shared AR state-space tests with float32, dynamic shapes."""
  use_static_shape = False
  dtype = np.float32
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestStaticShape64(
    _AutoregressiveStateSpaceModelTest):
  """Runs the shared AR state-space tests with float64, static shapes."""
  use_static_shape = True
  dtype = np.float64
del _AutoregressiveStateSpaceModelTest  # Don't run tests for the base class.

if __name__ == '__main__':
  # Delegate to the shared TFP test runner.
  test_util.main()
|
# Removing stop words
# What to do with the Retweets (RT)?
# Make adjust so that the # and @ are attached to their associated word (i.e. #GOP, @twitter)
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import sys
def remove_stopwords(tweets):
    """Print each tweet from the file at *tweets* with English stop words removed.

    Args:
        tweets: path to a text file containing one tweet per line.
    """
    # Build the stop-word set once. The original rebuilt it for every line,
    # which re-read the NLTK corpus on each iteration for no benefit.
    stop_words = set(stopwords.words('english'))
    with open(tweets, 'r', buffering=1028) as read_tweet:
        for tweet in read_tweet:
            word_tokens = word_tokenize(tweet)
            # Capture only words not listed in the stop-word set.
            filtered_tweet = [word for word in word_tokens
                              if word not in stop_words]
            print(filtered_tweet)
def | main():
tweets = "/Users/alanseciwa/Desktop/Independent_Study/Sep16-GOP-TweetsONLY/clean_d | ata-TWEETONLY.csv"
remove_stopwords(tweets)
# Script entry point: process the tweet file, then terminate explicitly.
if __name__ == '__main__':
    main()
    sys.exit()
"""
WSGI config for Texas LAN Web project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

from django.core.wsgi import get_wsgi_application

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
# (setdefault means an externally provided DJANGO_SETTINGS_MODULE wins).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
"""
PySAR
Polarimetric SAR decomposition
Contents
--------
decomp_fd(hhhh,vvvv,hvhv,hhvv,numthrd=None) : Freeman-Durden 3-component decomposition
"""
from __future__ import print_function, division
import sys,os
import numpy as np
###===========================================================================================
def decomp_fd(hhhh,vvvv,hvhv,hhvv,null=None,numthrd=None,maxthrd=8):
    """
    Freeman-Durden 3-component decomposition

    Parameters
    ----------
    hhhh : ndarray
        horizontally polarized power
    vvvv : ndarray
        vertically polarized power
    hvhv : ndarray
        cross-polarized power
    hhvv : ndarray
        co-polarized cross product (complex-valued)
    null : float or None
        null value to exclude from decomposition
    numthrd : int or None
        number of pthreads; None sets numthrd based on the data array size [None]
    maxthrd : int or None
        maximum allowable numthrd [8]

    Returns
    -------
    ps : ndarray
        surface-scattered power
    pd : ndarray
        double-bounce power
    pv : ndarray
        volume-scattered power

    Notes
    -----
    * arrays are returned with the same type as hhhh data

    Reference
    ---------
    1. Freeman, A. and Durden, S., "A three-component scattering model for polarimetric SAR data", *IEEE Trans. Geosci. Remote Sensing*, vol. 36, no. 3, pp. 963-973, May 1998.
    """
    from pysar.polsar._decomp_modc import free_durden

    if not numthrd:
        # Heuristic: roughly one thread per 1e5 samples, capped at maxthrd.
        # Cast to int: `len(hhhh)//1e5` yields a float, and the thread count
        # is passed to a C extension.
        numthrd = int(np.max([len(hhhh)//1e5, 1]))
        if numthrd > maxthrd: numthrd = maxthrd
    elif numthrd < 1:
        raise ValueError('numthrd must be >= 1')

    # Explicit None test: the original `if null:` silently skipped masking
    # when the caller used 0 (or 0.0) as the null sentinel.
    if null is not None:
        nullmask = np.abs(hhhh-null) < 1.e-7
        nullmask += np.abs(vvvv-null) < 1.e-7
        nullmask += np.abs(hvhv-null) < 1.e-7
        nullmask += np.abs(hhvv-null) < 1.e-7
        # Only hhvv is zeroed here (decomp_haa also zeroes the powers);
        # NOTE(review): confirm this asymmetry is intended.
        hhvv[nullmask] = 0.

    # The C routine expects float32/complex64; remember the original dtype
    # so the result can be cast back.
    hhhhtype = None
    if hhhh.dtype != np.float32:
        hhhhtype = hhhh.dtype
        hhhh = hhhh.astype(np.float32)
        vvvv = vvvv.astype(np.float32)
        hvhv = hvhv.astype(np.float32)
        hhvv = hhvv.astype(np.complex64)

    # Flatten to 1-D if any input array is 2-D (the set holds `2 - ndim`,
    # so a 2-D array contributes a falsy 0).
    if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhvv.ndim]}):
        hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
        hvhv, hhvv = hvhv.flatten(), hhvv.flatten()

    P = free_durden(hhhh, vvvv, hvhv, hhvv, numthrd)

    if hhhhtype is not None: P = P.astype(hhhhtype)
    P = P.reshape(3,-1)
    # Restore the null sentinel in all three output channels.
    if null is not None: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
    return P[0,:], P[1,:], P[2,:]
###---------------------------------------------------------------------------------
def decomp_haa(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
    """
    Cloude-Pottier H/A/alpha polarimetric decomposition

    Parameters
    ----------
    hhhh : ndarray
        horizontal co-polarized power (or 0.5|HH + VV|^2 if matform = 'T')
    vvvv : ndarray
        vertical co-polarized power (or 0.5|HH - VV|^2 if matform = 'T')
    hvhv : ndarray
        cross-polarized power (2|HV|^2 for matform = 'T')
    hhhv : ndarray
        HH.HV* cross-product (or 0.5(HH+VV)(HH-VV)* for matform = 'T')
    hhvv : ndarray
        HH.VV* cross-product (or HV(HH+VV)* for matform = 'T')
    hvvv : ndarray
        HV.VV* cross-product (or HV(HH-VV)* for matform = 'T')
    matform : str {'C' or 'T'}
        form of input matrix entries: 'C' for covariance matrix and
        'T' for coherency matrix ['C'] (see ref. 1)
    null : float or None
        null value to exclude from decomposition
    numthrd : int or None
        number of pthreads; None sets numthrd based on the data array size [None]
    maxthrd : int or None
        maximum allowable numthrd [8]

    Returns
    -------
    H : ndarray
        entropy (H = -(p1*log_3(p1) + p2*log_3(p2) + p3*log_3(p3))
        where pi = lam_i/(hhhh+vvvv+hvhv)) and lam is an eigenvalue
    A : ndarray
        anisotropy (A = (lam_2-lam_3)/(lam_2+lam_3) --> lam_1 >= lam_2 >= lam_3
    alpha : ndarray
        alpha angle in degrees (see ref. 1)

    Notes
    -----
    * arrays are returned with the same type as hhhh data
    * if covariance matrix form is used, do not multiply entries by any constants

    Reference
    ---------
    1. Cloude, S. and Pottier, E., "An entropy based classification scheme for land applications of polarimetric SAR", *IEEE Trans. Geosci. Remote Sensing*, vol. 35, no. 1, pp. 68-78, Jan. 1997.
    """
    from pysar.polsar._decomp_modc import cloude_pot

    # Translate the matrix form to the flag expected by the C routine.
    if matform == 'C' or matform == 'c':
        mtf = 1
    elif matform == 'T' or matform == 't':
        mtf = 0
    else:
        raise ValueError("matform must be 'C' or 'T'")

    if not numthrd:
        # Heuristic: roughly one thread per 1e5 samples, capped at maxthrd.
        # Cast to int: `len(hhhh)//1e5` yields a float.
        numthrd = int(np.max([len(hhhh)//1e5, 1]))
        if numthrd > maxthrd: numthrd = maxthrd
    elif numthrd < 1:
        raise ValueError('numthrd must be >= 1')

    # Explicit None test: the original `if null:` silently skipped masking
    # when the caller used 0 (or 0.0) as the null sentinel.
    if null is not None:
        nullmask = np.abs(hhhh-null) < 1.e-7
        nullmask += np.abs(vvvv-null) < 1.e-7
        nullmask += np.abs(hvhv-null) < 1.e-7
        nullmask += np.abs(hhhv-null) < 1.e-7
        nullmask += np.abs(hhvv-null) < 1.e-7
        nullmask += np.abs(hvvv-null) < 1.e-7
        # The real powers are zeroed; the complex cross-products are not.
        # NOTE(review): confirm this asymmetry is intended.
        hhhh[nullmask], vvvv[nullmask] = 0., 0.
        hvhv[nullmask] = 0.

    # The C routine expects float32/complex64; remember the original dtype
    # so the result can be cast back.
    hhhhtype = None
    if hhhh.dtype != np.float32:
        hhhhtype = hhhh.dtype
        hhhh = hhhh.astype(np.float32)
        vvvv = vvvv.astype(np.float32)
        hvhv = hvhv.astype(np.float32)
        hhhv = hhhv.astype(np.complex64)
        hhvv = hhvv.astype(np.complex64)
        hvvv = hvvv.astype(np.complex64)

    # Flatten to 1-D if any input array is 2-D (the set holds `2 - ndim`,
    # so a 2-D array contributes a falsy 0).
    if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhhv.ndim, hhvv.ndim, hvvv.ndim]}):
        hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
        hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
        hhhv, hvvv = hhhv.flatten(), hvvv.flatten()

    P = cloude_pot(hhhh, vvvv, hvhv, hhhv, hhvv, hvvv, mtf, numthrd)

    if hhhhtype is not None: P = P.astype(hhhhtype)
    P = P.reshape(3,-1)
    # Restore the null sentinel in all three output channels.
    if null is not None: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
    return P[0,:], P[1,:], P[2,:]
def decomp_cp(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
    """Alias for :func:`decomp_haa` (Cloude-Pottier H/A/alpha decomposition)."""
    return decomp_haa(hhhh=hhhh,vvvv=vvvv,hvhv=hvhv,hhhv=hhhv,hhvv=hhvv,hvvv=hvvv,
                      matform=matform,null=null,numthrd=numthrd,maxthrd=maxthrd)

# Share the full parameter documentation with decomp_haa. The original code
# assigned `__doc__` to a *local variable inside the body*, which never set
# the function's docstring; assigning to the attribute does.
decomp_cp.__doc__ = decomp_haa.__doc__
|
isted with its state")
self.failUnlessEqual(
value.method(2.5), 5.0,
"Class instance wasn't properly persisted with its method")
dList = []
for name in ('foo', 'bar'):
dList.append(self.i.t.load(name).addCallback(gotValue, name))
return DeferredList(dList)
    def test_loadAbsent(self):
        """Loading a never-stored name yields an items.Missing marker, not an error."""
        def gotValue(value):
            self.failUnless(
                isinstance(value, items.Missing),
                "Should have returned 'Missing' object, not '%s'!" % \
                str(value))

        def gotExpectedError(failure):
            # The errback must not fire for an absent name.
            self.fail("Shouldn't have raised error on missing value")

        return self.i.t.load('invalid').addCallbacks(
            gotValue, gotExpectedError)
    def test_loadAll(self):
        """loadAll() returns a dict holding every stored item (here: foo, bar)."""
        def loaded(items):
            # Python 2: dict.keys() returns a list; sort in place for a
            # deterministic comparison.
            itemKeys = items.keys()
            itemKeys.sort()
            self.failUnlessEqual(itemKeys, ['bar', 'foo'])
        return self.i.t.loadAll().addCallback(loaded)
    def insertLots(self, callback):
        """Insert a spread of value types, then fire *callback* with a copy of them.

        Covers ints, strings, floats, tuples, booleans, a fresh and a
        stateful class instance, and a class object itself.
        """
        noviceThing = MockThing()
        experiencedThing = MockThing()
        experiencedThing.method(0)  # give this instance some state before persisting
        self.whatToInsert = {
            'alpha':5937341,
            'bravo':'abc',
            'charlie':-3.1415,
            'delta':(1,2,3),
            'echo':True,
            'foxtrot':False,
            'golf':noviceThing,
            'hotel':experiencedThing,
            'india':MockThing
        }
        dList = []
        for name, value in self.whatToInsert.iteritems():
            dList.append(self.i.t.insert(name, value))
        # Pass a copy so later test mutations can't affect the stored dict.
        return DeferredList(dList).addCallback(
            callback, self.whatToInsert.copy())
    def test_insert(self):
        """Every inserted value round-trips through the database, distinctly."""
        def done(null, items):
            def check():
                # Read each row back directly through SQLAlchemy and compare
                # with what was inserted.
                table = self.i.t.sasync_items
                for name, inserted in items.iteritems():
                    value = table.select(
                        and_(table.c.group_id == 123,
                             table.c.name == name)
                        ).execute().fetchone()['value']
                    msg = "Inserted '{}:{}' ".format(name, inserted) +\
                          "but read '{}' back from the database!".format(value)
                    self.failUnlessEqual(value, inserted, msg)
                    # No two distinct names may map to equal stored values in
                    # this fixture; that would hide cross-wiring.
                    for otherName, otherValue in items.iteritems():
                        if otherName != name and value == otherValue:
                            self.fail(
                                "Inserted item '%s' is equal to item '%s'" % \
                                (name, otherName))
            return self.i.t.deferToQueue(check)
        return self.insertLots(done)
    def test_deleteOne(self):
        """After deleting one name, loading it yields an items.Missing marker."""
        def gotOriginal(value):
            # NOTE(review): assumes the fixture stored 'foo' -> 'OK' in setUp
            # (outside this view) -- confirm against the mixin's setUp.
            self.failUnlessEqual(value, 'OK')
            return self.i.t.delete('foo').addCallback(getAfterDeleted)
        def getAfterDeleted(null):
            return self.i.t.load('foo').addCallback(checkIfDeleted)
        def checkIfDeleted(value):
            self.failUnless(isinstance(value, items.Missing))
        # Chain: load original -> delete -> load again -> expect Missing.
        return self.i.t.load('foo').addCallback(gotOriginal)
    def test_deleteMultiple(self):
        """Deleting several names at once leaves the store empty."""
        def getAfterDeleted(null):
            return self.i.t.loadAll().addCallback(checkIfDeleted)
        def checkIfDeleted(values):
            self.failUnlessEqual(values, {})
        return self.i.t.delete('foo', 'bar').addCallback(getAfterDeleted)
    def test_namesFew(self):
        """names() lists the two names stored by the fixture."""
        def got(names):
            names.sort()
            self.failUnlessEqual(names, ['bar', 'foo'])
        return self.i.t.names().addCallback(got)
    def test_namesMany(self):
        """names() lists the fixture names plus everything insertLots added."""
        def get(null, items):
            return self.i.t.names().addCallback(got, items.keys())
        def got(names, shouldHave):
            # The fixture pre-populates 'foo' and 'bar'.
            shouldHave += ['foo', 'bar']
            names.sort()
            shouldHave.sort()
            self.failUnlessEqual(names, shouldHave)
        return self.insertLots(get)
    def test_update(self):
        """update() overwrites selected values; everything else is untouched."""
        def update(null, items):
            return DeferredList([
                self.i.t.update('alpha', 1),
                self.i.t.update('bravo', 2),
                self.i.t.update('charlie', 3)
                ]).addCallback(check, items)
        def check(null, items):
            return self.i.t.loadAll().addCallback(loaded, items)
        def loaded(loadedItems, controlItems):
            # Mirror the updates in the control dict, then compare item by item.
            controlItems.update({'alpha':1, 'bravo':2, 'charlie':3})
            for name, value in controlItems.iteritems():
                self.failUnlessEqual(
                    value, loadedItems.get(name, 'Impossible Value'))
        return self.insertLots(update)
class TestItems(ItemsMixin, TestCase):
    """End-to-end tests of the items.Items facade with default name handling."""

    def setUp(self):
        self.i = items.Items(GROUP_ID, "sqlite:///%s" % db)

    def test_insertAndLoad(self):
        """A stored tuple comes back intact under its name."""
        nouns = ('lamp', 'rug', 'chair')
        def first(null):
            return self.i.loadAll().addCallback(second)
        def second(items):
            self.failUnlessEqual(items['Nouns'], nouns)
        return self.i.insert('Nouns', nouns).addCallback(first)

    def test_insertAndDelete(self):
        """Deleting one name leaves exactly the remaining names behind."""
        items = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4}
        def first(null):
            return self.i.delete('c').addCallback(second)
        def second(null):
            return self.i.names().addCallback(third)
        def third(nameList):
            desiredList = [x for x in items.keys() if x != 'c']
            desiredList.sort()
            nameList.sort()
            self.failUnlessEqual(nameList, desiredList)
        dL = []
        for name, value in items.iteritems():
            dL.append(self.i.insert(name, value))
        return DeferredList(dL).addCallback(first)

    def test_insertAndLoadAll(self):
        """loadAll() reproduces the entire inserted mapping."""
        items = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4}
        def first(null):
            return self.i.loadAll().addCallback(second)
        def second(loadedItems):
            self.failUnlessEqual(loadedItems, items)
        dL = []
        for name, value in items.iteritems():
            dL.append(self.i.insert(name, value))
        return DeferredList(dL).addCallback(first)

    def test_insertAndUpdate(self):
        """update() replaces a single value in place."""
        items = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4}
        def first(null):
            return self.i.update('b', 10).addCallback(second)
        def second(null):
            return self.i.loadAll().addCallback(third)
        def third(loadedItems):
            expectedItems = {'a':0, 'b':10, 'c':2, 'd':3, 'e':4}
            self.failUnlessEqual(loadedItems, expectedItems)
        dL = []
        for name, value in items.iteritems():
            dL.append(self.i.insert(name, value))
        return DeferredList(dL).addCallback(first)
class TestItemsIntegerNames(ItemsMixin, TestCase):
    """Items facade with nameType=int: string names are coerced to integers."""

    def setUp(self):
        # Mixed-type keys; all should normalize to the ints 1..4.
        self.items = {'1':'a', 2:'b', 3:'c', '04':'d'}
        self.i = items.Items(GROUP_ID, "sqlite:///%s" % db, nameType=int)

    def insertStuff(self):
        """Insert the fixture mapping; fires when all inserts complete."""
        dL = []
        for name, value in self.items.iteritems():
            dL.append(self.i.insert(name, value))
        return DeferredList(dL)

    def test_names(self):
        """names() yields the coerced integer keys."""
        def first(null):
            return self.i.names().addCallback(second)
        def second(names):
            names.sort()
            self.failUnlessEqual(names, [1, 2, 3, 4])
        return self.insertStuff().addCallback(first)

    def test_loadAll(self):
        """loadAll() keys the returned mapping by the coerced integers."""
        def first(null):
            return self.i.loadAll().addCallback(second)
        def second(loaded):
            self.failUnlessEqual(loaded, {1:'a', 2:'b', 3:'c', 4:'d'})
        return self.insertStuff().addCallback(first)
class TestItemsStringNames(ItemsMixin, TestCase):
def setUp(self):
self.items = {'1':'a', 2:'b', u'3':'c', "4":'d'}
self.i = items.Items(GROUP_ID, "sqlite:///%s" % db, nameType=str)
def insertStuff(self):
dL = []
for name, value in self.items.iteritems():
dL.append(self.i.insert(name, value))
return DeferredList(dL)
def test_names(self):
def first(null):
return self.i.names().addCallback(second)
def second(names):
names.sort()
|
import json
import time
# Use the public hashlib API rather than the private `_md5` module, which is
# a CPython implementation detail and may be absent in some builds.
from hashlib import md5

import requests

import RolevPlayer as r
def now_playing_last_fm(artist, track):
    """Tell Last.fm that *artist* - *track* is playing now.

    Args:
        artist: artist name (raw, unescaped).
        track: track title (raw, unescaped).

    Returns:
        The decoded JSON response, so callers can inspect API errors
        (previously the response was parsed and discarded).
    """
    from urllib.parse import quote_plus  # local import keeps module deps unchanged

    # The api_sig is an md5 over the *raw* (unencoded) parameter string,
    # per the Last.fm authentication spec.
    update_now_playing_sig = md5(("api_key" + r.API_KEY +
                                  "artist" + artist +
                                  "method" + "track.updateNowPlaying" +
                                  "sk" + r.SK +
                                  "track" + track +
                                  r.SECRET).encode('utf-8')).hexdigest()
    # URL-encode the user-supplied fields: spaces or '&' in titles would
    # otherwise corrupt the query string.
    url = "http://ws.audioscrobbler.com/2.0/?method=track.updateNowPlaying" + \
          "&api_key=" + r.API_KEY + \
          "&api_sig=" + update_now_playing_sig + \
          "&artist=" + quote_plus(artist) + \
          "&format=json" + \
          "&sk=" + r.SK + \
          "&track=" + quote_plus(track)
    req = requests.post(url).text
    json_obj = json.loads(req)
    return json_obj
def scrobble(artist, track):
    """Submit a played track ('scrobble') to Last.fm, timestamped now.

    Args:
        artist: artist name (raw, unescaped).
        track: track title (raw, unescaped).

    Returns:
        The decoded JSON response, so callers can inspect API errors.
    """
    from urllib.parse import quote_plus  # local import keeps module deps unchanged

    # Last.fm expects an integer Unix timestamp. The old code commented
    # "casted to integer" but actually signed and sent the float form.
    ts = int(time.time())
    scrobbling_sig = md5(("api_key" + r.API_KEY +
                          "artist" + artist +
                          "method" + "track.scrobble" +
                          "sk" + r.SK +
                          "timestamp" + str(ts) +
                          "track" + track +
                          r.SECRET).encode('utf-8')).hexdigest()
    # NOTE: the original URL contained a mojibake'd "×tamp=" where
    # "&timestamp=" was intended, so the timestamp never reached the API.
    req = requests.post(
        "http://ws.audioscrobbler.com/2.0/?method=track.scrobble" +
        "&api_key=" + r.API_KEY +
        "&api_sig=" + scrobbling_sig +
        "&artist=" + quote_plus(artist) +
        "&format=json" +
        "&sk=" + r.SK +
        "&timestamp=" + str(ts) +
        "&track=" + quote_plus(track)).text
    json_obj = json.loads(req)
    return json_obj
|
#!/usr/bin/env p | ython
data = {
"default_prefix": "OSVC_COMP_REMOVE_FILES_",
"example_value": """
[
"/tmp/foo",
"/bar/to/delete"
]
""",
"description": """* Verify files and file trees are uninstalled
""",
"form_definition": """
Desc: | |
A rule defining a set of files to remove, fed to the 'remove_files' compliance object.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: remove_files
Type: json
Format: list
Inputs:
-
Id: path
Label: File path
DisplayModeLabel: ""
LabelCss: edit16
Mandatory: Yes
Help: You must set paths in fully qualified form.
Type: string
""",
}
import os
import sys
import re
import json
from glob import glob
import shutil
sys.path.append(os.path.dirname(__file__))
from comp import *
# Paths that must never be removed, even if a rule pattern matches them.
blacklist = [
    "/",
    "/root"
]
class CompRemoveFiles(CompObject):
    """Compliance object that verifies (and enforces) a set of paths is absent."""

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        """Expand the configured patterns; bail out when nothing matches."""
        unique_patterns = sorted(set(self.get_rules()))
        self.files = self.expand_patterns(unique_patterns)
        if not self.files:
            pinfo("no files matching patterns")
            raise NotApplicable

    def expand_patterns(self, patterns):
        """Glob every pattern and return the concatenated match list."""
        return [match for pattern in patterns for match in glob(pattern)]

    def fixable(self):
        """Fixability is not assessed for this object."""
        return RET_NA

    def check_file(self, _file):
        """OK when *_file* is absent; error when it still exists."""
        if os.path.exists(_file):
            perror(_file, "exists. shouldn't")
            return RET_ERR
        pinfo(_file, "does not exist. on target.")
        return RET_OK

    def fix_file(self, _file):
        """Delete *_file*; directories are removed recursively, symlinks unlinked."""
        if not os.path.exists(_file):
            return RET_OK
        try:
            # A symlink to a directory is unlinked, not recursed into.
            if os.path.isdir(_file) and not os.path.islink(_file):
                shutil.rmtree(_file)
            else:
                os.unlink(_file)
            pinfo(_file, "deleted")
        except Exception as e:
            perror("failed to delete", _file, "(%s)"%str(e))
            return RET_ERR
        return RET_OK

    def check(self):
        """OR together the per-file check results."""
        ret = 0
        for candidate in self.files:
            ret |= self.check_file(candidate)
        return ret

    def fix(self):
        """OR together the per-file fix results."""
        ret = 0
        for candidate in self.files:
            ret |= self.fix_file(candidate)
        return ret
# Entry point: hand control to the shared compliance driver.
if __name__ == "__main__":
    main(CompRemoveFiles)
|
conanfile = """from conans import ConanFile, CMake, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
| url = "<Package recipe repository url here, for issues about the package>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
def s | ource(self):
self.run("git clone https://github.com/memsharded/hello.git")
self.run("cd hello && git checkout static_shared")
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)", '''PROJECT(MyHello)
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self.settings)
shared = "-DBUILD_SHARED_LIBS=ON" if self.options.shared else ""
self.run('cmake hello %s %s' % (cmake.command_line, shared))
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*hello.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_header = """from conans import ConanFile, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
# No settings/options are necessary, this is header only
def source(self):
'''retrieval of the source code here. Remember you can also put the code in the folder and
use exports instead of retrieving it with this source() method
'''
#self.run("git clone ...") or
#tools.download("url", "file.zip")
#tools.unzip("file.zip" )
def package(self):
self.copy("*.h", "include")
"""
test_conanfile = """from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "{channel}")
username = os.getenv("CONAN_USERNAME", "{user}")
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "{name}/{version}@%s/%s" % (username, channel)
generators = "cmake"
def build(self):
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def imports(self):
self.copy("*.dll", "bin", "bin")
self.copy("*.dylib", "bin", "bin")
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
"""
test_cmake = """PROJECT(PackageTest)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
ADD_EXECUTABLE(example example.cpp)
TARGET_LINK_LIBRARIES(example ${CONAN_LIBS})
"""
test_main = """#include <iostream>
#include "hello.h"
int main() {
hello();
std::cout<<"*** Running example, will fail by default, implement yours! ***\\n";
return -1; // fail by default, remember to implement your test
}
"""
|
"""Tests for the Linky config flow."""
from pylinky.exceptions import (
PyLinkyAccessException,
PyLinkyEnedisException,
PyLinkyException,
PyLinkyWrongLoginException,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.linky.const import DEFAULT_TIMEOUT, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
# Credentials and timeout shared by the config-flow tests below.
USERNAME = "username@hotmail.fr"
USERNAME_2 = "username@free.fr"
PASSWORD = "password"
TIMEOUT = 20  # a non-default value, distinct from DEFAULT_TIMEOUT
@pytest.fixture(name="login")
def mock_controller_login():
    """Mock a successful login."""
    with patch(
        "homeassistant.components.linky.config_flow.LinkyClient"
    ) as client_mock:
        client_mock.return_value.login = Mock(return_value=True)
        client_mock.return_value.close_session = Mock(return_value=None)
        yield client_mock
@pytest.fixture(name="fetch_data")
def mock_controller_fetch_data():
    """Mock a successful get data."""
    with patch(
        "homeassistant.components.linky.config_flow.LinkyClient"
    ) as client_mock:
        client_mock.return_value.fetch_data = Mock(return_value={})
        client_mock.return_value.close_session = Mock(return_value=None)
        yield client_mock
async def test_user(hass: HomeAssistantType, login, fetch_data):
    """Test user config."""
    # Starting the flow with no input must present the user form.
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=None
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow["step_id"] == "user"

    # Submitting credentials creates an entry keyed by the username, with
    # the default timeout filled in.
    flow = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_USER},
        data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert flow["result"].unique_id == USERNAME
    assert flow["title"] == USERNAME
    entry_data = flow["data"]
    assert entry_data[CONF_USERNAME] == USERNAME
    assert entry_data[CONF_PASSWORD] == PASSWORD
    assert entry_data[CONF_TIMEOUT] == DEFAULT_TIMEOUT
async def test_import(hass: HomeAssistantType, login, fetch_data):
    """Test import step."""
    # Import with only username and password: timeout falls back to default.
    flow = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert flow["result"].unique_id == USERNAME
    assert flow["title"] == USERNAME
    entry_data = flow["data"]
    assert entry_data[CONF_USERNAME] == USERNAME
    assert entry_data[CONF_PASSWORD] == PASSWORD
    assert entry_data[CONF_TIMEOUT] == DEFAULT_TIMEOUT

    # Import with an explicit timeout: the provided value is preserved.
    flow = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data={
            CONF_USERNAME: USERNAME_2,
            CONF_PASSWORD: PASSWORD,
            CONF_TIMEOUT: TIMEOUT,
        },
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert flow["result"].unique_id == USERNAME_2
    assert flow["title"] == USERNAME_2
    entry_data = flow["data"]
    assert entry_data[CONF_USERNAME] == USERNAME_2
    assert entry_data[CONF_PASSWORD] == PASSWORD
    assert entry_data[CONF_TIMEOUT] == TIMEOUT
async def test_abort_if_already_setup(hass: HomeAssistantType, login, fetch_data):
    """Test we abort if Linky is already setup."""
    MockConfigEntry(
        domain=DOMAIN,
        data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        unique_id=USERNAME,
    ).add_to_hass(hass)

    # Both the import path and the user path must abort on a duplicate
    # USERNAME (same checks, same data -- only the source differs).
    for source in (SOURCE_IMPORT, SOURCE_USER):
        flow = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": source},
            data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )
        assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert flow["reason"] == "already_configured"
async def test_login_failed(hass: HomeAssistantType, login):
    """Test when we have errors during login."""
    # (exception, flow source, expected error key) -- same sequence as the
    # original two inline blocks.
    cases = (
        (PyLinkyAccessException(), SOURCE_IMPORT, "access"),
        (PyLinkyWrongLoginException(), SOURCE_USER, "wrong_login"),
    )
    for side_effect, source, expected_error in cases:
        login.return_value.login.side_effect = side_effect
        flow = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": source},
            data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )
        assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert flow["errors"] == {"base": expected_error}
        hass.config_entries.flow.async_abort(flow["flow_id"])
async def test_fetch_failed(hass: HomeAssistantType, login):
    """Test when we have errors during fetch."""
    # Each fetch-time exception maps to its error key; all runs use the
    # user-initiated flow, exactly as the original three inline blocks did.
    cases = (
        (PyLinkyAccessException(), "access"),
        (PyLinkyEnedisException(), "enedis"),
        (PyLinkyException(), "unknown"),
    )
    for side_effect, expected_error in cases:
        login.return_value.fetch_data.side_effect = side_effect
        flow = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
            data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )
        assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert flow["errors"] == {"base": expected_error}
        hass.config_entries.flow.async_abort(flow["flow_id"])
|
s:\n{}'.format(pprint.pformat(late_variables)))
return late_variables
def get_secret_variables(sources):
    """Return the deduplicated secret variable names declared across *sources*."""
    secret_names = set()
    for source in sources:
        secret_names.update(source.secret)
    return list(secret_names)
def get_final_arguments(resolver):
    """Map every finalized argument on *resolver* to its resolved value."""
    finalized = {}
    for name, argument in resolver.arguments.items():
        if argument.is_finalized:
            finalized[name] = argument.value
    return finalized
def format_expanded_config(config):
    """Pretty-print *config* as JSON, indented three spaces for embedding."""
    pretty = json_prettyprint(config)
    return textwrap.indent(pretty, prefix='   ')
def user_arguments_to_yaml(user_arguments: dict):
    """Render *user_arguments* as block-style YAML, indented three spaces."""
    dumped = yaml.dump(
        user_arguments, default_style='|', default_flow_style=False, indent=2)
    return textwrap.indent(dumped, prefix=' ' * 3)
def generate(
arguments,
extra_templates=list(),
extra_sources=list(),
extra_targets=list()):
# To maintain the old API where we passed arguments rather than the new name.
user_arguments = arguments
arguments = None
sources, targets, templates = get_dcosconfig_source_target_and_templates(
user_arguments, extra_templates, extra_sources)
resolver = validate_and_raise(sources, targets + extra_targets)
argument_dict = get_final_arguments(resolver)
late_variables = get_late_variables(resolver, sources)
secret_builtins = ['expanded_config_full', 'user_arguments_full', 'config_yaml_full']
secret_variables = set(get_secret_variables(sources) + secret_builtins)
masked_value = '**HIDDEN**'
# Calculate values for builtin variables.
user_arguments_masked = {k: (masked_value if k in secret_variables else v) for k, v in user_arguments.items()}
argument_dict['user_arguments_full'] = json_prettyprint(user_arguments)
argument_dict['user_arguments'] = json_prettyprint(user_arguments_masked)
argument_dict['config_yaml_full'] = user_arguments_to_yaml(user_arguments)
argument_dict['config_yaml'] = user_arguments_to_yaml(user_arguments_masked)
# The expanded_config and expanded_config_full variables contain all other variables and their values.
# expanded_config is a copy of expanded_config_full with secret values removed. Calculating these variables' values
# must come after the calculation of all other variables to prevent infinite recursion.
# TODO(cmaloney): Make this late-bound by gen.internals
expanded_config_full = {
k: v for k, v in argument_dict.items()
# Omit late-bound variables whose values have not yet been calculated.
if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
}
expanded_config_scrubbed = {k: v for k, v in expanded_config_full.items() if k not in secret_variables}
argument_dict['expanded_config_full'] = format_expanded_config(expanded_config_full)
argument_dict['expanded_config'] = format_expanded_config(expanded_config_scrubbed)
log.debug(
"Final arguments:" + json_prettyprint({
# Mask secret config values.
k: (masked_value if k in secret_variables else v) for k, v in argument_dict.items()
})
)
# Fill in the template parameters
# TODO(cmaloney): render_templates should ideally take the template targets.
rendered_templates = render_templates(templates, argument_dict)
# Validate there aren't any unexpected top level directives in any of the files
# (likely indicates a misspelling)
for name, template in rendered_templates.items():
if name == 'dcos-services.yaml': # yaml list of the service files
assert isinstance(template, list)
elif name == 'cloud-config.yaml':
assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
elif isinstance(template, str): # Not a yaml template
pass
else: # yaml template file
log.debug("validating template file %s", name)
assert template.keys() <= PACKAGE_KEYS, template.keys()
stable_artifacts = []
channel_artifacts = []
# Find all files which contain late bind variables and turn them into a "late bind package"
# TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
late_files, regular_files = extract_files_containing_late_variables(
rendered_templates['dcos-config.yaml']['package'])
# put the regular files right back
rendered_templates['dcos-config.yaml'] = {'package': regular_files}
# Render cluster package list artifact.
cluster_package_list_filename = 'package_lists/{}.package_list.json'.format(
argument_dict['cluster_package_list_id']
)
os.makedirs(os.path.dirname(cluster_package_list_filename), mode=0o755, exist_ok=True)
write_string(cluster_package_list_filename, argument_dict['cluster_packages'])
log.info('Cluster package list: {}'.format(cluster_package_list_filename))
stable_artifacts.append(cluster_package_list_filename)
def make_package_filename(package_id, extension):
return 'packages/{0}/{1}{2}'.format(
package_id.name,
repr(package_id),
extension)
# Render all the cluster packages
cluster_package_info = {}
# Prepare late binding config, if any.
late_package = build_late_package(late_files, argument_dict['config_id'], argument_dict['provider'])
if late_variables:
# Render the late binding package. This package will be downloaded onto
# each cluster node during bootstrap and rendered into the final config
# using the values from the late config file.
late_package_id = PackageId(late_package['name'])
late_package_filename = make_package_filename(late_package_id, '.dcos_config')
os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
write_yaml(late_package_filename, {'package': late_package['package']}, default_flow_style=False)
log.info('Package filename: {}'.format(late_package_filename))
stable_artifacts.append(late_package_filename)
# Add the late config file to cloud config. The expressions in
# late_variables will be resolved by the service handling the cloud
# config (e.g. Amazon CloudFormation). The rendered late config file
# on a cluster node's filesystem will contain the final values.
rendered_templates['cloud-config.yaml']['root'].append({
'path': '/etc/mesosphere/setup-flags/late-config.yaml',
'permissions': '0644',
'owner': 'root',
# TODO(cmaloney): don't prettyprint to save bytes.
# NOTE: Use yaml here simply to make avoiding painful escaping and
# unescaping easier.
'content': render_yaml({
'late_bound_package_id': late_package['name'],
'bound_values': late_variables
})})
# Collect metadata for cluster packages.
for package_id_str in json.loads(argument_dict['cluster_packages']):
package_id = PackageId(package_id_str)
package_filename = make_package_filename(package_id, '.tar.xz')
cluster_package_info[package_id.name] = {
'id': package_id_str,
'filename': package_filename
}
# Render config packages.
config_package_ids = json.loads(argument_dict['config_package_ids'])
for package_id_str in config_package_ids:
package_id = PackageId(package_id_str)
package_filename = cluster_package_info[package_id.name]['filename']
do_gen_package(rendered_templates[package_id.name + '.yaml'], cluster_package_info[package_id.name]['filename'])
stable_artifacts.append(package_filename)
# Convert cloud-config to just contain write_files rather than root
cc = rendered_templates['cloud-config.yaml']
# Shouldn't contain any packages. Providers should pull what they need to
# late bind out of other packages via cc_package_file.
assert 'package' not in cc
cc_root = cc.pop('root', [])
# Make sure write_files exists.
assert 'write_files' not in cc
cc['write_files'] = []
# Do the transform
for item in cc_root:
assert item['path'].startswith('/')
cc['write_files'].append(item)
rendered_templates['c | loud-con | fig.yaml'] = cc
# Add utils that need to be def |
# disk.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Utilities relating to disk management
from kano.utils.shell import run_cmd
def get_free_space(path="/"):
    """
    Return the amount of free disk space at the given location, in MB.

    :param path: The location to measure the free space at.
    :type path: str
    :return: Number of free megabytes.
    :rtype: int
    """
    out, dummy_err, dummy_rv = run_cmd("df {}".format(path))

    # `df` prints a header row, then one data row for the filesystem
    # containing `path`; the fourth field of that row is the free space
    # in 1K blocks.
    dummy_device, dummy_size, dummy_used, free, dummy_percent, dummy_mp = \
        out.split('\n')[1].split()

    # Floor-divide so the documented int return type holds on Python 3
    # as well (`/` would yield a float there).
    return int(free) // 1024
def get_partition_info():
    """
    Return the sizes of the block devices/partitions of /dev/mmcblk0.

    :return: Sizes in bytes as reported by ``lsblk``, or an empty list
        if the sizes could not be determined.
    :rtype: list
    """
    device = '/dev/mmcblk0'

    try:
        cmd = 'lsblk -n -b {} -o SIZE'.format(device)
        stdout, dummy_stderr, returncode = run_cmd(cmd)

        if returncode != 0:
            # Imported lazily so the logging machinery is only pulled in
            # on the failure path.
            from kano.logging import logger
            logger.warning("error running lsblk")
            return []

        # Return a real list: on Python 3, a bare map() object would be a
        # one-shot iterator, inconsistent with the [] returned on errors.
        return [int(line) for line in stdout.strip().split('\n')]
    except Exception:
        return []
|
# -*- coding: utf-8 -*-
from __future__ import unic | ode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make (owner, name) unique together on the `test` model."""

    dependencies = [
        ('core', '0008_auto_20150819_0050'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='test',
            unique_together={('owner', 'name')},
        ),
    ]
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABIL | ITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
|
microcode = '''
# All the memory versions need to use LOCK, regardless of if it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
'''
|
import EightP | uzzleWithHeuristics as Problem
# puzzle0:
def CREATE_INITIAL_STATE():
    """Build the initial board state for puzzle0 (tiles 0..8 in order)."""
    # A def instead of a lambda assignment (PEP 8 E731); same callable name.
    return Problem.State([0, 1, 2, 3, 4, 5, 6, 7, 8])
expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as e:
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(e)
raise
try_change_enrollment(request)
statsd.increment("common.student.successful_login")
response = HttpResponse(json.dumps({'success': True}))
# set the login cookie for the edx marketing site
# we want this cookie to be accessed via javascript
# so httponly is set to None
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
response.set_cookie(settings.EDXMKTG_COOKIE_NAME,
'true', max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path='/',
secure=None,
httponly=None)
return response
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your e-mail for the activation instructions.")
return HttpResponse(json.dumps({'success': False,
'value': not_activated_msg}))
@ensure_csrf_cookie
def logout_user(request):
    """
    HTTP request to log out the user. Redirects to marketing page.

    Deletes both the CSRF and sessionid cookies so the marketing
    site can determine the logged in state of the user.
    """
    # Successful logouts are logged by a registered handler,
    # so no explicit logging here.
    logout(request)

    response = redirect('/')
    response.delete_cookie(
        settings.EDXMKTG_COOKIE_NAME,
        path='/',
        domain=settings.SESSION_COOKIE_DOMAIN,
    )
    return response
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location."""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache

    new_location = request.POST.get('location')
    if new_location is not None:
        profile.location = new_location
        profile.save()

    return HttpResponse(json.dumps({
        'success': True,
        'location': profile.location,
    }))
def _do_create_account(post_vars):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Returns a tuple (User, UserProfile, Registration).

    Note: this function is also used for creating test users.
    """
    user = User(username=post_vars['username'],
                email=post_vars['email'],
                is_active=False)
    user.set_password(post_vars['password'])
    registration = Registration()
    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        user.save()
    except IntegrityError:
        js = {'success': False}
        # Figure out the cause of the integrity error.
        # Use .exists() instead of len(QuerySet) so we issue a cheap
        # EXISTS query rather than fetching whole rows just to count them.
        if User.objects.filter(username=post_vars['username']).exists():
            js['value'] = _("An account with the Public Username '{username}' already exists.").format(username=post_vars['username'])
            js['field'] = 'username'
            return HttpResponse(json.dumps(js))
        if User.objects.filter(email=post_vars['email']).exists():
            js['value'] = _("An account with the Email '{email}' already exists.").format(email=post_vars['email'])
            js['field'] = 'email'
            return HttpResponse(json.dumps(js))
        raise
    registration.register(user)

    profile = UserProfile(user=user)
    profile.name = post_vars['name']
    profile.level_of_education = post_vars.get('level_of_education')
    profile.gender = post_vars.get('gender')
    profile.mailing_address = post_vars.get('mailing_address')
    profile.goals = post_vars.get('goals')

    try:
        profile.year_of_birth = int(post_vars['year_of_birth'])
    except (ValueError, KeyError):
        # If they give us garbage, just ignore it instead
        # of asking them to put an integer.
        profile.year_of_birth = None

    try:
        profile.save()
    except Exception:
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
    return (user, profile, registration)
@ensure_csrf_cookie
def create_account(request, post_override=None):
'''
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
'''
js = {'success': False}
post_vars = post_override if post_override else request.POST
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
DoExternalAuth = 'ExternalAuthMap' in request.session
if DoExternalAuth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
email = eamap.external_email
except ValidationError:
email = post_vars.get('email', '')
if eamap.external_name.strip() == '':
name = post_vars.get('name', '')
else:
name = eamap.external_name
password = eamap.internal_password
post_vars = dict(post_vars.items())
post_vars.update(dict(email=email, name=name, password=password))
log.debug(u'In create_account with external_auth: user = %s, email=%s', name, email)
# Confirm we have a properly formed request
for a in ['username', 'email', 'password', 'name']:
if a not in post_vars:
js['value'] = _("Error (401 {field}). E-mail us.").format(field=a)
js['field'] = a
return HttpResponse(json.dumps(js))
if post_vars.get('honor_code', 'false') != u'true':
js[' | value'] = _("To enroll, you must follow the honor code.").format(field=a)
js['field'] = 'honor_code'
return HttpResponse(json.dumps(js))
# Can't have terms of service for certain SHIB users, like at Stanford
tos_not_required = settings.MITX_FEATURES.g | et("AUTH_USE_SHIB") \
and settings.MITX_FEATURES.get('SHIB_DISABLE_TOS') \
and DoExternalAuth and ("shib" in eamap.external_domain)
if not tos_not_required:
if post_vars.get('terms_of_service', 'false') != u'true':
js['value'] = _("You must accept the terms of service.").format(field=a)
js['field'] = 'terms_of_service'
return HttpResponse(json.dumps(js))
# Confirm appropriate fields are there.
# TODO: Check e-mail format is correct.
# TODO: Confirm e-mail is not from a generic domain (mailinator, etc.)? Not sure if
# this is a good idea
# TODO: Check password is sane
required_post_vars = ['username', 'email', 'name', 'password', 'terms_of_service', 'honor_code']
if tos_not_required:
required_post_vars = ['username', 'email', 'name', 'password', 'honor_code']
for a in required_post_vars:
if len(post_vars[a]) < 2:
error_str = {'username': 'Username must be minimum of two characters long.',
'email': 'A properly formatted e-mail is required.',
'name': 'Your legal name must be a minimum of two characters long.',
' |
#!/usr/bin/env python3

from os import environ, system
from subprocess import Popen

# Launcher banner.
print('\nUltimate Doom (Classic)')
print('Link: https://store.steampowered.com/app/2280/Ultimate_Doom/\n')

home = environ['HOME']
core = home + '/bin/games/steam-connect/steam-connect-core.py'
logo = home + '/bin/games/steam-connect/doom-logo.txt'

game = 'doom-1'
stid = '2280'

# Assemble the gzdoom command line piece by piece:
# binary, dmflags, config file, save directory, IWAD and mod files.
launch_parts = (
    'gzdoom',
    ' +set dmflags 4521984',
    ' -config ' + home + '/.config/gzdoom/gzdoom-classic.ini',
    ' -savedir ' + home + '/.config/gzdoom/saves/' + game,
    ' -iwad DOOM.WAD',
    ' -file music-doom.zip sprite-fix-6-d1.zip doom-sfx-high.zip speed-weapons.zip',
)
args = ''.join(launch_parts)

system('cat ' + logo)
Popen([core, stid, args]).wait()
|
import csv
from django.db import transaction
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class CSVImportError(Exception):
    """Raised to abort a CSV import (and roll back its transaction)."""
    pass
class ImportCSVForm(forms.Form):
    """
    Form that validates an uploaded CSV file and imports its rows.

    Each data row is handed to ``importer_class`` (supplied at
    construction time; it is instantiated with the row dict and must
    offer ``is_valid()``/``save()``/``non_field_errors()`` — presumably
    a Django form, TODO confirm). Row/column errors are collected on
    this form, and the whole import runs in one transaction that is
    rolled back if any row fails.
    """

    csv_file = forms.FileField(required=True, label=_('CSV File'))
    has_headers = forms.BooleanField(
        label=_('Has headers'),
        help_text=_('Check this if your CSV file '
                    'has a row with column headers.'),
        initial=True,
        required=False,
    )

    def __init__(self, *args, **kwargs):
        # `importer_class` and `dialect` are required keyword arguments;
        # pop them before delegating to the stock Form constructor.
        self.importer_class = kwargs.pop('importer_class')
        self.dialect = kwargs.pop('dialect')
        super(ImportCSVForm, self).__init__(*args, **kwargs)
        self.fields['csv_file'].help_text = "Expected fields: {}".format(self.expected_fields)

    def clean_csv_file(self):
        # Normalize the uploaded file so csv.DictReader can consume it
        # on both Python versions.
        if six.PY3:
            # DictReader expects a str, not bytes in Python 3.
            csv_text = self.cleaned_data['csv_file'].read()
            csv_decoded = six.StringIO(csv_text.decode('utf-8'))
            return csv_decoded
        else:
            return self.cleaned_data['csv_file']

    @property
    def expected_fields(self):
        # Column names declared on the importer class, joined for display
        # in the upload field's help text.
        fields = self.importer_class._meta.fields
        return ', '.join(fields)

    @transaction.atomic
    def import_csv(self):
        """
        Parse and import the uploaded file.

        Raises CSVImportError — aborting the surrounding transaction —
        when the CSV is malformed or any row failed to validate.
        """
        try:
            reader = csv.DictReader(
                self.cleaned_data['csv_file'],
                fieldnames=self.importer_class._meta.fields,
                dialect=self.dialect,
            )
            # Row numbers reported in error messages start at 1.
            reader_iter = enumerate(reader, 1)
            if self.cleaned_data['has_headers']:
                six.advance_iterator(reader_iter)
            self.process_csv(reader_iter)
            if not self.is_valid():
                raise CSVImportError()  # Abort the transaction
        except csv.Error:
            self.append_import_error(_("Bad CSV format"))
            raise CSVImportError()

    def process_csv(self, reader):
        # Import every (rownumber, row) pair; errors accumulate on the form.
        for i, row in reader:
            self.process_row(i, row)

    def append_import_error(self, error, rownumber=None, column_name=None):
        """
        Record an import error message, optionally tied to a row/column.

        All messages are stored as non-field errors on this form.
        """
        if rownumber is not None:
            if column_name is not None:
                # Translators: "{row}", "{column}" and "{error}"
                # should not be translated
                fmt = _("Could not import row #{row}: {column} - {error}")
            else:
                # Translators: "{row}" and "{error}" should not be translated
                fmt = _("Could not import row #{row}: {error}")
        else:
            if column_name is not None:
                raise ValueError("Cannot raise a CSV import error on a specific "
                                 "column with no row number.")
            else:
                # Translators: "{error}" should not be translated
                fmt = _("Could not import the CSV document: {error}")
        if NON_FIELD_ERRORS not in self._errors:
            self._errors[NON_FIELD_ERRORS] = self.error_class()
        self._errors[NON_FIELD_ERRORS].append(
            fmt.format(error=error, row=rownumber, column=column_name))

    def process_row(self, i, row):
        # Validate one row with a fresh importer instance; save on success,
        # otherwise copy the importer's errors onto this form.
        importer = self.importer_class(data=row)
        if importer.is_valid():
            importer.save()
        else:
            for error in importer.non_field_errors():
                self.append_import_error(rownumber=i, error=error)
            for field in importer:
                for error in field.errors:
                    self.append_import_error(rownumber=i, column_name=field.label,
                                             error=error)
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.json import dumps
import MaKaC
def jsonDescriptor(object):
    """
    Build a JSON-serializable descriptor (dict of ids) for a MaKaC
    timetable object, or return None for unsupported types.
    """
    # TODO: Merge with locators?
    if isinstance(object, MaKaC.conference.Conference):
        return {'conference': object.getId()}

    if isinstance(object, MaKaC.conference.Contribution):
        return {
            'conference': object.getConference().getId(),
            'contribution': object.getId(),
        }

    if isinstance(object, MaKaC.conference.Session):
        return {
            'conference': object.getConference().getId(),
            'session': object.getId(),
        }

    if isinstance(object, MaKaC.conference.SessionSlot):
        return {
            'conference': object.getConference().getId(),
            'session': object.getSession().getId(),
            'slot': object.getId(),
        }

    if isinstance(object, MaKaC.schedule.BreakTimeSchEntry):
        owner = object.getOwner()
        info = {
            'conference': owner.getConference().getId(),
            'break': object.getId(),
        }
        # Breaks inside a slot also carry their slot/session ids.
        if isinstance(owner, MaKaC.conference.SessionSlot):
            info['slot'] = owner.getId()
            info['session'] = owner.getSession().getId()
        return info

    return None
def jsonDescriptorType(descriptor):
    """
    Map a descriptor dict (as built by jsonDescriptor) back to the most
    specific MaKaC class it refers to, or None if no known key matches.
    """
    # Keys are tested from most to least specific; order matters because
    # a descriptor may carry several of these keys at once.
    if 'break' in descriptor:
        return MaKaC.schedule.BreakTimeSchEntry
    if 'slot' in descriptor:
        return MaKaC.conference.SessionSlot
    if 'contribution' in descriptor:
        return MaKaC.conference.Contribution
    if 'session' in descriptor:
        return MaKaC.conference.Session
    if 'conference' in descriptor:
        return MaKaC.conference.Conference
    return None
def decideInheritanceText(event):
    """
    Return the label describing the parent *event* inherits its
    location from.
    """
    if isinstance(event, MaKaC.conference.SessionSlot):
        text = _("Inherit from parent slot")
    elif isinstance(event, MaKaC.conference.Session):
        text = _("Inherit from parent session")
    elif isinstance(event, MaKaC.conference.Conference):
        text = _("Inherit from parent event")
    else:
        # Fall back to a debug representation of the object itself.
        # The previous code referenced an undefined name `parent` here,
        # which raised NameError for any unexpected type.
        text = repr(event)
    return text
def roomInfo(event, level='real'):
    """
    Get inherited/real/own location & room properties for an event.

    :param event: a MaKaC timetable object exposing room/location getters
    :param level: one of 'inherited', 'real' (default) or 'own'
    :return: dict with 'location', 'room', 'address' and 'text' keys
        (None/'' when the information is unavailable)
    :raises ValueError: if *level* is not one of the supported values
    """
    if level == 'inherited':
        room = event.getInheritedRoom()
        location = event.getInheritedLocation()
        text = decideInheritanceText(event.getLocationParent())
    elif level == 'real':
        room = event.getRoom()
        location = event.getLocation()
        text = decideInheritanceText(event)
    elif level == 'own':
        room = event.getOwnRoom()
        location = event.getOwnLocation()
        text = ''
    else:
        # Previously an unknown level fell through and raised NameError
        # on the unbound locals below; fail fast with a clear message.
        raise ValueError("Unknown room info level: {0!r}".format(level))

    locationName, roomName, address = None, None, None
    if location:
        locationName = location.getName()
        address = location.getAddress()
    if room:
        roomName = room.getName()

    return {'location': locationName,
            'room': roomName,
            'address': address,
            'text': text}
|
#
# Here is a more complicated example that loads a .csv file and
# then creates a plot from the x,y data in it.
# The data file is the saved curve from partsim.com of the low pass filter.
# It was saved as xls file and then opened in Excel and exported to csv
#
# First import the csv parser, the numeric tools and plotting tools
import csv
import numpy as np  # This gives numpy the shorthand np
import matplotlib.pyplot as plt

x_ar = []   # x values (first CSV column)
y1_ar = []  # second CSV column
y2_ar = []  # third CSV column

#
# Open the file and pass it to the csv parser. The `with` block makes
# sure the file is closed again once we are done reading it (the old
# version never closed it).
#
with open("low_pass_filter.csv") as f:
    data = csv.reader(f)
    # The first two rows hold the column headers and the units.
    # Use the builtin next(); `data.next()` only exists on Python 2.
    headers = next(data)
    units = next(data)
    #
    # Here is a "wicked" way in Python that does quicker what the
    # the more verbose code does below. It is "Matlab" like.
    # dat = np.array([ [float(z) for z in x] for x in data ])  # put the data in dat as floats.
    # x_ar  = dat[:,0]   # select the first column
    # y1_ar = dat[:,1]   # select the second column
    # y2_ar = dat[:,2]   # select the third column
    for (x, y1, y2) in data:       # Unpack the csv data into x,y1,y2 variables.
        x_ar.append(float(x))
        y1_ar.append(float(y1))
        y2_ar.append(float(y2))    # Convert from string to float and store.

#
# Now plot the data. plt.plot returns a tuple (plot, )
#
(p1,) = plt.plot(x_ar, y1_ar, color='green', label=headers[1])
(p2,) = plt.plot(x_ar, y2_ar, color='blue', label=headers[2])
plt.legend(handles=[p1, p2])  # make sure the legend is drawn
plt.xscale('log')             # plot with a log x axis
plt.yscale('log')
plt.grid(True)                # and a grid.
plt.title('Low pass filter')
plt.xlabel('F[Hz]', position=(0.9, 1))
plt.ylabel('Amplitude [Volt]')
plt.show()                    # show the plot.
|
ean=experiment.mean,
# # variance=experiment.variance,
# # slope=experiment.slope,
# # limits=experiment.limits,
# # csv=experiment.csv,
# # normalize=experiment.normalize)
# #
# # f = open(experiment.output_file, "wb")
# #
# # m.save(f)
# #
# # f.close()
#
# def tza_sep_bands(job):
# """
# :type job: BandJob
# """
#
# if job.lnf_use:
# feats = BF.BandwiseFeatures(job.filename, db_spec=False)
# rrn.remove_random_noise(feats.spectrogram, filter_compensation=job.lnf_compensation, passes=job.lnf_passes)
# feats.spec_to_db()
# else:
# feats = BF.BandwiseFeatures(job.filename)
#
# if job.band_iterator == 'one':
# a = BF.OneBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq))
#
# if job.band_iterator == 'linear':
# a = BF.LinearBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq),
# step=job.band_step,
# nbands=job.band_nbands)
# if job.band_iterator == 'mel':
# a = BF.MelBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq),
# step=job.band_step,
# nbands=job.band_nbands)
| #
# logger.debug("Extracting features for %s", job.filename)
# T0 = time.time()
# feats.calculate_features_per_ | band(a)
# T1 = time.time()
# logger.debug("Feature extraction took %f seconds", T1 - T0)
#
# return feats.band_features
def tza_bands_parallel(experiment, n_processes=1):
    """
    Extract bandwise features and texture windows for every file in the
    experiment's list, in parallel, then aggregate statistics and save
    the resulting model.

    :type experiment: BandExperiment
    :type n_processes: int
    :return: the saved stats model
    """
    with open(experiment.mirex_list_file) as list_file:
        files = list_file.read().splitlines()

    jobs = [BandJob(filename, experiment.band_iterator, experiment.band_step,
                    experiment.band_nbands,
                    also_one_band=experiment.also_one_band,
                    lnf_use=experiment.lnf_use,
                    lnf_compensation=experiment.lnf_compensation,
                    lnf_passes=experiment.lnf_passes)
            for filename in files]

    # Calculate features in parallel.
    pool = Pool(processes=n_processes)
    features = pool.map(tza_bands, jobs)
    pool.close()
    pool.join()

    # Calculate texture windows (window size 100) in parallel.
    texture_jobs = [(feature, 100) for feature in features]
    pool = Pool(processes=n_processes)
    textures = pool.map(tza_calc_textures, texture_jobs)
    pool.close()
    pool.join()

    stats = feat_stats.Stats()
    m = stats.stats(textures,
                    mean=experiment.mean,
                    variance=experiment.variance,
                    slope=experiment.slope,
                    limits=experiment.limits,
                    csv=experiment.csv,
                    normalize=experiment.normalize)

    # Context-manage the output file so it is closed even if save() fails
    # (the previous code leaked the handle on exceptions).
    out_path = experiment.mirex_scratch_folder + "/" + experiment.output_file
    with open(out_path, "wb") as out_file:
        m.save(out_file, restore_state=True)

    return m
def tza_calc_textures(args):
    """
    Compute texture windows for one feature track.

    ``args`` is a (feature, window_size) tuple, packed that way so the
    function can be used with Pool.map.
    """
    feature, window_size = args
    tw = texture_window.ToTextureWindow()

    logger.debug("calculating textures for %s", feature.metadata.filename)
    start = time.time()
    results = tw.to_texture(feature, window_size)
    logger.debug("texture calculation took %f seconds", time.time() - start)

    return results
def tza_bands(job):
    """
    Extract bandwise features for a single audio file and join the
    per-band results.

    :type job: BandJob
    :return: the joined (cropped) features for the file
    """
    if job.lnf_use:
        # Build the spectrogram without dB conversion first, denoise it,
        # then convert to dB.
        feats = BF.BandwiseFeatures(job.filename, db_spec=False)
        rrn.remove_random_noise(feats.spectrogram,
                                filter_compensation=job.lnf_compensation,
                                passes=job.lnf_passes)
        feats.spec_to_db()
    else:
        feats = BF.BandwiseFeatures(job.filename)

    low = int(feats.spectrogram.metadata.min_freq)
    high = int(feats.spectrogram.metadata.max_freq)

    # Pick the band iterator requested by the job.
    if job.band_iterator == 'one':
        band_iter = BF.OneBand(low=low, high=high)
    elif job.band_iterator == 'linear':
        band_iter = BF.LinearBand(low=low, high=high,
                                  step=job.band_step,
                                  nbands=job.band_nbands)
    elif job.band_iterator == 'mel':
        band_iter = BF.MelBand(low=low, high=high,
                               step=job.band_step,
                               nbands=job.band_nbands)

    logger.debug("Extracting features for %s", job.filename)
    start = time.time()
    feats.calculate_features_per_band(band_iter,
                                      also_one_band=job.also_one_band,
                                      discard_bin_zero=True)
    logger.debug("Feature extraction took %f seconds", time.time() - start)

    feats.join_bands(crop=True)
    return feats.joined_features
def MIREX_ExtractFeatures(scratch_folder, feature_extraction_list, n_processes, **kwargs):
    """
    MIREX feature-extraction entry point.

    Builds a BandExperiment from the keyword options and runs the
    parallel bandwise extraction.

    :param scratch_folder: folder the output model is written into
    :param feature_extraction_list: path to the list of audio files
    :param n_processes: number of worker processes
    :return: the saved stats model (result of tza_bands_parallel)

    Recognized kwargs: output_file, band_iterator, band_nbands,
    also_one_band.
    """
    # kwargs.has_key() is Python-2-only and long deprecated; `.get()` is
    # equivalent here (missing key -> None, and None == True is False).
    also_one_band = kwargs.get('also_one_band') == True

    exp = BandExperiment(feature_extraction_list, scratch_folder,
                         output_file=kwargs['output_file'],
                         band_iterator=kwargs['band_iterator'],
                         band_nbands=kwargs['band_nbands'],
                         also_one_band=also_one_band)

    if also_one_band:
        # print with parentheses works on both Python 2 and Python 3.
        print('also running fullband')

    return tza_bands_parallel(exp, n_processes=n_processes)
if __name__ == "__main__":
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_bands_500.fm", band_iterator='linear', band_step=500)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_bands_1000.fm", band_iterator='linear', band_step=1000)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_bands_2000.fm", band_iterator='linear', band_step=2000)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_100.fm", band_iterator='mel', band_step=100)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_300.fm", band_iterator='mel', band_step=300)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_500.fm", band_iterator='mel', band_step=500)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_bands_1000.fm", band_iterator='mel', band_step=1000)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_one_band.fm", band_iterator='one')
# tza_bands_parallel(exp, n_processes=4)
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_10b.fm", band_iterator='linear', band_nbands=10)
# tza_bands_parallel(exp, n_processes=4)
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_30b.fm", band_iterator='linear', band_nbands=30)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_linear_50b.fm", band_iterator='linear', band_nbands=50)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_10b.fm", band_iterator='mel', band_nbands=10)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_wav/", "fm/genres/genres_tza_mel_30b.fm", band_iterator='mel', band_nbands=30)
# tza_bands_parallel(exp, n_processes=4)
#
# exp = BandExperiment("/home/juliano/Music/genres_w |
MDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=',
'Content-Type': 'text/html',
'Expires': 'Thu, 17 Nov 2005 18:49:58 GMT',
'X-OSS-Meta-Author': 'foo@bar.com',
'X-OSS-Magic': 'abracadabra',
'Host': 'oss-example.oss-cn-hangzhou.aliyuncs.com'
}
action = '/oss-example/nelson'
actual = OSSConnection._get_auth_signature('PUT', headers, {},
headers['Expires'],
self.conn.key,
action,
'x-oss-')
self.assertEqual(expected, actual)
class ObjectTestCase(unittest.TestCase):
    """Tests for the storage Object model itself."""

    def test_object_with_chinese_name(self):
        # repr() of an Object must not blow up on a non-ASCII name.
        driver = OSSStorageDriver(*STORAGE_OSS_PARAMS)
        obj = Object(name='中文', size=0, hash=None, extra=None,
                     meta_data=None, container=None, driver=driver)
        self.assertIsNotNone(obj.__repr__())
class OSSMockHttp(MockHttp, unittest.TestCase):
    """Canned HTTP responses for the OSS storage driver tests.

    The mock transport dispatches to the method matching the scenario
    under test; each method returns the (status, body, headers, reason)
    tuple that MockHttp expects.
    """

    fixtures = StorageFileFixtures('oss')
    base_headers = {}

    def _unauthorized(self, method, url, body, headers):
        return (httplib.UNAUTHORIZED,
                '',
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_containers_empty(self, method, url, body, headers):
        body = self.fixtures.load('list_containers_empty.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_containers(self, method, url, body, headers):
        body = self.fixtures.load('list_containers.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_container_objects_empty(self, method, url, body, headers):
        body = self.fixtures.load('list_container_objects_empty.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_container_objects(self, method, url, body, headers):
        body = self.fixtures.load('list_container_objects.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_container_objects_chinese(self, method, url, body, headers):
        body = self.fixtures.load('list_container_objects_chinese.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_container_objects_prefix(self, method, url, body, headers):
        # The driver must forward the requested prefix as a query parameter.
        params = {'prefix': self.test.prefix}
        self.assertUrlContainsQueryParams(url, params)
        body = self.fixtures.load('list_container_objects_prefix.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _get_container(self, method, url, body, headers):
        return self._list_containers(method, url, body, headers)

    def _get_object(self, method, url, body, headers):
        return self._list_containers(method, url, body, headers)

    def _notexisted_get_object(self, method, url, body, headers):
        return (httplib.NOT_FOUND,
                body,
                self.base_headers,
                httplib.responses[httplib.NOT_FOUND])

    def _test_get_object(self, method, url, body, headers):
        # Response headers the driver parses into object meta data / extras.
        self.base_headers.update(
            {'accept-ranges': 'bytes',
             'connection': 'keep-alive',
             'content-length': '0',
             'content-type': 'application/octet-stream',
             'date': 'Sat, 16 Jan 2016 15:38:14 GMT',
             'etag': '"D41D8CD98F00B204E9800998ECF8427E"',
             'last-modified': 'Fri, 15 Jan 2016 14:43:15 GMT',
             'server': 'AliyunOSS',
             'x-oss-object-type': 'Normal',
             'x-oss-request-id': '569A63E6257784731E3D877F',
             'x-oss-meta-rabbits': 'monkeys'})
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _invalid_name(self, method, url, body, headers):
        # test_create_container_bad_request
        return (httplib.BAD_REQUEST,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _already_exists(self, method, url, body, headers):
        # test_create_container_already_existed
        return (httplib.CONFLICT,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _create_container(self, method, url, body, headers):
        # test_create_container_success: a plain PUT with an empty body.
        self.assertEqual('PUT', method)
        self.assertEqual('', body)
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _create_container_location(self, method, url, body, headers):
        # test_create_container_success: PUT with a location constraint body.
        self.assertEqual('PUT', method)
        location_constraint = ('<CreateBucketConfiguration>'
                               '<LocationConstraint>%s</LocationConstraint>'
                               '</CreateBucketConfiguration>' %
                               self.test.ex_location)
        self.assertEqual(location_constraint, body)
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _delete_container_doesnt_exist(self, method, url, body, headers):
        # test_delete_container_doesnt_exist
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _delete_container_not_empty(self, method, url, body, headers):
        # test_delete_container_not_empty
        return (httplib.CONFLICT,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _delete_container(self, method, url, body, headers):
        return (httplib.NO_CONTENT,
                body,
                self.base_headers,
                httplib.responses[httplib.NO_CONTENT])

    def _foo_bar_object_not_found(self, method, url, body, headers):
        # test_delete_object_not_found
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _foo_bar_object_delete(self, method, url, body, headers):
        # test_delete_object
        return (httplib.NO_CONTENT,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _list_multipart(self, method, url, body, headers):
        # Serve page 1 on the first request, page 2 once a key-marker is sent.
        query_string = urlparse.urlsplit(url).query
        query = parse_qs(query_string)

        if 'key-marker' not in query:
            body = self.fixtures.load('ex_iterate_multipart_uploads_p1.xml')
        else:
            body = self.fixtures.load('ex_iterate_multipart_uploads_p2.xml')

        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _foo_bar_object(self, method, url, body, headers):
        # test_download_object_success
        body = generate_random_data(1000)
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _foo_bar_object_invalid_size(self, method, url, body, headers):
        # test_upload_object_invalid_file_size
        body = ''
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _foo_test_stream_data_multipart(self, method, url, body, headers):
        # Fixed: the original assigned ``headers = {}`` and immediately
        # overwrote it with the etag dict; the dead store is removed.
        body = ''
        headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])
class OSSStorageDriverTestCase(unittest.TestCase):
driver_type = OSSStor |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Задание 1: классный Человек.
УСЛОВИЕ:
Реализовать класс Person, который отображает запись в книге контактов.
Класс имеет 4 атрибута:
- surname - строка - фамилия контакта (обязательный)
- first_name - строка - имя контакта (обязательный)
- nickname - строка - псевдоним (опциональный)
- birth_date - объект datetime.date (обязательный)
Каждый вызов класса должен создавать экземпляр (инстанс) класса с указанными
атрибутами.
Также класс имеет 2 метода:
- get_age() - считает возраст контакта в полных годах на дату вызова и
возвращает строку вида: "27";
- get_fullname() - возвращает строку, отражающую полное имя (фамилия + имя)
контакта;
"""
__author__ = "Sergei Shybkoi"
__copyright__ = "Copyright 2014, The Homework Project"
__email__ = "heap_@mail.ru"
__status__ = "Production"
__date__ = "2014-11-18"
import datetime
class Person(object):
u"""Класс Person"""
def __init__(self, surname, first_name, birth_date, nickname=None):
u"""Инишн класса"""
try:
var_date = datetime.datetime.strptime(birth_date, "%Y-%m-%d | ")
res_date = datetime.date(var_date.year,
var_date.month, var_date.day)
except TypeError:
print | "Incorrect type of birthday date!"
res_date = None
except ValueError:
print "Wrong value of birthday date!"
res_date = None
self.surname = surname
self.first_name = first_name
self.birth_date = res_date
if nickname is not None:
self.nickname = nickname
def get_age(self):
u"""Метод класса подсчитывает и выводит количество полных лет"""
if self.birth_date is not None:
today_date = datetime.date.today()
delta = today_date.year - self.birth_date.year
if today_date.month <= self.birth_date.month \
and today_date.day < self.birth_date.day:
delta -= 1
print "Age:", delta
return str(delta)
else:
print "No correct data about person's birthday."
return "0"
def get_fullname(self):
u"""Метод выводит и возвращаем полное имя экземпляра класса Person"""
print self.surname, self.first_name
return self.surname + " " + self.first_name
|
# -*- coding:utf-8 -*-
'''
x1 | 1perf测试工具执行脚本
'''
import os, shutil, re
from test import BaseTest
from lpt.lib.error import *
from lpt.lib import lptxml
from lpt.lib import | lptlog
from lpt.lib.share import utils
from lpt.lib import lptreport
import glob
glxgears_keys = ["gears"]
class TestControl(BaseTest):
    '''UnixBench "ubgears" graphics benchmark driver.

    Inherits the job/tool plumbing from BaseTest.
    '''

    def __init__(self, jobs_xml, job_node, tool, tarball='UnixBench5.1.3-1.tar.bz2'):
        super(TestControl, self).__init__(jobs_xml, job_node, tool, tarball)

    def check_deps(self):
        '''Building ubgears needs gcc plus the X11/GL development headers.'''
        utils.has_gcc()
        utils.has_file("libX11-devel", "/usr/include/X11/Xlib.h")
        utils.has_file("libGL-devel", "/usr/include/GL/gl.h")
        utils.has_file("libXext-devel", "/usr/include/X11/extensions/Xext.h")

    def setup(self):
        '''Extract and compile the sources if the binary is not yet built.'''
        if not self.check_bin(self.processBin):
            self.tar_src_dir = self.extract_bar()
            os.chdir(self.tar_src_dir)
            utils.make(extra='clean', make='make')
            # Enable the graphic tests: uncomment "#GRAPHIC_TESTS = defined"
            # in the UnixBench Makefile before compiling.
            lptlog.info("修改Makefile, 取消#GRAPHIC_TESTS = defined注释")
            cmd = '''sed -i "s/^#GRAPHIC_TESTS/GRAPHIC_TESTS/g" Makefile '''
            utils.system(cmd)
            self.compile(make_status=True)
            os.chdir(self.lpt_root)

    def run(self):
        '''Run ./Run ubgears for the configured number of iterations.'''
        tool_node = self.check_tool_result_node()

        lptlog.info("----------开始获取测试参数")
        self.times = self.get_config_value(tool_node, "times", 10, valueType=int)
        lptlog.info("测试次数: %d" % self.times)

        self.parallels = [1]
        cmd = "./Run"
        args_list = ["ubgears", "-i", "%d" % self.times]
        self.mainParameters["parameters"] = " ".join([cmd] + args_list)

        # Run from the UnixBench source root; clear stale results first.
        os.chdir(self.tar_src_dir)
        utils.system("rm -rf results/*")
        lptlog.info("---------运行测试脚本")
        utils.run_shell2(cmd, args_list=args_list, file=os.devnull)
        os.chdir(self.lpt_root)

    def create_result(self):
        '''Locate the newest results file and parse it.'''
        os.chdir(self.tar_src_dir)
        temp_result_list = glob.glob("./results/*[0-9]")
        if not temp_result_list:
            # Fixed: the original used the py2-only raise form with a broken
            # "%" conversion ("% result..." lacked the "s"), which itself
            # raised ValueError instead of the intended NameError.
            raise NameError("%s result data not found.." % self.tool)
        else:
            temp_result_file = temp_result_list[0]
            self.__match_index(temp_result_file)
        # Back to the framework root.
        os.chdir(self.lpt_root)

    def __match_index(self, file):
        '''Parse the UnixBench screen output saved in ``file``.'''
        self.parallels = [1]
        self.times = 3
        result_dic = {}.fromkeys(glxgears_keys, 0)
        result_lines = utils.read_all_lines(file)
        for parallel in self.parallels:
            re_match = "[\d]+ CPUs in system; running %d parallel copy of tests" % parallel
            parallel_result_dic = result_dic.copy()
            for line in result_lines:
                if re.search(re_match, line, re.I):
                    # The value of interest sits 5 lines below the header.
                    parallel_index = result_lines.index(line)
                    paralell_result_list = [self.__get_value(result_lines, parallel_index + index) for index in (5,)]
                    for l, v in zip(tuple(glxgears_keys), tuple([utils.change_type(i) for i in paralell_result_list])):
                        parallel_result_dic[l] = "%.1f" % v
            parallel_result_attrib = self.create_result_node_attrib("Average", self.times, parallel, self.parallels)
            self.result_list.append([parallel_result_attrib, parallel_result_dic])

    def __get_value(self, lines, index):
        # Second-to-last whitespace-separated token on the given line.
        return lines[index].split()[-2]
|
"""The tests for hls streams."""
from datetime import timedelta
from io import BytesIO
from unittest.mock import patch
from homeassistant.setup import async_setup_component
from homeassistant.components.stream.core import Segment
from homeassistant.components.stream.recorder import recorder_save_worker
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import (
generate_h264_video, preload_stream)
async def test_record_stream(hass, hass_client):
    """
    Test record stream.

    Purposefully not mocking anything here to test full
    integration with the stream component.
    """
    await async_setup_component(hass, 'stream', {
        'stream': {}
    })

    with patch(
            'homeassistant.components.stream.recorder.recorder_save_worker'):
        # Feed the demo track through the recorder provider.
        source = generate_h264_video()
        stream = preload_stream(hass, source)
        recorder = stream.add_provider('recorder')
        stream.start()

        # Drain segments until the recorder signals the end with a falsy value.
        segment_count = 0
        segment = await recorder.recv()
        while segment:
            segment_count += 1
            segment = await recorder.recv()
        stream.stop()

        assert segment_count > 1
async def test_recorder_timeout(hass, hass_client):
    """Advancing the clock by a minute should trigger recorder cleanup."""
    await async_setup_component(hass, 'stream', {
        'stream': {}
    })

    with patch(
            'homeassistant.components.stream.recorder.RecorderOutput.cleanup'
    ) as mock_cleanup:
        # Setup demo track
        source = generate_h264_video()
        stream = preload_stream(hass, source)
        recorder = stream.add_provider('recorder')
        stream.start()

        # Receive one segment before letting the recorder idle.
        await recorder.recv()

        # Fire a time-changed event one minute in the future.
        future = dt_util.utcnow() + timedelta(minutes=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()

        assert mock_cleanup.called
async def test_recorder_save():
    """Recorder save worker should write playable data to the target file."""
    source = generate_h264_video()
    buffer = BytesIO()
    buffer.name = 'test.mp4'

    recorder_save_worker(buffer, [Segment(1, source, 4)])

    # Something must have been written out.
    assert buffer.getvalue()
|
#!/usr/bin/env python3
"""hello with args"""
import sys
import os

if len(sys.argv) != 2:
    # No NAME supplied: print usage with the bare script name and fail.
    script = os.path.basename(sys.argv[0])
    print('Usage: {} NAME'.format(script))
    sys.exit(1)

print('Hello, {}!'.format(sys.argv[1]))
|
import os
import platform
import subprocess
import cat_service
from apps.general import headers
def main():
    """Run the lolcat workflow: banner, folder, count, download, display."""
    headers.print_header('LOLCAT FACTORY')

    output_folder = get_or_create_output_folder()
    cat_count = get_number_cats()

    # Pull the images from the lolcat API, then open the folder.
    download_cats(output_folder, cat_count)
    display_cats(output_folder)
def get_or_create_output_folder():
    """Return the lolcat output directory, creating it if it is missing.

    Fixed: ``os.mkdir`` failed when intermediate directories were absent,
    and the exists/isdir pre-check raced against creation.  ``os.makedirs``
    with ``exist_ok=True`` handles both.
    """
    dir_path = os.path.join('C:\\Users', 'Catriona', 'Desktop', 'Lolcats')
    os.makedirs(dir_path, exist_ok=True)
    return dir_path
def get_number_cats():
    """Prompt until the user types a non-negative integer, then return it.

    Fixed: removed the dead ``n_cats = 0`` initialiser and the redundant
    temporary; behaviour (prompt text, retry message, accepted input)
    is unchanged.
    """
    while True:
        number_files = input('On a scale of 1 to 10 how much cheering up do you need?')
        if number_files.isnumeric():
            return int(number_files)
        print('That was not a valid number please try again')
def download_cats(dir_path, n_cats):
    """Download n_cats images from the cat service into dir_path."""
    for index in range(n_cats):
        filename = 'lol_cat{}.jpg'.format(index)
        cat_service.get_cat(dir_path, filename)
def display_cats(folder):
    """Open ``folder`` in the platform's file browser."""
    print('Opening folder: {}'.format(folder))

    # Query the OS once and branch on the result.
    system = platform.system()
    if system == 'Darwin':
        subprocess.call(['open', folder])
    elif system == 'Windows':
        print('with windows')
        subprocess.call(['explorer', folder])
    elif system == 'Linux':
        subprocess.call(['xdg-open', folder])
    else:
        print('Do not support your os "{}"'.format(system))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*- |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Retweet`` model (Twitter id as PK, FKs to Tweet/User)."""

    dependencies = [
        ('twit', '0006_auto_20160419_0248'),
    ]

    operations = [
        migrations.CreateModel(
            name='Retweet',
            fields=[
                # Twitter's own 64-bit id is reused as the primary key.
                ('id', models.BigIntegerField(serialize=False, help_text='Unique id that comes from Twitter', primary_key=True)),
                ('created_at', models.DateTimeField(help_text='Time tweet was created')),
                ('tweet', models.ForeignKey(to='twit.Tweet')),
                ('user', models.ForeignKey(to='twit.User')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
import socket
import struct
import signal
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
class Connection(object):
    """
    TCP connection to the game server.

    The wire protocol uses network byte order: single 4-byte unsigned
    integers, and 480-byte tables of 120 unsigned ints interpreted as
    an 8x15 grid.
    """
    # struct format: one unsigned int, network byte order.
    BINARY_INT = '!1I'
    # struct format: 120 unsigned ints (8x15 table), network byte order.
    BINARY_TABLE = '!120I'

    def __init__(self, addr='127.0.0.1', port=42485):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.sock.connect((addr, port))

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        # Close the socket when leaving a ``with`` block.
        self.sock.close()

    def recv_int(self):
        """Receive one 4-byte unsigned integer from the server."""
        unpacked_value = self._recv_msg(byte_length=4)
        s = struct.Struct(self.BINARY_INT)
        integer = s.unpack(unpacked_value)
        return integer[0]

    def recv_table(self):
        """Receive a 480-byte table and return it as an 8x15 list of rows."""
        unpacked_value = self._recv_msg(byte_length=480)
        s = struct.Struct(self.BINARY_TABLE)
        ls = s.unpack(unpacked_value)
        table = [ls[15 * i: 15 * (i + 1)][:] for i in range(8)]  # reshape flat 120 ints into 8x15
        return table

    def _recv_msg(self, byte_length):
        """Read exactly ``byte_length`` bytes, looping over partial recvs.

        Raises RuntimeError when the peer closes the connection early.
        """
        unpacked_data = b''
        while len(unpacked_data) < byte_length:
            chunk = self.sock.recv(byte_length - len(unpacked_data), 0)
            if chunk == b'':
                raise RuntimeError('socket connection broken')
            unpacked_data += chunk
        return unpacked_data

    # NOTE(review): building the table here mixes concerns; constructing it
    # should be separated out and the finished table passed to send_table.
    def send_name(self, name, protocol=20070):
        """Send the login table: protocol id at [0][0], name chars in row 1."""
        table = [[0] * 15 for i in range(8)]
        table[0][0] = protocol
        for i, ch in enumerate(name):
            table[1][i] = ord(ch)
        self.send_table(table)

    def send_table(self, table):
        """Pack an 8x15 table into 480 bytes and send it."""
        ls = [item for inner in table for item in inner]  # flatten the 2-D list to 1-D
        s = struct.Struct(self.BINARY_TABLE)
        packed_value = s.pack(*ls)
        self._send_msg(packed_value)

    def _send_msg(self, msg):
        self.sock.sendall(msg)
|
from share.transform.chain.exceptions import * # noqa
from share.transform.chain.links import * # noqa
from share.transform.chain.parsers import * # noqa
from share.transf | orm.chain.transformer import ChainTransformer # noqa
from share.transform.chain.links import Context
# Context singleton to be used f | or parser definitions
# Class SHOULD be thread safe
# Accessing subattributes will result in a new copy of the context
# to avoid leaking data between chains
ctx = Context()
|
True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.C | harField', [], {'max_length': '100'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.r | elated.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('project', 'logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('djang |
from_response(location1)
location2 = ""
content_disposition = ""
for header in headers:
logger.info("header2="+str(header))
if header[0]=="location":
location2 = header[1]
location = location2
if location=="":
location = location1
return [ ["(Premium) [wupload]",location + "|" + "User-Agent="+urllib.quote("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12") ] ]
return []
def get_free_url(page_url):
    """Resolve the final "Free" (non-premium) download URL for a wupload page.

    Follows the location redirect, requests the free-download flow, then
    loops handling the countdown timer and the reCAPTCHA challenge until
    the final link appears.  Returns [["(Free)", url_with_headers]] on
    success or [] when the free link is missing.

    NOTE(review): the ``while True`` loop exits only via ``return`` or the
    user cancelling the wait dialog (``break``, which makes the function
    implicitly return None); if no pattern matches, ``data`` is unchanged
    and the loop spins forever — confirm this is acceptable.
    """
    # Follow an HTTP redirect if the server sends one.
    location = scrapertools.get_header_from_response(page_url, header_to_get="location")
    if location != "":
        page_url = location
    logger.info("[wupload.py] location=%s" % page_url)

    video_id = extract_id(page_url)
    logger.info("[wupload.py] video_id=%s" % video_id)

    data = scrapertools.cache_page(url=page_url)
    # Anchor that starts the free-download flow ("...start=1...").
    patron = 'href="(.*?start=1.*?)"'
    matches = re.compile(patron).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) == 0:
        logger.error("[wupload.py] No encuentra el enlace Free")
        return []

    # Get the free download link; make it absolute if needed.
    download_link = matches[0]
    if not download_link.startswith("http://"):
        download_link = urlparse.urljoin(page_url, download_link)
    logger.info("[wupload.py] Link descarga: " + download_link)

    # Fetch the link, mimicking the site's AJAX request headers.
    headers = []
    headers.append(["X-Requested-With", "XMLHttpRequest"])
    headers.append(["Referer", page_url])
    headers.append(["User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"])
    headers.append(["Content-Type", "application/x-www-form-urlencoded; charset=UTF-8"])
    headers.append(["Accept-Encoding", "gzip, deflate"])
    headers.append(["Accept", "*/*"])
    headers.append(["Accept-Language", "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"])
    headers.append(["Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"])
    headers.append(["Connection", "keep-alive"])
    headers.append(["Pragma", "no-cache"])
    headers.append(["Cache-Control", "no-cache"])
    data = scrapertools.cache_page(download_link, headers=headers, post="")
    logger.info(data)

    while True:
        # Detect the countdown wait imposed on free downloads.
        patron = "countDownDelay = (\d+)"
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            tiempo_espera = int(matches[0])
            logger.info("[wupload.py] tiempo de espera %d segundos" % tiempo_espera)
            #import time
            #time.sleep(tiempo_espera)
            from platformcode.xbmc import xbmctools
            resultado = xbmctools.handle_wait(tiempo_espera + 5, "Progreso", "Conectando con servidor Wupload (Free)")
            if resultado == False:
                break
            # Re-submit the anti-bot tokens after the wait has elapsed.
            tm = get_match(data, "name='tm' value='([^']+)'")
            tm_hash = get_match(data, "name='tm_hash' value='([^']+)'")
            post = "tm=" + tm + "&tm_hash=" + tm_hash
            data = scrapertools.cache_page(download_link, headers=headers, post=post)
            logger.info(data)
        else:
            logger.info("[wupload.py] no encontrado tiempo de espera")

        # Detect a reCAPTCHA challenge.
        patron = "Recaptcha\.create"
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            logger.info("[wupload.py] est� pidiendo el captcha")
            recaptcha_key = get_match(data, 'Recaptcha\.create\("([^"]+)"')
            logger.info("[wupload.py] recaptcha_key=" + recaptcha_key)
            data_recaptcha = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k=" + recaptcha_key)
            patron = "challenge.*?'([^']+)'"
            challenges = re.compile(patron, re.S).findall(data_recaptcha)
            if(len(challenges) > 0):
                challenge = challenges[0]
                image = "http://www.google.com/recaptcha/api/image?c=" + challenge
                # Show the captcha image and let the user type the answer.
                exec "import seriesly.captcha as plugin"
                tbd = plugin.Keyboard("", "", image)
                tbd.doModal()
                confirmed = tbd.isConfirmed()
                if (confirmed):
                    tecleado = tbd.getText()
                    #logger.info("")
                    #tecleado = raw_input('Grab ' + image + ' : ')
                    post = "recaptcha_challenge_field=%s&recaptcha_response_field=%s" % (challenge, tecleado.replace(" ", "+"))
                    data = scrapertools.cache_page(download_link, headers=headers, post=post)
                    logger.info(data)
        else:
            logger.info("[wupload.py] no encontrado captcha")

        # Detect the final download link.
        patron = '<p><a href="(http\:\/\/.*?wupload[^"]+)">'
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            final_url = matches[0]
            '''
            'GET /download/2616019677/4f0391ba/9bed4add/0/1/580dec58/3317afa30905a31794733c6a32da1987719292ff
            HTTP/1.1
            Accept-Language: es-es,es;q=0.8,en-us;q=0.5,en;q=0.3
            Accept-Encoding: gzip, deflate
            Connection: close\r\nAccept: */*\r\nUser-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12
            Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
            Host: s107.wupload.es
            Referer: http://www.wupload.es/file/2616019677
            Pragma: no-cache
            Cache-Control: no-cache
            Content-Type: application/x-www-form-urlencoded; charset=UTF-8
            00:39:39 T:2956623872   NOTICE: reply:
            00:39:39 T:2956623872   NOTICE: 'HTTP/1.1 200 OK\r\n'
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Server: nginx
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Date: Tue, 03 Jan 2012 23:39:39 GMT
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Content-Type: "application/octet-stream"
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Content-Length: 230336429
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Last-Modified: Tue, 06 Sep 2011 01:07:26 GMT
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Connection: close
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: Set-Cookie: dlc=1; expires=Thu, 02-Feb-2012 23:39:39 GMT; path=/; domain=.wupload.es
            00:39:39 T:2956623872   NOTICE: header:
            00:39:39 T:2956623872   NOTICE: : attachment; filename="BNS609.mp4"
            '''
            logger.info("[wupload.py] link descarga " + final_url)
            return [["(Free)",final_url + '|' + 'Referer=' + urllib.quote(page_url) + "&Content-Type=" + urllib.quote("application/x-www-form-urlencoded; charset=UTF-8")+"&Cookie="+urllib.quote("lastUrlLinkId="+video_id)]]
        else:
            logger.info("[wupload.py] no detectado link descarga")
def extract_id(url):
    """Return the numeric wupload file id embedded in ``url``.

    Delegates to ``get_match``, so "" is returned when no id is present.
    The pattern is now a raw string, the conventional form for regexes.
    """
    return get_match(url, r'wupload.*?/file/(\d+)')
def get_match(data, regex):
    """Return the first capture group of ``regex`` in ``data``, or "".

    Fixed the non-idiomatic ``m != None`` (now ``is not None``) and a
    stray semicolon; behaviour is unchanged.
    """
    m = re.search(regex, data)
    return m.group(1) if m is not None else ""
def find_videos(data):
encontrados = set()
devuelve = []
patronvideos = '(http://www.wupload.*?/file/\d+) | '
logger.info("[wupload. | py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos).findall(data)
for match in matches:
titulo = "[wupload]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'wupload' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# Encontrado en animeflv
#s=mediafire.com%2F%3F7fsmmq2144fx6t4|-|wupload.com%2Ffile%2F2653904582
patronvideos = 'wupload.com\%2Ffile\%2F(\d+)'
logger.info("[wu |
import urllib
import urllib2
import cookielib
import logging
class GISTokenGenerator:
    """Logs in to auth.aiesec.org and extracts the ``expa_token`` cookie."""

    def __init__(self, email, password):
        self.cj = cookielib.CookieJar()
        cookie_handler = urllib2.HTTPCookieProcessor(self.cj)
        self.opener = urllib2.build_opener(cookie_handler)
        self.email = email
        credentials = {'user[email]': email, 'user[password]': password}
        self.login_data = urllib.urlencode(credentials)

    def generate_token(self):
        """Sign in and return the expa_token cookie value.

        Raises Exception when no such cookie was set by the server.
        """
        logging.info('Generating a token for {0}...'.format(self.email))
        self.opener.open('https://auth.aiesec.org/users/sign_in', self.login_data)

        # Scan the whole jar; the last matching cookie wins, as before.
        token = None
        for cookie in self.cj:
            if cookie.name == 'expa_token':
                token = cookie.value

        if token is None:
            raise Exception('Unable to generate a token for {0}!'.format(self.email))
        return token
|
import sys, os
from pythonparser import diagnostic
from ...language.environment import ProcessArgumentManager
from ...master.databases import DeviceDB, DatasetDB
from ...master.worker_db import DeviceManager, DatasetManager
from ..module import Module
from ..embedding import Stitcher
from ..targets import OR1KTarget
from . import benchmark
def main():
    """Benchmark each stage of compiling an ARTIQ experiment module.

    Usage: <script> MODULE.py — the module must define a ``Benchmark``
    experiment class; ``device_db.py`` and ``dataset_db.pyon`` are
    expected next to it.
    """
    if not len(sys.argv) == 2:
        print("Expected exactly one module filename", file=sys.stderr)
        exit(1)

    def process_diagnostic(diag):
        # Print compiler diagnostics; abort on fatal/error severity.
        print("\n".join(diag.render()), file=sys.stderr)
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    # NOTE(review): executes the given file as trusted code.
    with open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench'}
        exec(testcase_code, testcase_vars)

    device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    device_mgr = DeviceManager(DeviceDB(device_db_path))
    dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
    dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
    argument_mgr = ProcessArgumentManager({})

    def embed():
        # Instantiate the experiment and stitch its run() into a kernel.
        experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))

        stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
        stitcher.stitch_call(experiment.run, (), {})
        stitcher.finalize()
        return stitcher

    # Run the full pipeline once so each later stage has its input ready.
    stitcher = embed()
    module = Module(stitcher)
    target = OR1KTarget()
    llvm_ir = target.compile(module)
    elf_obj = target.assemble(llvm_ir)
    elf_shlib = target.link([elf_obj])

    # Time every stage in isolation.
    benchmark(lambda: embed(),
              "ARTIQ embedding")
    benchmark(lambda: Module(stitcher),
              "ARTIQ transforms and validators")
    benchmark(lambda: target.compile(module),
              "LLVM optimizations")
    benchmark(lambda: target.assemble(llvm_ir),
              "LLVM machine code emission")
    benchmark(lambda: target.link([elf_obj]),
              "Linking")
    benchmark(lambda: target.strip(elf_shlib),
              "Stripping debug information")
if __name__ == "__main__":
main()
|
e'
# String identifiers for list sort methods.
SORT_METHOD_FULL_PATH = 'full_path'
SORT_METHOD_GENRE = 'genre'
SORT_METHOD_LABEL = 'label'
SORT_METHOD_LABEL_IGNORE_FOLDERS = 'label_ignore_folders'
SORT_METHOD_LABEL_IGNORE_THE = 'label_ignore_the'
SORT_METHOD_LAST_PLAYED = 'last_played'
SORT_METHOD_LISTENERS = 'listeners'
SORT_METHOD_MPAA_RATING = 'mpaa_rating'
SORT_METHOD_NONE = 'none'
SORT_METHOD_PLAY_COUNT = 'play_count'
SORT_METHOD_PLAYLIST_ORDER = 'playlist_order'
SORT_METHOD_PRODUCTION_CODE = 'production_code'
SORT_METHOD_PROGRAM_COUNT = 'program_count'
SORT_METHOD_SIZE = 'size'
SORT_METHOD_SONG_RATING = 'song_rating'
SORT_METHOD_STUDIO = 'studio'
SORT_METHOD_STUDIO_IGNORE_THE = 'studio_ignore_the'
SORT_METHOD_TITLE = 'title'
SORT_METHOD_TITLE_IGNORE_THE = 'title_ignore_the'
SORT_METHOD_TRACK_NUMBER = 'track_number'
SORT_METHOD_UNSORTED = 'unsorted'
SORT_METHOD_VIDEO_RATING = 'video_rating'
SORT_METHOD_VIDEO_RUNTIME = 'video_runtime'
SORT_METHOD_VIDEO_SORT_TITLE = 'video_sort_title'
SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = 'video_sort_title_ignore_the'
SORT_METHOD_VIDEO_TITLE = 'video_title'
SORT_METHOD_VIDEO_YEAR = 'video_year'

# String identifiers for listing content types.
CONTENT_TYPE_FILES = 'files'
CONTENT_TYPE_SONGS = 'songs'
CONTENT_TYPE_ARTISTS = 'artists'
CONTENT_TYPE_ALBUMS = 'albums'
CONTENT_TYPE_MOVIES = 'movies'
CONTENT_TYPE_TV_SHOWS = 'tvshows'
CONTENT_TYPE_EPISODES = 'episodes'
CONTENT_TYPE_MUSIC_VIDEOS = 'musicvideos'

# Log severity levels.
LOG_DEBUG = 0
LOG_INFO = 1
LOG_WARNING = 2
LOG_ERROR = 3
def __init__(self, path=u'/', params=None, plugin_name=u'', plugin_id=u''):
    """
    Base constructor: store the plugin identity, the navigated path and
    its parameters, and prepare slots for lazily-created helpers.

    :param path: navigated path inside the plugin (defaults to root)
    :param params: dict of query parameters for the path
    :param plugin_name: human readable name of the plugin
    :param plugin_id: id of the plugin (used to build plugin:// uris)
    """
    params = params if params else {}

    # lazily-initialized helpers; created on first access
    self._path_match = None
    self._python_version = None
    self._cache_path = None
    self._function_cache = None
    self._search_history = None
    self._favorite_list = None
    self._watch_later_list = None
    self._access_manager = None

    self._plugin_name = unicode(plugin_name)
    self._version = 'UNKNOWN'
    self._plugin_id = plugin_id
    self._path = path
    self._params = params
    self._utils = None
    self._view_mode = None

    # create valid uri
    self._uri = self.create_uri(self._path, self._params)
def set_path_match(self, path_match):
    """
    Store the regular-expression match object for the navigated path.

    :param path_match: regular expression match
    """
    self._path_match = path_match
def get_path_match(self):
    """
    Return the stored regular-expression match for the navigated path.

    :return: match of regular expression (or None if never set)
    """
    return self._path_match
def format_date_short(self, date_obj):
    """Format *date_obj* as a short date string. Abstract hook; concrete
    contexts must override."""
    raise NotImplementedError
def format_time(self, time_obj):
    """Format *time_obj* as a time string. Abstract hook; concrete
    contexts must override."""
    raise NotImplementedError
def get_language(self):
    """Return the current language identifier. Abstract hook; concrete
    contexts must override."""
    raise NotImplementedError
def _get_cache_path(self):
if not self._cache_path:
self._cache_path = os.path.join(self.get_data_path(), 'kodion')
pass
return self._cache_path
def get_function_cache(self):
    """
    Return the lazily-created function cache.

    The maximum cache size comes from the addon settings (in MB,
    default 5). On first creation, a pending user 'clear cache' request
    is honoured exactly once.
    """
    if not self._function_cache:
        settings = self.get_settings()
        max_cache_size_mb = settings.get_int(AbstractSettings.ADDON_CACHE_SIZE, 5)
        cache_dir = os.path.join(self._get_cache_path(), 'cache')
        self._function_cache = FunctionCache(cache_dir,
                                             max_file_size_kb=max_cache_size_mb * 1024)

        # honour a pending "clear cache" request, then reset the flag
        if settings.is_clear_cache_enabled():
            self.log_info('Clearing cache...')
            settings.disable_clear_cache()
            self._function_cache.remove_file()
            self.log_info('Clearing cache done')
    return self._function_cache
def cache_function(self, seconds, func, *args, **keywords):
    """
    Convenience wrapper: delegate *func* and its arguments to the
    function cache's ``get`` with a freshness window of *seconds*.
    """
    function_cache = self.get_function_cache()
    return function_cache.get(seconds, func, *args, **keywords)
def get_search_history(self):
    """
    Return the lazily-created search-history store.

    The maximum number of entries comes from the addon settings
    (default 50, setting value scaled by 10).
    """
    if not self._search_history:
        settings = self.get_settings()
        max_items = settings.get_int(AbstractSettings.ADDON_SEARCH_SIZE, 50,
                                     lambda x: x * 10)
        self._search_history = SearchHistory(
            os.path.join(self._get_cache_path(), 'search'), max_items)
    return self._search_history
def get_favorite_list(self):
    """Return the lazily-created favorites store."""
    if not self._favorite_list:
        favorites_path = os.path.join(self._get_cache_path(), 'favorites')
        self._favorite_list = FavoriteList(favorites_path)
    return self._favorite_list
def get_watch_later_list(self):
    """Return the lazily-created watch-later store."""
    if not self._watch_later_list:
        watch_later_path = os.path.join(self._get_cache_path(), 'watch_later')
        self._watch_later_list = WatchLaterList(watch_later_path)
    return self._watch_later_list
def get_access_manager(self):
    """Return the lazily-created access manager bound to the settings."""
    if not self._access_manager:
        self._access_manager = AccessManager(self.get_settings())
    return self._access_manager
def get_video_playlist(self):
    """Return the video playlist. Abstract hook; concrete contexts must
    override."""
    raise NotImplementedError
def get_audio_playlist(self):
    """Return the audio playlist. Abstract hook; concrete contexts must
    override."""
    raise NotImplementedError
def get_video_player(self):
    """Return the video player. Abstract hook; concrete contexts must
    override."""
    raise NotImplementedError
def get_audio_player(self):
    """Return the audio player. Abstract hook; concrete contexts must
    override."""
    raise NotImplementedError
def get_ui(self):
    """Return the UI helper. Abstract hook; concrete contexts must
    override."""
    raise NotImplementedError
def get_system_version(self):
    """Return the host system version. Abstract hook; concrete contexts
    must override."""
    raise NotImplementedError
def get_system_name(self):
    """Return the host system name. Abstract hook; concrete contexts
    must override."""
    raise NotImplementedError
def get_python_version(self):
    """
    Return the running Python interpreter version as a tuple of ints.

    The value is computed once via ``platform.python_version()`` and
    cached on the instance. On failure the error is logged and a
    ``(0, 0)`` fallback is cached instead.

    :return: version tuple, e.g. ``(2, 7, 3)``; ``(0, 0)`` on failure
    """
    if not self._python_version:
        try:
            import platform
            version_string = platform.python_version()
            # 'x.y.z' -> (x, y, z)
            self._python_version = tuple(
                int(part) for part in version_string.split('.'))
        except Exception as ex:
            # 'as' syntax is valid on Python 2.6+ and required on Python 3
            # (the old 'except Exception, ex' form is a SyntaxError there).
            self.log_error('Unable to get the version of python')
            self.log_error(ex.__str__())
            # use a tuple (not a list, as before) so version comparisons
            # behave the same on the fallback path as on the success path
            self._python_version = (0, 0)
    return self._python_version
def create_uri(self, path=u'/', params=None):
    """
    Build a ``plugin://<plugin_id><path>[?query]`` uri for the given
    path and parameters.

    :param path: path inside the plugin (defaults to root)
    :param params: dict of query parameters; dict values are serialized
        to JSON, everything is utf-8 encoded before url-encoding
    :return: the complete uri string
    """
    params = params if params else {}

    uri_path = utils.path.to_uri(path)
    if uri_path:
        uri = "%s://%s%s" % ('plugin', utils.strings.to_utf8(self._plugin_id), uri_path)
    else:
        uri = "%s://%s/" % ('plugin', utils.strings.to_utf8(self._plugin_id))

    if params:
        encoded_params = {}
        for key, param in params.items():
            # convert dict to string via json
            if isinstance(param, dict):
                param = json.dumps(param)
            encoded_params[key] = utils.strings.to_utf8(param)
        uri += '?' + urllib.urlencode(encoded_params)

    return uri
def get_path(self):
    """Return the currently navigated path of the plugin."""
    return self._path
def get_params(self):
    """Return the dict of parameters for the current path."""
    return self._params
def get_param(self, name, default=None):
    """Return a single parameter by *name*, or *default* when absent."""
    return self.get_params().get(name, default)
def get_data_path(self):
    """
    Return the path used for read/write access to files (profile data).
    Abstract hook; concrete contexts must override.

    :return: writable data path of the addon
    """
    raise NotImplementedError
def get_native_path(self):
    """Return the addon's installation path. Abstract hook; concrete
    contexts must override."""
    raise NotImplementedError
|
def get_icon(self):
    """Return the full path of the addon's 'icon.png'."""
    return os.path.join(self.get_native_path(), 'icon.png')
def get_fanart(self):
    """Return the full path of the addon's 'fanart.jpg'."""
    return os.path.join(self.get_native_path(), 'fanart.jpg')
def create_resource_path(self, relative_path):
    """
    Map a '/'-separated *relative_path* to an absolute path inside the
    addon's 'resources' directory.
    """
    normalized = utils.path.normalize(relative_path)
    components = normalized.split('/')
    return os.path.join(self.get_native_path(), 'resources', *components)
def get_uri(self):
    """Return the full plugin uri built for the current path/params."""
    return self._uri
def get_name(self):
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.