prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
import sys
import os
import Pyro5
import logging
sys.path.extend(['..', '../..'])
import mupif as mp
import time as timemod
import uuid
import pbs_tool
log = logging.getLogger()
@Pyro5.api.expose
class Application10(mp.Model):
    """
    Simple application which sums given time values times 2
    """

    def __init__(self, metadata=None, **kwargs):
        """Build the model metadata and reset the accumulator state.

        *metadata* optionally overrides/extends the built-in metadata; the
        default is None (not a mutable ``{}``) to avoid the shared-default
        pitfall.
        """
        MD = {
            'Name': 'Simple time summator',
            'ID': 'N/A',
            'Description': 'Cummulates given time values times 2',
            'Version_date': '12/2021',
            'Physics': {
                'Type': 'Other',
                'Entity': 'Other'
            },
            'Solver': {
                'Software': 'Python script',
                'Language': 'Python3',
                'License': 'LGPL',
                'Creator': 'Stanislav',
                'Version_date': '12/2021',
                'Type': 'Summator',
                'Documentation': 'Nowhere',
                'Estim_time_step_s': 1,
                'Estim_comp_time_s': 0.01,
                'Estim_execution_cost_EUR': 0.01,
                'Estim_personnel_cost_EUR': 0.01,
                'Required_expertise': 'None',
                'Accuracy': 'High',
                'Sensitivity': 'High',
                'Complexity': 'Low',
                'Robustness': 'High'
            },
            'Inputs': [
                {'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time', 'Name': 'Time value',
                 'Description': 'Time', 'Units': 's', 'Required': True, "Set_at": "timestep", "Obj_ID": '1', "ValueType": "Scalar"}
            ],
            'Outputs': [
                {'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time', 'Name': 'Cummulated time value',
                 'Description': 'Cummulative time', 'Units': 's', "ValueType": "Scalar"}
            ]
        }
        super().__init__(metadata=MD, **kwargs)
        self.updateMetadata(metadata if metadata is not None else {})
        self.value = 0.   # accumulated result served by get()
        self.input = 0.   # last input received through set()

    def initialize(self, workdir='', metadata=None, validateMetaData=True, **kwargs):
        """Forward initialization to the base model."""
        super().initialize(workdir=workdir,
                           metadata=metadata if metadata is not None else {},
                           validateMetaData=validateMetaData, **kwargs)

    def get(self, objectTypeID, time=None, objectID=""):
        """Return the accumulated value as a PID_Time ConstantProperty.

        Returns None for any other *objectTypeID* (as the original did).
        """
        md = {
            'Execution': {
                'ID': self.getMetadata('Execution.ID'),
                'Use_case_ID': self.getMetadata('Execution.Use_case_ID'),
                'Task_ID': self.getMetadata('Execution.Task_ID')
            }
        }
        if objectTypeID == mp.DataID.PID_Time:
            return mp.ConstantProperty(value=self.value, propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=time, metadata=md)

    def set(self, obj, objectID=""):
        """Store an incoming PID_Time property value (converted to seconds)."""
        if obj.isInstance(mp.Property):
            if obj.getPropertyID() == mp.DataID.PID_Time:
                self.input = obj.inUnitsOf(mp.U.s).getValue()

    def solveStep(self, tstep, stageID=0, runInBackground=False):
        """Run the executable in a Torque/Slurm PBS queue and fold its output
        into ``self.value``.

        Raises mp.apierror.APIError when the solver reports an error or the
        output file is missing.  Temporary input/output files are always
        removed, even on failure (the original leaked them when raising).
        """
        rp = os.path.realpath(__file__)
        dirname = os.path.dirname(rp)
        # Unique input and output file names (specific to this application).
        step_id = uuid.uuid4()
        inpfile = "%s/inp_%s.txt" % (dirname, step_id)
        outfile = "%s/out_%s.txt" % (dirname, step_id)
        try:
            # Create the input file for the solver.
            with open(inpfile, 'w') as f:
                f.write("%f" % self.input)
            # Submit the job.
            jobid = pbs_tool.submit_job(command=" -v inpfile=\"%s\",outfile=\"%s\",script=\"%s/appexec.py\",dirname=\"%s\" %s/appexec.job -o %s/log.txt -e %s/err.txt" % (inpfile, outfile, dirname, dirname, dirname, dirname, dirname))
            # Wait until the job is finished.
            # After its completion, the job stays in the list of jobs with 'Completed' status for a while.
            # After that time it is not in the list any more, which results in 'Unknown' state.
            # With 60-second period of checking the job should be still available in the list.
            pbs_tool.wait_until_job_is_done(jobid=jobid, checking_frequency=1.)
            # Process the results (specific to this application).
            if os.path.exists(outfile):
                with open(outfile, 'r') as f:
                    read_value = f.readline()
                if read_value != "error":
                    self.value += float(read_value)
                else:
                    raise mp.apierror.APIError("A problem occured in the solver.")
            else:
                print("File '%s' does not exist." % outfile)
                raise mp.apierror.APIError("The output file does not exist.")
        finally:
            # Delete the temporary input and output files unconditionally.
            if os.path.exists(inpfile):
                os.remove(inpfile)
            if os.path.exists(outfile):
                os.remove(outfile)

    def getCriticalTimeStep(self):
        """Return the critical time step of this model."""
        return 1000.*mp.U.s

    def getAssemblyTime(self, tstep):
        """Return the assembly time for *tstep* (the step's own time)."""
        return tstep.getTime()

    def getApplicationSignature(self):
        """Return the application signature string."""
        return "Application10"
|
#! /usr/bin/env python
# -*- coding: ut | f-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tqdm import tqdm
def wrap_iterator(iterator, progressbar):
    """Return *iterator*, wrapped in a tqdm progress bar when requested."""
    return tqdm(iterator) if progressbar else iterator
def wrap_generator(generator, progressbar, total):
    """Return *generator*, wrapped in a tqdm progress bar when requested.

    *total* is passed to tqdm so the bar has a known length (generators
    cannot report their own length).
    """
    if progressbar:
        generator = tqdm(generator, total=total)
    return generator
|
from feeluown.utils.dispatch import Signal
from feeluown.gui.widgets.my | _music import MyMusicModel
class MyMusicItem(object):
    """A single "my music" entry: display text plus a click signal."""

    def __init__(self, text):
        self.clicked = Signal()  # emitted when the user activates this item
        self.text = text
class MyMusicUiManager:
    """Owns the "my music" items and the model that renders them.

    .. note::
        Items are kept in a plain list and only add_item/clear are offered.
        Items are intended to stay associated with their provider — the
        provider acts as the MyMusic context — while finer-grained accessors
        such as get_item belong to the higher-level Provider object.
    """

    def __init__(self, app):
        self._app = app
        self._items = []
        self.model = MyMusicModel(app)

    @classmethod
    def create_item(cls, text):
        """Factory for a MyMusicItem carrying the given display text."""
        return MyMusicItem(text)

    def add_item(self, item):
        """Register *item* both in the model and in the internal list."""
        self.model.add(item)
        self._items.append(item)

    def clear(self):
        """Drop every item from the internal list and the model."""
        self.model.clear()
        self._items.clear()
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from marshmallow import fields
from indico.core.marshmallow import mm
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
class SessionBlockSchema(mm.SQLAlchemyAutoSchema):
    """Serialization schema for SessionBlock rows."""

    # Room name rendered through the object's own helper.
    room_name_verbose = fields.Function(lambda obj: obj.get_room_name(full=False, verbose=True))

    class Meta:
        model = SessionBlock
        fields = ('id', 'title', 'code', 'start_dt', 'end_dt', 'duration', 'room_name', 'room_name_verbose')
class BasicSessionSchema(mm.SQLAlchemyAutoSchema):
    """Minimal serialization schema for a Session, with its blocks nested."""

    # Each session carries its blocks serialized via SessionBlockSchema.
    blocks = fields.Nested(SessionBlockSchema, many=True)

    class Meta:
        model = Session
        fields = ('id', 'title', 'friendly_id', 'blocks')
|
# !/usr/bin/env python
# -*-coding:utf-8-*-
# by huangjiangbo
# 部署服务
# deploy.py
from ConfigParser import ConfigParser
ConfigFile = r'config.ini' # 读取配置文件
config = ConfigParser()
config.read(ConfigFile)
de_infos = config. | items(r'deploy_server') # 远程部署服务器信息
redeploy_server_info = {}
appinfo = {}
print de_infos
for (key, value) in de_infos:
redeploy_server_info[key] = value
print redeploy_se | rver_info
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Allow DataTableStat.last_curated_on to be NULL."""

    dependencies = [
        ('neuroelectro', '0023_auto_20160604_1620'),
    ]

    operations = [
        migrations.AlterField(
            model_name='datatablestat',
            name='last_curated_on',
            field=models.DateTimeField(null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from rest_framework import generics, permissions

from core.models import User
from core.serializers import UserSerializer


class UserListAPIView(generics.ListAPIView):
    """
    Read-only API View to list all users.
    """
    # Authentication required; results paginated at 100 users per page.
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = (permissions.IsAuthenticated,)
    paginate_by = 100
|
onse
oid_store = make_oid_store()
login_type = auth_type
openid_url = auth_id
if login_type == 'google':
openid_url = 'https://www.google.com/accounts/o8/id'
elif login_type == 'aol':
openid_url = 'http://openid.aol.com/'
elif login_type == 'yahoo':
openid_url = 'yahoo.com'
oid_consumer = consumer.Consumer(session, oid_store)
trust_root = url("main", qualified=True)
return_to = url('verify', qualified=True)
try:
req = oid_consumer.begin(openid_url)
except consumer.DiscoveryFailure:
helpers.flash(u"Error in discovery",'error')
session.save()
redirect(url(on_failure))
else:
if req is None:
helpers.flash(u"No OpenID services found for %s" % openid_url,'error')
session.save()
redirect(url(on_failure))
else:
sreg_request = sreg.SRegRequest(required=['nickname'], optional=['fullname', 'email'])
req.addExtension(sreg_request)
pape_request = pape.Request([pape.AUTH_PHISHING_RESISTANT])
req.addExtension(pape_request)
if req.shouldSendRedirect():
redirect_url = req.redirectURL(trust_root, return_to)
response.status_int = 302
response.headers['location'] = redirect_url
return ""
else:
return req.htmlMarkup(realm=trust_root,return_to=return_to)
def wrapper(func,self,*args,**kwargs):
pylons_obj = self._py_object
request_post = pylons_obj.request.POST
session = pylons_obj.session
url = pylons_obj.url
try:
auth_type = request_post[type_param]
auth_id = request_post[id_param]
except KeyError:
redirect(url(on_failure))
else:
session['auth_type'] = auth_type
session.save()
if auth_type == 'twitter':
return _oauth_handler(pylons_obj, auth_type, auth_id)
elif auth_type == 'facebook':
return _facebook_handler(pylons_obj, auth_type, auth_id)
else:
return _openid_handler(pylons_obj, auth_type, auth_id)
# in case we find ourselves here for some reason
return func(self,*arg,**kwargs)
return decorator(wrapper)
def AuthenticationResponse():
    """Decorator factory for the OpenID/OAuth "verify" controller action.

    Completes the authentication started by the challenge step: dispatches on
    the auth type stored in the session ('twitter', 'facebook', otherwise
    OpenID), resolves the matching local User, creates one when none exists,
    stores it in the session and redirects.

    NOTE(review): relies on names defined elsewhere in this module/package
    (decorator, redirect, helpers, consumer, sreg, oauthtwitter, cgi, urllib,
    json, make_oid_store, store_user, create_user) — confirm before moving.
    """
    def _oauth_handler(pylons_obj):
        # Finish the Twitter OAuth dance; return the local User or None.
        from columns.model import User, meta
        session = pylons_obj.session
        url = pylons_obj.url  # NOTE(review): unused in this handler
        app_globals = pylons_obj.app_globals
        request_token = session.pop('oauth_request_token')
        twitter_key, twitter_secret = app_globals.settings(u'twitter_oauth_key', u'auth'), app_globals.settings(u'twitter_oauth_secret', u'auth')
        #twitter = oauthtwitter.OAuthApi(twitter_key, twitter_secret, request_token)
        twitter = oauthtwitter.OAuthApi(twitter_key, twitter_secret)
        #access_token = twitter.getAccessToken()
        access_token = twitter.getAccessToken(request_token)
        twitter = oauthtwitter.OAuthApi(twitter_key, twitter_secret, access_token)
        user = twitter.VerifyCredentials() #twitter.GetUserInfo()
        session['auth_type'] = 'twitter'
        #session['auth_oid'] = user.id
        session['auth_oid'] = user['id']
        try:
            return meta.Session.query(User).filter(User.twitter_id==unicode(session['auth_oid'])).one()
        except:
            # No single matching user: treated as "no local account yet".
            return None
    def _facebook_handler(pylons_obj):
        # Exchange the Facebook OAuth code for a token; return User or None.
        from columns.model import User, meta
        session = pylons_obj.session
        url = pylons_obj.url
        app_globals = pylons_obj.app_globals
        request = pylons_obj.request
        try:
            fbcode = request.params.get("code")
        except KeyError:
            # NOTE(review): .get() does not raise KeyError — likely dead branch.
            redirect(url("login"))
        args = {
            'client_id':app_globals.settings(u'facebook_api_key', u'auth'),
            'redirect_uri':url('verify',qualified=True),
            'client_secret':app_globals.settings(u'facebook_secret', u'auth'),
            'code':fbcode,
        }
        fb_response = cgi.parse_qs(urllib.urlopen(
            "https://graph.facebook.com/oauth/access_token?" +
            urllib.urlencode(args)).read())
        try:
            access_token = fb_response["access_token"][-1]
        except KeyError:
            # No token granted: restart the facebook challenge.
            redirect(url('challenge', auth_type='facebook'))
            #return fb_login(g, session, request, response, 'facebook', None)
        # Download the user profile and cache a local instance of the
        # basic profile info
        profile = json.load(urllib.urlopen(
            "https://graph.facebook.com/me?" +
            urllib.urlencode(dict(access_token=access_token))))
        session['auth_type'] = 'facebook'
        session['auth_oid'] = profile["id"]
        try:
            return meta.Session.query(User).filter(User.fb_id==unicode(session['auth_oid'])).one()
        except:
            return None
    def _openid_handler(pylons_obj):
        # Complete an OpenID verification; return the local User or None.
        from columns.model import User, meta
        session = pylons_obj.session
        url = pylons_obj.url
        g = pylons_obj.app_globals
        request = pylons_obj.request
        oid_store = make_oid_store()
        oid_consumer = consumer.Consumer(session, oid_store)
        info = oid_consumer.complete(request.params, url('verify', qualified=True))
        sreg_resp = None
        pape_resp = None
        display_identifier = info.getDisplayIdentifier()
        if info.status == consumer.FAILURE and display_identifier:
            helpers.flash(u"Verification of %(display_identifier)s failed: %(message)s" % {'display_identifier':display_identifier,'message':info.message},'error')
        elif info.status == consumer.SUCCESS:
            sreg_resp = sreg.SRegResponse.fromSuccessResponse(info)
            #pape_resp = pape.Response.fromSuccessResponse(info)
            # Prefer the stable canonical ID over the display identifier.
            if info.endpoint.canonicalID:
                session['auth_oid'] = info.endpoint.canonicalID
            else:
                session['auth_oid'] = display_identifier
            try:
                return meta.Session.query(User).filter(User.open_id==unicode(session['auth_oid'])).one()
            except:
                return None
        elif info.status == consumer.CANCEL:
            helpers.flash(u'Verification cancelled','error')
        elif info.status == consumer.SETUP_NEEDED:
            setup_url = info.setup_url
            if setup_url:
                helpers.flash(u'<a href=%s>Setup needed</a>' % helpers.literal(setup_url),'error')
            else:
                helpers.flash(u'Setup needed','error')
        else:
            helpers.flash(u'Verification failed.')
        # Every non-success outcome falls through to the login page.
        redirect(url("login"))
    def wrapper(func,self,*args,**kwargs):
        # Dispatch to the auth-type-specific completion handler.
        pylons_obj = self._py_object
        session = pylons_obj.session
        url = pylons_obj.url
        try:
            auth_type = session['auth_type']
        except KeyError:
            auth_type = pylons_obj.request.params.get('auth_type')
        if auth_type == 'twitter':
            user = _oauth_handler(pylons_obj)
        elif auth_type == 'facebook':
            user = _facebook_handler(pylons_obj)
        else:
            user = _openid_handler(pylons_obj)
        if user is not None:
            store_user(pylons_obj.session, user)
        else:
            if 'return_to' in session and 'modifying' in session:
                #we are adding a new authentication method to an existing account
                return_to = session.pop('return_to')
                session.pop('modifying')
                redirect(url(**return_to))
            else:
                #we are making a new account
                #redirect(url("new_account"))
                user = create_user(session)
                store_user(pylons_obj.session, user)
        if 'return_to' in session:
            return_to = session.pop('return_to')
            redirect(url(**return_to))
        else:
            redirect(url("main"))
    return decorator(wrapper)
from pylons.util import call_wsgi_application
class AuthenticationMiddleware(object):
"""Internally redirects a request based on status code
AuthenticationMiddleware watches the response of the app it wraps. If the
response is an error code in the errors sequence passed the request
will be re-run with the path URL set to the path passed in.
This operation is non-recursive and the output of the second
request will be used no matter what it is.
Should an application wish to bypass the error response (to
purposely return a 401), set
``environ['columns.authentication_redirect'] = True`` in the application.
"""
def __init__(self, app, login_path='/login'):
"""Initialize the ErrorRedirect
``errors``
A sequence (list, tuple) of error code integers that should
be caught.
``path``
The path to set for the next request down to the
application.
"""
self.app = app
self.login_path = login_path
# Transform errors to str for comparison
self.errors = ['401']
def __call__(self, environ, start_response):
#this is from StatusCodeRedirect
status, headers, app_iter, exc_info = call_wsgi_application(
self.app, environ, catch_exc_info=True
)
if status[:3] in self.errors and 'columns.authentication_redirect' not in environ and self.login_path:
# Create a response object
environ['pylons.original_response'] = Respons |
#!/usr/bin/python
"""nrvr.xml.etree - Utilities for xml.etree.ElementTree
The main class provided by this module is ElementTreeUtil.
To be expanded as needed.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import copy
import xml.etree.ElementTree
class ElementTreeUtil(object):
    """Utilities for xml.etree.ElementTree.
    Written for Python 2.6; tostring() also tolerates Python 3's bytes
    result from xml.etree.ElementTree.tostring()."""

    @classmethod
    def indent(cls, element, indent=" ", level=0):
        """Set whitespace for indentation.

        element
            an xml.etree.ElementTree.Element instance.
        indent
            the additional indent for each level down.
        level
            increases on recursive calls.
            Need not be set on regular use."""
        levelIndent = "\n" + level * indent
        if len(element):
            # Element has child elements.
            if not element.text or not element.text.strip():
                # No text (or whitespace only): position the first child.
                element.text = levelIndent + indent
            for child in element:
                # Children are indented one level more.
                cls.indent(child, indent=indent, level=level + 1)
            if not child.tail or not child.tail.strip():
                # The LAST child's tail positions this element's closing tag.
                child.tail = levelIndent
        if level > 0:
            # Any level except top level: tail positions the next sibling.
            if not element.tail or not element.tail.strip():
                element.tail = levelIndent
        else:
            # Top level has no siblings.
            element.tail = ""

    @classmethod
    def unindent(cls, element):
        """Remove whitespace from indentation.

        element
            an xml.etree.ElementTree.Element instance."""
        if len(element):
            if not element.text or not element.text.strip():
                element.text = ""
            for child in element:
                cls.unindent(child)
        if not element.tail or not element.tail.strip():
            element.tail = ""

    @classmethod
    def tostring(cls, element, indent=" ", xml_declaration=True, encoding="utf-8"):
        """Generate a string representation.

        element
            an xml.etree.ElementTree.Element instance.
            Tolerates xml.etree.ElementTree.ElementTree.
        indent
            the additional indent for each level down.
            If None then unindented.
        xml_declaration
            whether with XML declaration <?xml version="1.0" encoding="utf-8"?>."""
        # Tolerate a tree instead of an element.
        if isinstance(element, xml.etree.ElementTree.ElementTree):
            element = element.getroot()
        element = copy.deepcopy(element)  # never mutate the caller's tree
        if indent is not None:
            cls.indent(element, indent)
        else:
            cls.unindent(element)
        string = xml.etree.ElementTree.tostring(element, encoding=encoding)
        if isinstance(string, bytes):
            # Python 3 returns bytes when a codec name is given.
            string = string.decode(encoding)
        # Drop any declaration tostring() emitted itself; the original code
        # prepended a second declaration on top of it.
        if string.startswith("<?xml"):
            string = string.split("?>", 1)[1].lstrip("\n")
        if xml_declaration:
            string = '<?xml version="1.0" encoding="{0}"?>\n'.format(encoding) + string
        return string

    @classmethod
    def simpledict(cls, element):
        """Generate a dictionary from child element tags and text.

        element
            an xml.etree.ElementTree.Element instance."""
        children = element.findall('*')
        dictionary = {}
        for child in children:
            dictionary[child.tag] = child.text
        return dictionary
if __name__ == "__main__":
    # Ad-hoc demo: round-trip a small document through indent/unindent/tostring.
    # NOTE: uses Python 2 print statements, matching the module's target version.
    import sys
    tree = xml.etree.ElementTree.ElementTree(xml.etree.ElementTree.XML \
        ("""<e1 a1="A1">
<e2 a2="A2">E2</e2>
<e3 a3="A3">E3</e3>
<e4><e5/></e4>
<e6/></e1>"""))
    tree.write(sys.stdout)
    print # a newline after the write of unindented XML
    ElementTreeUtil.indent(tree.getroot())
    tree.write(sys.stdout)
    print # a newline after the write of unindented XML
    print xml.etree.ElementTree.tostring(tree.getroot())
    ElementTreeUtil.unindent(tree.getroot())
    tree.write(sys.stdout)
    print # a newline after the write of unindented XML
    print ElementTreeUtil.tostring(tree)
    print ElementTreeUtil.tostring(tree.getroot())
    print ElementTreeUtil.tostring(tree, indent=None)
|
# -*- coding: utf-8 -*-
# Builds a fixed-width payroll-withholding file (Greek tax-authority format)
# using the project-local fixed_size_text column/row helpers. Python 2 script.
import fixed_size_text as ft

nu = ft.Num()          # numeric field formatter
it = ft.Int()          # integer field formatter
ymd = ft.DatYYYMMDD()  # date field formatter

# Record type 0: file header
ah1 = [ft.col(8),               # file name
       ft.col(8, 0, ' ', ymd),  # creation date
       ft.col(4),               # run cycle number
       ft.col(127)              # filler
       ]
h1 = ft.row(0, ah1)

# Record type 1: employer data
ah2 = [ft.col(4),    # year
       ft.col(18),   # surname / company name
       ft.col(9),    # first name
       ft.col(3),    # father's name
       ft.col(1),    # 0=company name, 1=person full name
       ft.col(9),    # tax ID (AFM)
       ft.col(16),   # business activity
       ft.col(10),   # city
       ft.col(16),   # street
       ft.col(5),    # street number
       ft.col(5),    # postal code
       ft.col(51)    # filler
       ]
h2 = ft.row(1, ah2)

# Record type 2: employer totals
ah3 = [ft.col(16, 1, '0', nu),  # gross earnings
       ft.col(16, 1, '0', nu),  # deductions
       ft.col(16, 1, '0', nu),  # net earnings
       ft.col(15, 1, '0', nu),  # tax due
       ft.col(15, 1, '0', nu),  # tax withheld
       ft.col(15, 1, '0', nu),  # special solidarity contribution
       ft.col(14, 1, '0', nu),  # stamp duty
       ft.col(13, 1, '0', nu),  # OGA on stamp duty
       ft.col(27)               # filler
       ]
h3 = ft.row(2, ah3)

# Record type 3: per-employee data
ah4 = [ft.col(9),               # employee tax ID (AFM)
       ft.col(1),               # filler
       ft.col(18),              # surname
       ft.col(9),               # first name
       ft.col(3),               # spouse's or father's name
       ft.col(11),              # social security number (AMKA)
       ft.col(2, 1, '0', it),   # number of children
       ft.col(2),               # earnings type, 01=regular earnings
       ft.col(11, 1, '0', nu),  # gross earnings
       ft.col(10, 1, '0', nu),  # deductions
       ft.col(11, 1, '0', nu),  # net earnings
       ft.col(10, 1, '0', nu),  # tax due
       ft.col(10, 1, '0', nu),  # tax withheld
       ft.col(10, 1, '0', nu),  # special solidarity contribution
       ft.col(9, 1, '0', nu),   # stamp duty
       ft.col(8, 1, '0', nu),   # OGA on stamp duty
       ft.col(4, 1, '0', it),   # reference year for retroactive earnings
       ft.col(9),               # filler
       ]
h4 = ft.row(3, ah4)

r1 = h1.write(['JL10', '2016-01-15', '2015', ''])

a2 = ['2015',
      u'ΑΚΤΗ ΦΑΡΑΓΓΑ ΠΑΡΟΥ ΕΠΕ',
      '',
      '',
      0,
      '999249820',
      u'ΕΣΤΙΑΤΟΡΙΟ ΜΠΑΡ',
      u'ΠΑΡΟΣ',
      u'ΑΓΑΘΗΜΕΡΟΥ',
      '3',
      '84400',
      ''
      ]
r2 = h2.write(a2)

a3 = [20220.98, 3575.14, 16645.84, 0, 0, 0, 0, 0, '']
r3 = h3.write(a3)

a4 = ['034140096', '', u'ΛΑΖΑΡΟΣ', u'ΘΕΟΔΩΡΟΣ', u'ΚΩΝΣΤΑΝΤ', '02108001427',
      1, '01',
      20220.98, 3575.14, 16645.84, 0, 0, 0, 0, 0, '', ''
      ]
r4 = h4.write(a4)

import sys
reload(sys)
# NOTE(review): setdefaultencoding is a global Python 2 hack; kept to preserve
# the implicit str/unicode coercions, but explicit encoding would be better.
sys.setdefaultencoding("utf-8")

rtel = r1 + r2 + r3 + r4
print(len(r1), len(r2), len(r3), len(r4))

# Write the records in CP1253 (Greek) as the receiving agency expects.
with open('tstfile', 'w') as f:
    f.write(rtel.encode('CP1253'))
|
import | ply.lex as lex
from ast import Node
# Reserved keywords of the first-order-logic input language, mapped to
# their PLY token names.
reserved = {
    'or' : 'LOR',
    'and' : 'LAND',
    'neg' : 'NEG',
    'exists' : 'EXISTS',
    'forall' : 'FORALL',
    'implies': 'IMPLIES',
    'iff' : 'IFF'
}
# Complete token set required by PLY: the literal/word tokens plus every
# reserved-word token name.
tokens = tuple(
    [
        'WORD',
        'VARIABLE',
        'CONSTANT',
        'FUNCTION',
        'PREDICATE',
        'COMMA',
        'LPAREN',
        'RPAREN',
        'LBRACKET',
        'RBRACKET',
    ] + list(reserved.values())
)
# Single-character punctuation tokens (PLY regex rules as plain strings).
t_COMMA = r','
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
def t_WORD(t):
    r'[a-zA-Z][a-zA-Z]+'  # HACK no reserved word has len < 2
    # Multi-letter words are only valid when they are reserved keywords.
    word_type = reserved.get(t.value)
    if word_type is None:
        # The original fell through here and emitted a token with type None,
        # which breaks PLY; report the bad word and discard the token instead.
        print("Illegal word '%s'" % t.value)
        return None
    t.type = word_type
    t.value = Node(word_type, t.value)
    return t
def t_VARIABLE(t):
    r'[u-z]'
    # Single letters u..z are variables.
    t.value = Node('VARIABLE', t.value)
    return t

def t_CONSTANT(t):
    r'[a-e]'
    # Single letters a..e are constants.
    t.value = Node('CONSTANT', t.value)
    return t

def t_FUNCTION(t):
    r'[f-j]'
    # Single letters f..j are function symbols.
    t.value = Node('FUNCTION', t.value)
    return t

def t_PREDICATE(t):
    r'[P-U]'
    # Single capital letters P..U are predicate symbols.
    t.value = Node('PREDICATE', t.value)
    return t

def t_newline(t):
    r'\n+'
    # Track line numbers; newlines themselves produce no token.
    t.lexer.lineno += len(t.value)

# Characters ignored between tokens.
t_ignore = ' \t'

def t_error(t):
    # Report and skip a single illegal character.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer at import time (PLY inspects this module's t_* rules).
lexer = lex.lex()
#lexer = lex.lex(optimize=1, debug=1)

if __name__ == '__main__':
    # Smoke test: tokenize a sample first-order-logic formula.
    data = '''neg (exists x)(forall y)[P(x,y) iff neg Q(y, x, y)]'''
    lexer.input(data)
    for tok in lexer:
        print(tok)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client i | mport ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.duration_operations import DurationOperations
from . import models
class AutoRestDurationTestServiceConfiguration(Configuration):
    """Configuration for AutoRestDurationTestService
    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param str base_url: Service URL
    :param str filepath: Existing config
    """

    def __init__(
            self, base_url=None, filepath=None):

        # Default to localhost when no service URL is supplied.
        if not base_url:
            base_url = 'https://localhost'

        super(AutoRestDurationTestServiceConfiguration, self).__init__(base_url, filepath)

        # Identify this client library in the User-Agent header.
        self.add_user_agent('autorestdurationtestservice/{}'.format(VERSION))
class AutoRestDurationTestService(object):
    """Test Infrastructure for AutoRest

    :ivar config: Configuration for client.
    :vartype config: AutoRestDurationTestServiceConfiguration
    :ivar duration: Duration operations
    :vartype duration: .operations.DurationOperations
    :param str base_url: Service URL
    :param str filepath: Existing config
    """

    def __init__(
            self, base_url=None, filepath=None):

        self.config = AutoRestDurationTestServiceConfiguration(base_url, filepath)
        self._client = ServiceClient(None, self.config)

        # Map model class name -> class for (de)serialization lookups.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        self.duration = DurationOperations(
            self._client, self.config, self._serialize, self._deserialize)
|
#!/usr/bin/env python
from os.path import join, dirname

from cloudify import ctx

# Fetch the shared utils module next to this script so it can be imported.
ctx.download_resource(
    join('components', 'utils.py'),
    join(dirname(__file__), 'utils.py'))
import utils  # NOQA

runtime_props = ctx.instance.runtime_properties

if utils.is_upgrade:
    # During an upgrade, check directories and make sure the service is alive.
    SERVICE_NAME = runtime_props['service_name']
    utils.validate_upgrade_directories(SERVICE_NAME)
    utils.systemd.verify_alive(SERVICE_NAME, append_prefix=False)
|
from __future__ import absolute_import, unicode_literals
from django.contrib.admin.utils import quote
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
class ButtonHelper(object):
    """Builds the kwargs dicts used to render admin action buttons for a model.

    Each ``*_button`` method returns a dict with 'url', 'label', 'classname'
    and 'title'; get_buttons_for_obj assembles the buttons the current user
    is allowed to see for a given object.
    """

    default_button_classnames = ['button']
    add_button_classnames = ['bicolor', 'icon', 'icon-plus']
    inspect_button_classnames = []
    edit_button_classnames = []
    delete_button_classnames = ['no']

    def __init__(self, view, request):
        self.view = view
        self.request = request
        self.model = view.model
        self.opts = view.model._meta
        self.verbose_name = force_text(self.opts.verbose_name)
        self.verbose_name_plural = force_text(self.opts.verbose_name_plural)
        self.permission_helper = view.permission_helper
        self.url_helper = view.url_helper

    def finalise_classname(self, classnames_add=None, classnames_exclude=None):
        """Combine default and extra classnames, minus exclusions, into one string."""
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        combined = self.default_button_classnames + classnames_add
        finalised = [cn for cn in combined if cn not in classnames_exclude]
        return ' '.join(finalised)

    def add_button(self, classnames_add=None, classnames_exclude=None):
        """Button kwargs for the 'create' action."""
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        classnames = self.add_button_classnames + classnames_add
        cn = self.finalise_classname(classnames, classnames_exclude)
        return {
            'url': self.url_helper.create_url,
            'label': _('Add %s') % self.verbose_name,
            'classname': cn,
            'title': _('Add a new %s') % self.verbose_name,
        }

    def inspect_button(self, pk, classnames_add=None, classnames_exclude=None):
        """Button kwargs for the 'inspect' action on the object with key *pk*."""
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        classnames = self.inspect_button_classnames + classnames_add
        cn = self.finalise_classname(classnames, classnames_exclude)
        return {
            'url': self.url_helper.get_action_url('inspect', quote(pk)),
            'label': _('Inspect'),
            'classname': cn,
            'title': _('Inspect this %s') % self.verbose_name,
        }

    def edit_button(self, pk, classnames_add=None, classnames_exclude=None):
        """Button kwargs for the 'edit' action on the object with key *pk*."""
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        classnames = self.edit_button_classnames + classnames_add
        cn = self.finalise_classname(classnames, classnames_exclude)
        return {
            'url': self.url_helper.get_action_url('edit', quote(pk)),
            'label': _('Edit'),
            'classname': cn,
            'title': _('Edit this %s') % self.verbose_name,
        }

    def delete_button(self, pk, classnames_add=None, classnames_exclude=None):
        """Button kwargs for the 'delete' action on the object with key *pk*."""
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        classnames = self.delete_button_classnames + classnames_add
        cn = self.finalise_classname(classnames, classnames_exclude)
        return {
            'url': self.url_helper.get_action_url('delete', quote(pk)),
            'label': _('Delete'),
            'classname': cn,
            'title': _('Delete this %s') % self.verbose_name,
        }

    def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
                            classnames_exclude=None):
        """Return the buttons (inspect/edit/delete) the user may see for *obj*."""
        if exclude is None:
            exclude = []
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        ph = self.permission_helper
        usr = self.request.user
        pk = getattr(obj, self.opts.pk.attname)
        btns = []
        if('inspect' not in exclude and ph.user_can_inspect_obj(usr, obj)):
            btns.append(
                self.inspect_button(pk, classnames_add, classnames_exclude)
            )
        if('edit' not in exclude and ph.user_can_edit_obj(usr, obj)):
            btns.append(
                self.edit_button(pk, classnames_add, classnames_exclude)
            )
        if('delete' not in exclude and ph.user_can_delete_obj(usr, obj)):
            btns.append(
                self.delete_button(pk, classnames_add, classnames_exclude)
            )
        return btns
class PageButtonHelper(ButtonHelper):
    """ButtonHelper variant for page models, adding copy and unpublish actions."""

    unpublish_button_classnames = []
    copy_button_classnames = []

    def unpublish_button(self, pk, classnames_add=None, classnames_exclude=None):
        """Button kwargs for the 'unpublish' action on the object with key *pk*."""
        extra = classnames_add or []
        excluded = classnames_exclude or []
        cn = self.finalise_classname(
            self.unpublish_button_classnames + extra, excluded)
        return {
            'url': self.url_helper.get_action_url('unpublish', quote(pk)),
            'label': _('Unpublish'),
            'classname': cn,
            'title': _('Unpublish this %s') % self.verbose_name,
        }

    def copy_button(self, pk, classnames_add=None, classnames_exclude=None):
        """Button kwargs for the 'copy' action on the object with key *pk*."""
        extra = classnames_add or []
        excluded = classnames_exclude or []
        cn = self.finalise_classname(
            self.copy_button_classnames + extra, excluded)
        return {
            'url': self.url_helper.get_action_url('copy', quote(pk)),
            'label': _('Copy'),
            'classname': cn,
            'title': _('Copy this %s') % self.verbose_name,
        }

    def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
                            classnames_exclude=None):
        """Collect the action buttons the current user may see for *obj*.

        Buttons are produced in a fixed order (inspect, edit, copy, unpublish,
        delete), skipping any action that is excluded or not permitted.
        """
        exclude = exclude or []
        classnames_add = classnames_add or []
        classnames_exclude = classnames_exclude or []
        helper = self.permission_helper
        user = self.request.user
        pk = getattr(obj, self.opts.pk.attname)
        candidates = [
            ('inspect', helper.user_can_inspect_obj, self.inspect_button),
            ('edit', helper.user_can_edit_obj, self.edit_button),
            ('copy', helper.user_can_copy_obj, self.copy_button),
            ('unpublish', helper.user_can_unpublish_obj, self.unpublish_button),
            ('delete', helper.user_can_delete_obj, self.delete_button),
        ]
        return [
            build(pk, classnames_add, classnames_exclude)
            for name, allowed, build in candidates
            if name not in exclude and allowed(user, obj)
        ]
|
"""
For simplicity this module should be a stand-alone package, i.e., it should not use any
non-standard python packages such as mooseutils.
"""
import copy
from . import search
class Node(object):
    """
    Base class for tree nodes that accepts arbitrary attributes.
    Create a new node in the tree that is a child of *parent* with the given *name*. The supplied
    *parent* must be another `Node` object (or None for a root node). All keyword arguments are
    stored as "attributes" and may be of any type.
    !alert warning title=Speed is Important!
    The need for this object comes from the MooseDocs package, which uses tree objects extensively.
    Originally, MooseDocs used the anytree package for these structures. As the MooseDocs system
    evolved as well as the amount of documentation, in particular the amount of generated HTML
    output, the speed in creating the tree nodes became critical. The anytree package is robust and
    well designed, but the construction of the nodes was not fast enough.
    """
    def __init__(self, parent, name, **kwargs):
        """
        This constructor must be as minimal as possible for speed purposes.
        IMPORTANT: Do not add more items to this unless you have good reason, it will impact
        MooseDocs performance greatly.
        """
        self.__children = list()
        self.__parent = parent
        self.__name = name
        self.__attributes = kwargs
        # Name mangling allows direct access to the parent's private child list,
        # avoiding any method-call overhead during construction.
        if self.__parent is not None:
            parent.__children.append(self)
    @property
    def name(self):
        """Return the name of the Node."""
        return self.__name
    @property
    def parent(self):
        """Return the parent Node object, which is None for a root node."""
        return self.__parent
    @parent.setter
    def parent(self, new_parent):
        """Set the parent Node object to *new_parent*, use None to remove the node from the tree."""
        # Detach from the current parent (if attached), then re-attach to the new one.
        if (self.__parent is not None) and (self in self.__parent.__children):
            self.__parent.__children.remove(self)
        self.__parent = new_parent
        if self.__parent is not None:
            self.__parent.__children.append(self)
    @property
    def children(self):
        """Return a list of children.
        !alert note
        The list is a copy but the Node objects in the list are not.
        """
        return copy.copy(self.__children)
    @property
    def descendants(self):
        """Return all descendants (children's children etc.), pre-order.
        NOTE(review): docstring originally said "a list"; search.iterate may
        yield lazily — confirm with callers that expect list semantics.
        """
        return search.iterate(self, method=search.IterMethod.PRE_ORDER)
    @property
    def count(self):
        """Return the number of all descendants"""
        # Recursive: direct children plus each child's own descendant count.
        count = len(self.__children)
        for child in self.__children:
            count += child.count
        return count
    def __iter__(self):
        """Iterate over the children (e.g., `for child in node:`)"""
        return iter(self.__children)
    def insert(self, idx, child):
        """Insert a node *child* before the supplied *idx* in the list of children."""
        # NOTE(review): the child is not detached from any previous parent's
        # child list here; callers appear to insert freshly created nodes.
        self.__children.insert(idx, child)
        child.__parent = self
    @property
    def path(self):
        """Return the nodes that lead to the root node of the tree from this node."""
        # Built root-first: each parent is prepended while walking upward.
        nodes = [self]
        parent = self.__parent
        while parent is not None:
            nodes.insert(0, parent)
            parent = parent.parent
        return nodes
    @property
    def root(self):
        """Return the root node of the tree."""
        return self.path[0]
    @property
    def is_root(self):
        """Return True if the Node is a root, i.e., is the parent node object set to None."""
        return self.__parent is None
    @property
    def siblings(self):
        """Return a list of sibling nodes (empty for a root node)."""
        if self.__parent is not None:
            # 'children' returns a copy, so removing self does not mutate the tree.
            children = self.__parent.children
            children.remove(self)
            return children
        return []
    @property
    def previous(self):
        """Return the previous sibling, if it exists (implicitly None otherwise)."""
        if (self.__parent is not None) and (self.__parent.__children):
            idx = self.__parent.__children.index(self)
            if idx > 0:
                return self.__parent.__children[idx-1]
    @property
    def next(self):
        """Return the next sibling, if it exists (implicitly None otherwise)."""
        if (self.__parent is not None) and (self.__parent.__children):
            idx = self.__parent.__children.index(self)
            if idx < len(self.__parent.__children) - 1:
                return self.__parent.__children[idx+1]
    def __call__(self, *args):
        """Return child nodes based on index."""
        # Each argument descends one level, e.g. node(0, 1) is the second
        # child of the first child.
        child = self
        for index in args:
            child = child.__children[index]
        return child
    @property
    def attributes(self):
        """Return the 'attributes' (key, value pairs supplied in construction) for this node."""
        return self.__attributes
    def __getitem__(self, key):
        """Retrieve an attribute using operator[]."""
        return self.__attributes[key]
    def __setitem__(self, key, value):
        """Set an attribute using operator[]."""
        self.__attributes[key] = value
    def __contains__(self, key):
        """Test if an attribute exists using the 'in' keyword."""
        return key in self.__attributes
    def get(self, key, default=None):
        """Return the value of an attribute *key* or *default* if it does not exist."""
        return self.__attributes.get(key, default)
    def items(self):
        """Return the dict() iterator to the attributes, i.e., `k, v in node.items()`."""
        return self.__attributes.items()
    def __len__(self):
        """Return the number of children."""
        return len(self.__children)
    def __bool__(self):
        """If this class exists then it should evaluate to True."""
        # Needed because __len__ exists: an empty (leaf) node must still be truthy.
        return True
    def __str__(self):
        """Return a unicode string showing the tree structure."""
        return self.__print()
    def __repr__(self):
        """Return the 'name' of the object as it should be printed in the tree."""
        if self.__attributes:
            return '{}: {}'.format(self.name, repr(self.__attributes))
        return self.name
    def __print(self, indent=u''):
        """Helper function printing to the screen."""
        # The last child (or root) gets an elbow (└─); earlier children get a
        # tee (├─) and keep a vertical bar in the indent for their subtrees.
        if (self.parent is None) or (self.parent.children[-1] is self):
            out = u'{}\u2514\u2500 {}\n'.format(indent, repr(self))
            indent += u"   "
        else:
            out = u'{}\u251c\u2500 {}\n'.format(indent, repr(self))
            indent += u"\u2502  "
        for child in self.children:
            out += child.__print(indent)
        return out
|
import sys
import json
def test_parameter(name, valu | e):
assert value is not None
print("Tested parameter '{0}' is {1}".format(name, value))
if __name__ == '__main__':
    # The first command-line argument is a directory containing input.json.
    input_path = "{0}/input.json".format(sys.argv[1])
    with open(input_path, 'r') as fh:
        data = json.load(fh)
    # 'to_be_tested' lists the kwargs that must be checked; every other
    # kwarg is ignored.
    kwargs = data.get('kwargs', {})
    remaining = kwargs.pop('to_be_tested')
    for key, value in kwargs.items():
        if key in remaining:
            test_parameter(key, value)
            remaining.remove(key)
    # Anything still listed was never supplied — fail loudly.
    if remaining:
        raise Exception("These parameters were not tested: {0}"
                        .format(remaining))
|
#!/usr/bin/python
import unittest, os
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.client.bin import package, os_dep, utils
class TestPackage(unittest.TestCase):
    """Record/playback tests for autotest's package utility module.

    Each test records the exact sequence of calls expected on the mock god,
    runs the function under test, then verifies playback.  The record order
    is significant — do not reorder statements.
    """
    def setUp(self):
        # Fresh mock god per test; os_dep.command is stubbed for every test.
        self.god = mock.mock_god()
        self.god.stub_function(os_dep, "command")
    def tearDown(self):
        self.god.unstub_all()
    def info_common_setup(self, input_package, result):
        """Record the expectations shared by the package.info() tests:
        the package file exists and `file` reports *result* (queried twice)."""
        self.god.stub_function(os.path, "isfile")
        self.god.stub_function(utils, "system_output")
        self.god.stub_function(utils, "system")
        # record
        os.path.isfile.expect_call(input_package).and_return(True)
        utils.system_output.expect_call(
            'file ' + input_package).and_return(result)
        utils.system_output.expect_call(
            'file ' + input_package).and_return(result)
    def test_info_rpm(self):
        """package.info() on an RPM package queries rpm for source/version/arch."""
        # setup
        input_package = "package.rpm"
        file_result = "rpm"
        ver = '1.0'
        # common setup
        self.info_common_setup(input_package, file_result)
        # record
        package_info = {}
        package_info['type'] = 'rpm'
        os_dep.command.expect_call('rpm')
        s_cmd = 'rpm -qp --qf %{SOURCE} ' + input_package + ' 2>/dev/null'
        a_cmd = 'rpm -qp --qf %{ARCH} ' + input_package + ' 2>/dev/null'
        v_cmd = 'rpm -qp ' + input_package + ' 2>/dev/null'
        utils.system_output.expect_call(v_cmd).and_return(ver)
        i_cmd = 'rpm -q ' + ver + ' 2>&1 >/dev/null'
        package_info['system_support'] = True
        utils.system_output.expect_call(s_cmd).and_return('source')
        package_info['source'] = True
        utils.system_output.expect_call(v_cmd).and_return(ver)
        package_info['version'] = ver
        utils.system_output.expect_call(a_cmd).and_return('586')
        package_info['arch'] = '586'
        utils.system.expect_call(i_cmd)
        package_info['installed'] = True
        # run and check
        info = package.info(input_package)
        self.god.check_playback()
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(info, package_info)
    def test_info_dpkg(self):
        """package.info() on a Debian package queries dpkg for version/arch/state."""
        # setup
        input_package = "package.deb"
        file_result = "debian"
        ver = '1.0'
        # common setup
        self.info_common_setup(input_package, file_result)
        # record
        package_info = {}
        package_info['type'] = 'dpkg'
        # dpkg packages are never source packages here.
        package_info['source'] = False
        os_dep.command.expect_call('dpkg')
        a_cmd = 'dpkg -f ' + input_package + ' Architecture 2>/dev/null'
        v_cmd = 'dpkg -f ' + input_package + ' Package 2>/dev/null'
        utils.system_output.expect_call(v_cmd).and_return(ver)
        i_cmd = 'dpkg -s ' + ver + ' 2>/dev/null'
        package_info['system_support'] = True
        utils.system_output.expect_call(v_cmd).and_return(ver)
        package_info['version'] = ver
        utils.system_output.expect_call(a_cmd).and_return('586')
        package_info['arch'] = '586'
        utils.system_output.expect_call(i_cmd,
                                        ignore_status=True).and_return('installed')
        package_info['installed'] = True
        # run and check
        info = package.info(input_package)
        self.god.check_playback()
        self.assertEquals(info, package_info)
    def test_install(self):
        """package.install() shells out to rpm -U for an rpm with system support."""
        # setup
        input_package = "package.rpm"
        self.god.stub_function(package, "info")
        self.god.stub_function(utils, "system")
        # record
        package_info = {}
        package_info['type'] = 'rpm'
        package_info['system_support'] = True
        package_info['source'] = True
        package_info['installed'] = True
        package.info.expect_call(input_package).and_return(package_info)
        install_command = 'rpm %s -U %s' % ('', input_package)
        utils.system.expect_call(install_command)
        # run and test
        package.install(input_package)
        self.god.check_playback()
    def test_convert(self):
        """package.convert() uses alien to convert rpm -> deb."""
        os_dep.command.expect_call('alien')
        dest_format = 'dpkg'
        input_package = "package.rpm"
        output = "package_output.deb"
        # record
        self.god.stub_function(utils, "system_output")
        utils.system_output.expect_call(
            'alien --to-deb %s 2>/dev/null' % input_package).and_return(output)
        # run test
        package.convert(input_package, dest_format)
        self.god.check_playback()
    def test_os_support_full(self):
        """os_support() reports True for every manager when all binaries exist."""
        # recording
        exp_support = {}
        for package_manager in package.KNOWN_PACKAGE_MANAGERS:
            os_dep.command.expect_call(package_manager)
            exp_support[package_manager] = True
        os_dep.command.expect_call('alien')
        exp_support['conversion'] = True
        # run and test
        support = package.os_support()
        self.god.check_playback()
        self.assertEquals(support, exp_support)
    def test_os_support_none(self):
        """os_support() reports False for every manager when lookups raise."""
        # recording
        exp_support = {}
        for package_manager in package.KNOWN_PACKAGE_MANAGERS:
            os_dep.command.expect_call(package_manager).and_raises(ValueError)
            exp_support[package_manager] = False
        os_dep.command.expect_call('alien').and_raises(ValueError)
        exp_support['conversion'] = False
        # run and test
        support = package.os_support()
        self.god.check_playback()
        self.assertEquals(support, exp_support)
# Run this module's tests when it is executed directly.
if __name__ == "__main__":
    unittest.main()
|
from subprocess import PIPE, Popen
from sqlalchemy import create_engine
def run(p):
    """Execute the shell command stored in p["action"]['query'].

    Logs the command via p["log"].info, runs it through a shell, and logs
    the combined stdout/stderr via p["log"].success.  Any failure is
    reported via p["log"].error instead of propagating.  Always returns
    True (original contract: callers never see the failure status).
    """
    try:
        p["log"].info(p["action"]['query'])
        proc = Popen(p["action"]['query'], shell=True,
                     stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        message = ''
        for stream in (stdout, stderr):
            if stream:
                # communicate() yields bytes on Python 3; decode so the
                # concatenation below cannot raise TypeError.
                if isinstance(stream, bytes):
                    stream = stream.decode(errors='replace')
                message += stream + '\n'
        p["log"].success(message)
    except Exception as e:  # fix: was Python-2-only "except Exception, e" syntax
        p["log"].error("command line execution failed", e)
    return True
|
from nltk import *
from nltk.corpus import brown
class ParallelismFinder:
    """Counts instances of grammatical parallelism in a text file.

    Lines are tokenized (punctuation stripped) and POS-tagged with a
    UnigramTagger trained on the Brown news corpus; adjacent groups of
    1-4 words with identical tag sequences are counted as parallelism.
    """
    def __init__(self):
        self.f = ""       # file handle of the most recently scanned file
        self.counter = 0  # parallelism instances found so far
        self.para = []    # array to hold instances of parallelism
        # Remove punctuation, which could mess up finding parallelism.
        self.tokenizer = RegexpTokenizer(r'\w+')
        # Train tagger with subset of Brown News Corpus.
        brown_news_tagged = brown.tagged_sents(categories='news')
        self.tagger = UnigramTagger(brown_news_tagged)  # Unigram Tagger based on Brown corpus
    def sendFile(self, path):
        """Scan the file at *path* (called from AIPController) and return
        the number of parallelism instances found; resets state afterwards."""
        self.f = open(path)
        try:
            for line in self.f:
                try:
                    self.get_all_parallelism(line)
                except Exception:  # skip lines that cannot be tokenized/tagged
                    continue
        finally:
            self.f.close()  # fix: the file handle was previously never closed
        c = self.counter
        self.counter = 0
        self.para = []  # re-initialize to empty array
        return c
    def get_all_parallelism(self, line):
        """Tokenize and tag *line*, then look for parallel groups of 1-4 words."""
        sent = self.tokenizer.tokenize(line)
        tags = self.tagger.tag(sent)
        self.get_phrase_parallelism(tags, 1)  # single words
        self.get_phrase_parallelism(tags, 2)  # pairs of words
        self.get_phrase_parallelism(tags, 3)  # triplets of words
        self.get_phrase_parallelism(tags, 4)  # groups of 4 words
    def get_phrase_parallelism(self, tags, n_para):
        """Count adjacent n_para-word groups whose POS-tag sequences match.

        Ex: "the bird, the word"
        Ex: "I came, I saw, I conquered"
        Ex: "Of the people, by the people, for the people"
        Ex: "the people are good, the people are bad"
        """
        tagged1, tagged2 = [], []
        words1, words2 = [], []
        for n in range(0, len(tags) - n_para, n_para):
            try:
                tagged1.append([self.get_tags(tags[n:n + n_para])])
                tagged2.append([self.get_tags(tags[n + n_para:n + (2 * n_para)])])
                words1 = self.get_words(tags[n:n + n_para])
                words2 = self.get_words(tags[n + n_para:n + (2 * n_para)])
                if tagged1 == tagged2:
                    self.para.append([words1, words2])
                    self.counter += 1
                tagged1, tagged2 = [], []
                words1, words2 = [], []
            except Exception:  # fix: was a bare except; skip unusable groups
                continue
    def get_tags(self, tag_sub):
        """Return the POS tags from a list of (word, tag) pairs."""
        return [t[1] for t in tag_sub]
    def get_words(self, word_sub):
        """Return the words from a list of (word, tag) pairs."""
        return [t[0] for t in word_sub]
|
import unittest
import numpy as np
import ndhist
class Test(unittest.TestCase):
    def test_log10_axis_1D(self):
        """Tests if the log10_axis works with the ndhist object for 1D
        histograms.
        """
        axis_0 = ndhist.axes.log10(0.1, 100, 0.1)
        # assertEqual reports both values on failure (was assertTrue(x == 32)).
        self.assertEqual(axis_0.nbins, 32)
        h = ndhist.ndhist((axis_0,))
        # A freshly created histogram must be completely empty
        # (was assertTrue(np.any(...) == False)).
        self.assertFalse(np.any(h.binentries))
        self.assertFalse(np.any(h.bincontent))
        h.fill([0.1, 0.2, 99.])
        self.assertTrue(np.all(h.bincontent == np.array([
            1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
            0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
            0., 0., 0., 1.])))
# Run the unit tests when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
|
from .settings import Settings
from .logger import | Logger
from .detection_result_recorder import DetectionResultRecorder
from .color_operations import ColorOperations
from .target_analyzer import TargetAnalyzer
from .background_color_nullifier import BackgroundColorNullifier
from .target_detectors import TargetDe | tectors
from .false_positive_eliminators import FalsePositiveEliminators
from .integrated_target_detection_process import IntegratedTargetDetectionProcess
from .integrated_target_capturing_process import IntegratedTargetCapturingProcess
from .single_target_map_detector import SingleTargetMapDetector
from .mass_target_detector import MassTargetDetector
|
"""
Copyright (c) 2010 cmiVFX.com <info@cmivfx.com>
This file is part of AtomSplitter.
AtomSplitter is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, ei | ther version 3 of the License, or
(at your option) any later version.
AtomSplitter is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more | details.
You should have received a copy of the GNU General Public License
along with AtomSplitter. If not, see <http://www.gnu.org/licenses/>.
Written by: Justin Israel
justinisrael@gmail.com
justinfx.com
""" |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# how to transform with pandas
from pandas import Series
from pandas import DataFrame
from scipy.stats import boxcox
from matplotlib | import pyplot
# Load the monthly airline-passengers series from CSV.
# NOTE(review): Series.from_csv is deprecated and removed in pandas >= 1.0;
# pandas.read_csv(..., header=0, index_col=0) is the modern replacement.
series = Series.from_csv("airline-passengers.csv", header=0)
dataframe = DataFrame(series.values)
dataframe.columns = ['passengers']
# Apply the Box-Cox power transform; lam is the fitted lambda parameter.
dataframe['passengers'], lam = boxcox(dataframe['passengers'])
print("Lambda: %f" % lam)
pyplot.figure(1)
# line plot of the transformed series
pyplot.subplot(211)
pyplot.plot(dataframe['passengers'])
# histogram of the transformed values
pyplot.subplot(212)
pyplot.hist(dataframe['passengers'])
pyplot.show()
# fix: removed a leftover commented-out sqrt-transform experiment and the
# stray duplicate pyplot.show() that followed it (it displayed nothing).
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.desk.form.load import run_onload
@frappe.whitelist()
def savedocs(doc, action):
    """save / submit / update doclist"""
    status_by_action = {"Save":0, "Submit": 1, "Update": 1, "Cancel": 2}
    try:
        doc = frappe.get_doc(json.loads(doc))
        set_local_name(doc)
        # Map the requested UI action onto the document's workflow status.
        doc.docstatus = status_by_action[action]
        if doc.docstatus == 1:
            doc.submit()
        else:
            try:
                doc.save()
            except frappe.NameError as e:
                if isinstance(e, tuple):
                    doctype, name, original_exception = e
                else:
                    doctype, name, original_exception = doc.doctype or "", doc.name or "", None
                frappe.msgprint(frappe._("{0} {1} already exists").format(doctype, name))
                raise
        # Refresh onload data and the user's recent-documents list, then
        # push the saved document back to the client.
        run_onload(doc)
        frappe.get_user().update_recent(doc.doctype, doc.name)
        send_updated_docs(doc)
    except Exception:
        if not frappe.local.message_log:
            frappe.msgprint(frappe._('Did not save'))
        frappe.errprint(frappe.utils.get_traceback())
        raise
@frappe.whitelist()
def cancel(doctype=None, name=None, workflow_state_fieldname=None, workflow_state=None):
    """cancel a doclist"""
    try:
        doc = frappe.get_doc(doctype, name)
        # Optionally stamp the workflow state before cancelling.
        wants_state_update = workflow_state_fieldname and workflow_state
        if wants_state_update:
            doc.set(workflow_state_fieldname, workflow_state)
        doc.cancel()
        send_updated_docs(doc)
    except Exception:
        # Surface the traceback, tell the user, and re-raise so the
        # request fails.
        frappe.errprint(frappe.utils.get_traceback())
        frappe.msgprint(frappe._("Did not cancel"))
        raise
def send_updated_docs(doc):
    """Attach *doc* (with its docinfo loaded) to the response sent back to the client."""
    from .load import get_docinfo
    get_docinfo(doc)
    payload = doc.as_dict()
    # Preserve the client-side temporary name, if one was recorded.
    sentinel = object()
    localname = getattr(doc, 'localname', sentinel)
    if localname is not sentinel:
        payload["localname"] = localname
    frappe.response.docs.append(payload)
def set_local_name(doc):
    """For unsaved documents, stash the client-side name in `localname` and
    clear `name` (on the parent and every child), then apply any explicitly
    requested new name (`__newname`)."""
    def _stash_name(node):
        # A node is treated as local when either the parent doc or the
        # node itself carries the __islocal flag.
        if doc.get('__islocal') or node.get('__islocal'):
            node.localname = node.name
            node.name = None
    _stash_name(doc)
    for child in doc.get_all_children():
        _stash_name(child)
    new_name = doc.get("__newname")
    if new_name:
        doc.name = new_name
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016- | 03-18 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.2: alters draw.PhysicalEntity.display_name
    # to CharField(blank=True, max_length=1000).  Avoid hand-editing applied
    # migrations; create a new migration for further schema changes.
    dependencies = [
        ('draw', '0006_auto_20160314_1817'),
    ]
    operations = [
        migrations.AlterField(
            model_name='physicalentity',
            name='display_name',
            field=models.CharField(blank=True, max_length=1000),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import lazy
from ..exceptions import InvalidOutputError, SubprocessError, WhitespaceError
from .base import BaseCommand
__logger__ = logging.getLogger('pybsd')
class EzjailAdmin(BaseCommand):
    """Provides an interface to the ezjail-admin command"""
    name = 'ezjail-admin'
    @property
    def binary(self):
        """Return the path of the ezjail-admin executable from the environment."""
        return self.env.ezjail_admin_binary
    def check_kwargs(self, subcommand, **kwargs):
        """Validate subcommand arguments before shelling out.

        Raises WhitespaceError for any value that does not split into exactly
        one token.  None values are skipped, and the 'cmd' argument of the
        'console' subcommand is exempt (a console command may contain spaces).
        """
        # make sure there is no whitespace in the arguments
        for key, value in kwargs.items():
            if value is None:
                continue
            if subcommand == 'console' and key == 'cmd':
                continue
            if len(value.split()) != 1:
                raise WhitespaceError(self, self.env, key, value, subcommand)
    @lazy.lazy
    def list_headers(self):
        """
        Validate the header row of `ezjail-admin list` output and return the
        canonical field names used by list().  Computed once (lazy).
        rc: command return code
        out: command stdout
        err: command stderr
        """
        rc, out, err = self.invoke('list')
        if rc:
            raise SubprocessError(self, self.env, err.strip(), 'list_headers')
        lines = out.splitlines()
        if len(lines) < 2:
            raise InvalidOutputError(self, self.env, u'output too short', 'list')
        # lines[0] is the header text, lines[1] its dashed underline.  Walk the
        # dash row: runs of '-' select header characters from lines[0]; any
        # other character (a column gap) terminates the current header word.
        headers = []
        current = ''
        for pos, char in enumerate(lines[1]):
            if char != '-' or pos >= len(lines[0]):
                headers.append(current.strip())
                if pos >= len(lines[0]):
                    break
                current = ''
            else:
                current = current + lines[0][pos]
        if headers != ['STA', 'JID', 'IP', 'Hostname', 'Root Directory']:
            raise InvalidOutputError(self, self.env, u"output has unknown headers\n['{}']".format(u"', '".join(headers)), 'list')
        # Map the validated display headers onto stable dict keys.
        return ('status', 'jid', 'ip', 'name', 'root')
    def list(self):
        """Return a dict of jails keyed by jail name; each entry carries the
        parsed columns plus an 'ips' list with all addresses of the jail."""
        headers = self.list_headers
        rc, out, err = self.invoke('list')
        if rc:
            raise SubprocessError(self, self.env, err.strip(), 'list')
        lines = out.splitlines()
        jails = {}
        current_jail = None
        # Skip the two header lines.  Unindented lines start a new jail entry;
        # indented continuation lines add extra IPs to the current jail.
        for line in lines[2:]:
            if line[0:4] != '    ':
                line = line.strip()
                if not line:
                    continue
                entry = dict(zip(headers, line.split()))
                entry['ips'] = [entry['ip']]
                current_jail = jails[entry.pop('name')] = entry
            else:
                line = line.strip()
                if not line:
                    continue
                # Second token has the form "<iface>|<ip>"; keep only the ip.
                if_ip = line.split()[1]
                ip = if_ip.split('|')[1]
                current_jail['ips'].append(ip)
        return jails
    def console(self, cmd, jail_name):
        """Execute *cmd* inside the jail *jail_name* and return its stdout."""
        self.check_kwargs('console', cmd=cmd, jail_name=jail_name)
        rc, out, err = self.invoke('console',
                                   '-e',
                                   cmd,
                                   jail_name)
        return out
# subcommands to be implemented:
# def __ezjail_admin(self, subcommand, **kwargs):
# # make sure there is no whitespace in t | he arguments
# for key, value in kwargs.items():
# if value is None:
# continue
# if subcommand == 'console' and key == 'cmd':
# continue
# if len(value.split()) != 1:
# __logger__.error('The value `%s` of kwarg `%s` contains wh | itespace', value, key)
# sys.exit(1)
# if subcommand == 'console':
# return self._ezjail_admin(
# 'console',
# '-e',
# kwargs['cmd'],
# kwargs['name'])
# elif subcommand == 'create':
# args = [
# 'create',
# '-c', 'zfs']
# flavour = kwargs.get('flavour')
# if flavour is not None:
# args.extend(['-f', flavour])
# args.extend([
# kwargs['name'],
# kwargs['ip']])
# rc, out, err = self._ezjail_admin(*args)
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'create')
# elif subcommand == 'delete':
# rc, out, err = self._ezjail_admin(
# 'delete',
# '-fw',
# kwargs['name'])
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'delete')
# elif subcommand == 'start':
# rc, out, err = self._ezjail_admin(
# 'start',
# kwargs['name'])
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'start')
# elif subcommand == 'stop':
# rc, out, err = self._ezjail_admin(
# 'stop',
# kwargs['name'])
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'stop')
# else:
# raise ValueError('Unknown subcommand `%s`' % subcommand)
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# res_partner
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the | GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# | This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from datetime import datetime, timedelta
from openerp.tools.translate import _
class res_company(osv.osv):
    """Add the new fields to res.company (inherits the base model)."""
    _name = "res.company"
    _inherit = "res.company"
    _columns = {
        # Company-wide web discount percentage; the label is user-facing
        # Spanish text ("Web discount (%)") and is intentionally kept as-is.
        'web_discount': fields.float('Descuento web (%)'),
    }
|
from openflow.optin_manager.sfa.rspecs.elements.element import Element
|
class PLTag(Element):
    """RSpec element representing a tag as a (tagname, value) pair.

    NOTE(review): "PL" presumably stands for PlanetLab (SFA rspecs) — confirm.
    """
    fields = [
        'tagname',
        'value',
    ]
|
pace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
afi_name = __builtin__.property(_get_afi_name, _set_afi_name)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict(
[("afi_name", afi_name), ("config", config), ("state", state)]
)
from . import config
from . import state
class afi(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/igp-shortcuts/afi. Each member element of
the container is represented as a class vari | able - with a specific
YANG type.
YANG Description: Address-family list.
"""
__slots__ = ("_path_helper", "_extmethods", "__afi_name", "__config", "__state")
_yang_name = "afi"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
|
self._path_helper = False
self._extmethods = False
self.__afi_name = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"igp-shortcuts",
"afi",
]
def _get_afi_name(self):
"""
Getter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/afi_name (leafref)
YANG Description: Reference to address-family type.
"""
return self.__afi_name
def _set_afi_name(self, v, load=False):
"""
Setter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/afi_name (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_name() directly.
YANG Description: Reference to address-family type.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_name must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="afi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__afi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_name(self):
self.__afi_name = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/config (container)
YANG Description: This container defines ISIS Shortcuts configuration parameters
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS Shortcuts configuration parameters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=Tr |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "A | S IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.db import | models
class users_folder_tree(models.Model):
    """A node in a user's folder/file tree, with content hashes and
    bookkeeping flags (per-company, per-user)."""
    # fix: max_length is only valid on CharField; it was removed from the
    # numeric/boolean fields below (Django ignores or rejects it there).
    name = models.CharField(max_length=256)
    type = models.IntegerField()
    parentID = models.IntegerField()       # id of the parent tree node
    isFile = models.BooleanField()
    sizebyte = models.IntegerField()       # size in bytes
    level = models.IntegerField()          # depth in the tree
    companyid = models.IntegerField()
    user_id = models.IntegerField()
    isContainer = models.BooleanField()
    competence = models.IntegerField()
    # Content checksums of the stored file.
    MD5string = models.CharField(max_length=256)
    SHA1string = models.CharField(max_length=256)
    CRC32string = models.CharField(max_length=256)
    FileLink = models.FloatField()
    isDeleted = models.IntegerField()
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WMain/IOST_WMainSATA.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Basic import *
from IOST_Config import *
from IOST_Testcase import *
import gtk
import gtk.glade
import gobject
#======================================================================
# Derive the module-local debug flag from the optional global IOST_DBG_EN
# (expected to come from the star-imports above).  If it is not defined
# anywhere, default to debugging disabled.
try:
    IOST_DBG_EN
    if IOST_DBG_EN:
        IOST_WMainSATA_DebugEnable = 1
    else:
        IOST_WMainSATA_DebugEnable = 0
except NameError:  # fix: was a bare except; only an undefined name is expected
    IOST_DBG_EN = False
    IOST_WMainSATA_DebugEnable = 0
#======================================================================
class IOST_WMain_SATA():
"""
"""
#----------------------------------------------------------------------
    def __init__(self, glade_filename, window_name, builder=None):
        """Bind the SATA widgets of the main window.

        glade_filename -- path to the Glade UI definition file.
        window_name    -- key used to look up this window's widget objects.
        builder        -- existing gtk.Builder to reuse; when falsy, a new one
                          is created from glade_filename and its signals are
                          connected to this instance's handlers.
        """
        self.IOST_WMainSATA_WindowName = window_name
        if not builder:
            self.IOST_WMainSATA_Builder = gtk.Builder()
            self.IOST_WMainSATA_Builder.add_from_file(glade_filename)
            self.IOST_WMainSATA_Builder.connect_signals(self)
        else:
            self.IOST_WMainSATA_Builder = builder
#----------------------------------------------------------------------
    def SetValueToSATA_Obj(self, window_name):
        """
        Init all SATA objects when start IOST Wmain program
        """
        # IOST_Data["SATA"] is the controller-wide switch ("Enable"/"Disable");
        # IOST_Data["SATA<i>"][0] holds each port's own state.
        if self.IOST_Data["SATA"] == "Enable":
            self.IOST_Objs[window_name]["_IP_Enable_SATA_CB"].set_active(True)
            # Mirror each port's saved state onto its checkbox and the
            # sensitivity of its Config button.
            for i in range(0, self.IOST_Data["SATA_PortNum"]):
                if self.IOST_Data["SATA"+str(i)][0] == "Disable":
                    self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_CB"].set_active(False)
                    self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_B"].set_sensitive(False)
                else:
                    self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_CB"].set_active(True)
                    self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_B"].set_sensitive(True)
        else:
            # Whole controller disabled: grey out every port's widgets.
            self.IOST_Objs[window_name]["_IP_Enable_SATA_CB"].set_active(False)
            for i in range(0, self.IOST_Data["SATA_PortNum"]):
                self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_CB"].set_sensitive(False)
                self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_B"].set_sensitive(False)
        # Cache each port's test-case count (list length minus the leading
        # Enable/Disable flag element).
        for i in range(0, self.IOST_Data["SATA_PortNum"]):
            self.IOST_Data["SATA"+str(i)+"_TestCaseNum"] = len(self.IOST_Data["SATA"+str(i)]) - 1
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA0_B_clicked(self, object, data=None):
"Control to ConfigSATA-0 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA0")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA0_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA0_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA0_B"].s | et_sensitive(Res)
if (Res):
self.IOST_Data["SATA0"][0] = 'Enable'
else:
self.IOST_Data["SATA0"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_p | rint(IOST_DBG_L06, self.IOST_Data["SATA0"][0], "IOST_Data->SATA0_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA1_B_clicked(self, object, data=None):
"Control to ConfigSATA-1 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA1")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA1_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA1_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA1_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA1"][0] = 'Enable'
else:
self.IOST_Data["SATA1"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA1"][0], "IOST_Data->SATA1_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA2_B_clicked(self, object, data=None):
"Control to ConfigSATA-2 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA2")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA2_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA2_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA2_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA2"][0] = 'Enable'
else:
self.IOST_Data["SATA2"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA2"][0], "IOST_Data->SATA2_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA3_B_clicked(self, object, data=None):
"Control to ConfigSATA-3 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA3")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA3_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA3_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA3_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA3"][0] = 'Enable'
else:
self.IOST_Data["SATA3"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA3"][0], "IOST_Data->SATA3_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_IP_Enable_SATA_CB_toggled(self, object, data=None):
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_IP_Enable_SATA_CB"].get_active()
self.IOST_WMain_SATA_set_sensitive_all(Res)
if Res:
self.IOST_Data["SATA"] = 'Enable'
else:
self.IOST_Data["SATA"] = 'Disable'
#----------------------------------------------------------------------
def IOST_WMain_SATA_set_sensitive_all(self, value):
for i in range(0, self.IOST_Data["SATA_PortNum"]):
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA"+str(i)+"_CB"].set_sensitive(value)
if self.IOST_Data["SATA"+str(i)][0] == "Enable" and value:
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA"+str(i)+"_B"].set_sensitive(value)
else:
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA"+str(i)+"_B"].set_sensitive(False)
|
import pandas as pd
import numpy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
# Module-level working state for the chat-author classifier below.
content = []  # lines of the chat file currently being processed
data = []     # accumulated [text, label] training rows
common = []   # stop-word list loaded from the "common" file
def remove_non_ascii(text):
    """Return *text* with every non-ASCII character replaced by a space."""
    cleaned = []
    for ch in text:
        cleaned.append(ch if ord(ch) < 128 else ' ')
    return ''.join(cleaned)
def remove_words(array, stopwords=None):
    """Strip stop words from each line of *array*.

    :param array: iterable of text lines
    :param stopwords: stop words to drop; defaults to the module-level
        ``common`` list (backward compatible with the old signature)
    :returns: list of lines with every stop-word token removed

    Fixes two defects of the original implementation:
    - ``list.remove`` dropped only the FIRST occurrence of each stop word;
      all occurrences are removed now.
    - entries read via ``readlines()`` keep a trailing newline; they are
      stripped here so they actually match the split tokens.
    """
    if stopwords is None:
        stopwords = common
    # Set lookup is O(1) per token; strip() makes "the\n" match "the".
    stopset = {w.strip() for w in stopwords}
    final = []
    for line in array:
        kept = [word for word in line.split() if word not in stopset]
        final.append(' '.join(kept))
    return final
# 1.txt / 2.txt / 3.txt hold each person's chat history and are used to
# train the classifier.

def _load_chat_lines(path):
    """Read *path*, strip newlines and non-ASCII chars, drop stop words."""
    with open(path, encoding="utf8") as f:
        lines = f.readlines()
    lines = [w.replace("\n", '') for w in lines]
    lines = [remove_non_ascii(w) for w in lines]
    return remove_words(lines)

# common is a list of the 100 most common English words.
# BUG FIX: readlines() keeps the trailing "\n" on every entry, so without
# strip() no stop word ever matched a split token.
with open("common") as f:
    common = [w.strip() for w in f.readlines()]
# Build labelled training rows: one [text, label] pair per chat line.
# You could add more people here.
for person_id in (1, 2, 3):
    for line in _load_chat_lines(str(person_id) + ".txt"):
        data.append([line, "Person" + str(person_id)])
data = [[remove_non_ascii(item) for item in row] for row in data]
# We have data in the 2D array. Now convert it to a DataFrame.
data_frame = pd.DataFrame(data, columns=list('xy'))
# Shuffle data for randomness
data_frame = data_frame.reindex(numpy.random.permutation(data_frame.index))
# Create feature vectors
count_vectorizer = CountVectorizer()
counts = count_vectorizer.fit_transform(data_frame['x'].values)
# Multinomial Naive Bayes classifier; alpha defaults to 1,
# i.e. Laplace smoothing.
classifier = MultinomialNB()
targets = data_frame['y'].values
classifier.fit(counts, targets)
success = 0
fail = 0
# Below file contains test data for the first person;
# you can substitute test data for any person.
sample = _load_chat_lines("test_P1.txt")
sample_count = count_vectorizer.transform(sample)
predictions = classifier.predict(sample_count)
for predicted_label in predictions:
    if predicted_label == "Person1":
        success += 1
    else:
        fail += 1
print("Success=" + str(success) + "\nFail=" + str(fail))
print("Success%=" + str(success * 100 / (fail + success))[0:5])
import os
import | unittest
import synapse
import synapse.lib.datfile as s_datfile
from synapse.tests.common import *
syndir = os.path.dirname(synapse.__file__)
class DatFileTest(SynTest):
    """Exercise synapse.lib.datfile against the bundled test.dat fixture."""

    def test_datfile_basic(self):
        # openDatFile resolves the path relative to the installed package.
        with s_datfile.openDatFile('synapse.tests/test.dat') as fd:
            self.nn(fd)
            payload = fd.read()
            self.eq(payload, b'woot\n')
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
def test_basic():
    """Explode a Series mixing lists, NaN, an empty list and a tuple."""
    ser = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo")
    expected = pd.Series(
        [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo"
    )
    tm.assert_series_equal(ser.explode(), expected)
def test_mixed_type():
    """Explode handles list, NaN, None, empty ndarray and a nested Series."""
    ser = pd.Series(
        [[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo"
    )
    expected = pd.Series(
        [0, 1, 2, np.nan, None, np.nan, "a", "b"],
        index=[0, 0, 0, 1, 2, 3, 4, 4],
        dtype=object,
        name="foo",
    )
    out = ser.explode()
    tm.assert_series_equal(out, expected)
def test_empty():
    """Exploding an empty object Series is a no-op copy."""
    ser = pd.Series(dtype=object)
    tm.assert_series_equal(ser.explode(), ser.copy())
def test_nested_lists():
    """Only one nesting level is unwrapped; inner lists stay intact."""
    ser = pd.Series([[[1, 2, 3]], [1, 2], 1])
    expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2])
    tm.assert_series_equal(ser.explode(), expected)
def test_multi_index():
    """MultiIndex labels are repeated once per exploded element."""
    mi = pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"])
    ser = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], name="foo", index=mi)
    expected_index = pd.MultiIndex.from_tuples(
        [("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)],
        names=["foo", "bar"],
    )
    expected = pd.Series(
        [0, 1, 2, np.nan, np.nan, 3, 4], index=expected_index, dtype=object, name="foo"
    )
    tm.assert_series_equal(ser.explode(), expected)
def test_large():
    """Exploding an already-exploded (all-scalar) Series is the identity."""
    ser = pd.Series([range(256)]).explode()
    tm.assert_series_equal(ser.explode(), ser)
def test_invert_array():
    """Exploding per-row arrays reconstructs the original (unnamed) column."""
    df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")})
    per_row_arrays = df.apply(lambda x: x.array, axis=1)
    tm.assert_series_equal(per_row_arrays.explode(), df["a"].rename())
@pytest.mark.parametrize(
    "s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))]
)
def test_non_object_dtype(s):
    """Non-object dtypes have nothing to explode; result equals the input.

    BUG FIX: the function was named ``non_object_dtype`` — without the
    ``test_`` prefix pytest never collected it, so the parametrized cases
    silently never ran.
    """
    result = s.explode()
    tm.assert_series_equal(result, s)
def test_typical_usecase():
    """Split-then-explode, joined back against the remaining columns."""
    df = pd.DataFrame(
        [{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}],
        columns=["var1", "var2"],
    )
    exploded = df.var1.str.split(",").explode()
    expected = pd.DataFrame(
        {"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
        columns=["var2", "var1"],
        index=[0, 0, 0, 1, 1, 1],
    )
    tm.assert_frame_equal(df[["var2"]].join(exploded), expected)
def test_nested_EA():
    """Nested extension arrays (tz-aware datetimes) explode element-wise."""
    ser = pd.Series(
        [
            pd.date_range("20170101", periods=3, tz="UTC"),
            pd.date_range("20170104", periods=3, tz="UTC"),
        ]
    )
    expected = pd.Series(
        pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
    )
    tm.assert_series_equal(ser.explode(), expected)
def test_duplicate_in | dex():
# GH 28005
s = pd.Series([[1, 2], [3, 4]], index=[0, 0])
result = s.explode()
expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
tm.assert_series_equal(result, expected)
def test_ignore_index():
    # GH 34932: ignore_index resets the result to a fresh default index.
    ser = pd.Series([[1, 2], [3, 4]])
    expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
    tm.assert_series_equal(ser.explode(ignore_index=True), expected)
def test_explode_sets():
    # https://github.com/pandas-dev/pandas/issues/35614
    # Sets iterate in arbitrary order, hence the sort before comparing.
    ser = pd.Series([{"a", "b", "c"}], index=[1])
    out = ser.explode().sort_values()
    expected = pd.Series(["a", "b", "c"], index=[1, 1, 1])
    tm.assert_series_equal(out, expected)
def test_explode_scalars_can_ignore_index():
    # https://github.com/pandas-dev/pandas/issues/40487
    ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
    expected = pd.Series([1, 2, 3])
    tm.assert_series_equal(ser.explode(ignore_index=True), expected)
|
The Supervisor SID/URI when executing the Supervise instruction
:param bool end_conference_on_customer_exit: Whether to end the conference when the customer leaves
:param bool beep_on_customer_entrance: Whether to play a notification beep when the customer joins
:param unicode if_match: The If-Match HTTP request header
:returns: The updated ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
data = values.of({
'ReservationStatus': reservation_status,
'WorkerActivitySid': worker_activity_sid,
'Instruction': instruction,
'DequeuePostWorkActivitySid': dequeue_post_work_activity_sid,
'DequeueFrom': dequeue_from,
'DequeueRecord': dequeue_record,
'DequeueTimeout': dequeue_timeout,
'DequeueTo': dequeue_to,
'DequeueStatusCallbackUrl': dequeue_status_callback_url,
'CallFrom': call_from,
'CallRecord': call_record,
'CallTimeout': call_timeout,
'CallTo': call_to,
'CallUrl': call_url,
'CallStatusCallbackUrl': call_status_callback_url,
'CallAccept': call_accept,
'RedirectCallSid': redirect_call_sid,
'RedirectAccept': redirect_accept,
'RedirectUrl': redirect_url,
'To': to,
'From': from_,
'StatusCallback': status_callback,
'StatusCallbackMethod': status_callback_method,
'StatusCallbackEvent': serialize.map(status_callback_event, lambda e: e),
'Timeout': timeout,
'Record': record,
'Muted': muted,
'Beep': beep,
'StartConferenceOnEnter': start_conference_on_enter,
'EndConferenceOnExit': end_conference_on_exit,
'WaitUrl': wait_url,
'WaitMethod': wait_method,
'EarlyMedia': early_media,
'MaxParticipants': max_participants,
'ConferenceStatusCallback': conference_status_callback,
'ConferenceStatusCallbackMethod': conference_status_callback_method,
'ConferenceStatusCallbackEvent': serialize.map(conference_status_callback_event, lambda e: e),
'ConferenceRecord': conference_record,
'ConferenceTrim': conference_trim,
'RecordingChannels': recording_channels,
'RecordingStatusCallback': recording_status_callback,
'RecordingStatusCallbackMethod': recording_status_callback_method,
'ConferenceRecordingStatusCallback': conference_recording_status_callback,
'ConferenceRecordingStatusCallbackMethod': conference_recording_status_callback_method,
'Region': region,
'SipAuthUsername': sip_auth_username,
'SipAuthPassword': sip_auth_password,
'DequeueStatusCallbackEvent': serialize.map(dequeue_status_callback_event, lambda e: e),
'PostWorkActivitySid': post_work_activity_sid,
'SupervisorMode': supervisor_mode,
'Supervisor': supervisor,
'EndConferenceOnCustomerExit': end_conference_on_customer_exit,
'BeepOnCustomerEntrance': beep_on_customer_entrance,
})
headers = values.of({'If-Match': if_match, })
payload = self._version.update(method='POST', uri=self._uri, data=data, headers=headers, )
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in s | elf._solution.items())
return '<Twil | io.Taskrouter.V1.ReservationContext {}>'.format(context)
class ReservationInstance(InstanceResource):
    class Status(object):
        # Reservation lifecycle states reported by the TaskRouter API.
        PENDING = "pending"
        ACCEPTED = "accepted"
        REJECTED = "rejected"
        TIMEOUT = "timeout"
        CANCELED = "canceled"
        RESCINDED = "rescinded"
        WRAPPING = "wrapping"
        COMPLETED = "completed"
    class CallStatus(object):
        # Progress states of the call placed by the Call instruction.
        INITIATED = "initiated"
        RINGING = "ringing"
        ANSWERED = "answered"
        COMPLETED = "completed"
    class ConferenceEvent(object):
        # Conference status-callback event names.
        START = "start"
        END = "end"
        JOIN = "join"
        LEAVE = "leave"
        MUTE = "mute"
        HOLD = "hold"
        SPEAKER = "speaker"
    class SupervisorMode(object):
        # Modes accepted by the Supervise instruction.
        MONITOR = "monitor"
        WHISPER = "whisper"
        BARGE = "barge"
    def __init__(self, version, payload, workspace_sid, task_sid, sid=None):
        """
        Initialize the ReservationInstance

        :param version: Version that owns this resource
        :param payload: decoded JSON API response for the reservation
        :param workspace_sid: SID of the containing Workspace
        :param task_sid: SID of the reserved Task
        :param sid: reservation SID; falls back to the payload's sid when None
        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        """
        super(ReservationInstance, self).__init__(version)
        # Marshaled Properties
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'reservation_status': payload.get('reservation_status'),
            'sid': payload.get('sid'),
            'task_sid': payload.get('task_sid'),
            'worker_name': payload.get('worker_name'),
            'worker_sid': payload.get('worker_sid'),
            'workspace_sid': payload.get('workspace_sid'),
            'url': payload.get('url'),
            'links': payload.get('links'),
        }
        # Context
        # _context is created lazily by the _proxy property.
        self._context = None
        # _solution carries the identifiers needed to rebuild the context.
        self._solution = {
            'workspace_sid': workspace_sid,
            'task_sid': task_sid,
            'sid': sid or self._properties['sid'],
        }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ReservationContext for this ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
if self._context is None:
self._context = ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        # Read-only view over the payload marshaled in __init__.
        return self._properties['account_sid']
    @property
    def date_created(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was created
        :rtype: datetime
        """
        # Already parsed to datetime by deserialize.iso8601_datetime.
        return self._properties['date_created']
    @property
    def date_updated(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        # Already parsed to datetime by deserialize.iso8601_datetime.
        return self._properties['date_updated']
    @property
    def reservation_status(self):
        """
        :returns: The current status of the reservation
        :rtype: ReservationInstance.Status
        """
        # One of the ReservationInstance.Status string constants.
        return self._properties['reservation_status']
    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        # Read-only view over the payload marshaled in __init__.
        return self._properties['sid']
    @property
    def task_sid(self):
        """
        :returns: The SID of the reserved Task resource
        :rtype: unicode
        """
        # Read-only view over the payload marshaled in __init__.
        return self._properties['task_sid']
@property
def worker_name(self):
"""
:returns: The friendly_name of the Worker that is re |
"""
hosts = []
# get all hosts from DNS
ips = socket.getaddrinfo('all.api.radio-browser.info',
80, 0, 0, socket.IPPROTO_TCP)
for ip_tupple in ips:
ip = ip_tupple[4][0]
# do a reverse lookup on every one of the ips to have a nice name for it
host_addr = socket.gethostbyaddr(ip)
# add the name to a list if not already in there
if host_addr[0] not in hosts:
hosts.append(host_addr[0])
# sort list of names
random.shuffle(hosts)
# add "https://" in front to make it an url
xbmc.log("Found hosts: " + ",".join(hosts))
return list(map(lambda x: "https://" + x, hosts))
def LANGUAGE(id):
    """Return the add-on's localized string for *id*, UTF-8 encoded."""
    return addon.getLocalizedString(id).encode('utf-8')
def build_url(query):
    """Build a plugin callback URL for this add-on from the *query* dict."""
    return '{0}?{1}'.format(base_url, urllib.urlencode(query))
def addLink(stationuuid, name, url, favicon, bitrate):
    """Add one playable station entry to the directory listing, with an
    add/remove-favourite context menu depending on my_stations membership."""
    li = xbmcgui.ListItem(name, iconImage=favicon)
    li.setProperty('IsPlayable', 'true')
    li.setInfo(type="Music", infoLabels={ "Title":name, "Size":bitrate})
    # Route playback back through this plugin so we can resolve the stream.
    localUrl = build_url({'mode': 'play', 'stationuuid': stationuuid})
    if stationuuid in my_stations:
        contextTitle = LANGUAGE(32009)
        contextUrl = build_url({'mode': 'delstation', 'stationuuid': stationuuid})
    else:
        contextTitle = LANGUAGE(32010)
        contextUrl = build_url({'mode': 'addstation', 'stationuuid': stationuuid, 'name': name.encode('utf-8'), 'url': url, 'favicon': favicon, 'bitrate': bitrate})
    li.addContextMenuItems([(contextTitle, 'RunPlugin(%s)'%(contextUrl))])
    xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=False)
def downloadFile(uri, param):
    """
    Download file with the correct headers set

    :param uri: absolute URL to request
    :param param: optional dict; sent JSON-encoded as the POST body
    Returns:
        a string result
    """
    paramEncoded = None
    if param is not None:
        paramEncoded = json.dumps(param)
        # Joining a dict logs its keys only, which is enough for tracing.
        xbmc.log('Request to ' + uri + ' Params: ' + ','.join(param))
    else:
        xbmc.log('Request to ' + uri)
    req = urllib2.Request(uri, paramEncoded)
    req.add_header('User-Agent', 'KodiRadioBrowser/1.2.0')
    req.add_header('Content-Type', 'application/json')
    response = urllib2.urlopen(req)
    try:
        data = response.read()
    finally:
        # BUG FIX: the response was leaked when read() raised.
        response.close()
    return data
def downloadApiFile(path, param):
    """
    Download file with relative url from a random api server.
    Retry with other api servers if failed.
    Returns:
        a string result ('{}' when every server failed)
    """
    servers = get_radiobrowser_base_urls()
    for i, server_base in enumerate(servers):
        xbmc.log('Random server: ' + server_base + ' Try: ' + str(i))
        uri = server_base + path
        try:
            return downloadFile(uri, param)
        except Exception:
            # Keep trying the remaining servers.
            xbmc.log("Unable to download from api url: " + uri, xbmc.LOGERROR)
    # BUG FIX: callers feed the result straight into json.loads(), so the
    # all-servers-failed fallback must be a JSON *string*, not a dict ({}
    # raised TypeError in json.loads).
    return '{}'
def addPlayableLink(data):
    """Decode a JSON station list and add one playable entry per station."""
    for station in json.loads(data):
        addLink(
            station['stationuuid'],
            station['name'],
            station['url'],
            station['favicon'],
            station['bitrate'],
        )
def readFile(filepath):
    """Deserialize and return the JSON content of *filepath*."""
    with open(filepath, 'r') as read_file:
        parsed = json.load(read_file)
    return parsed
def writeFile(filepath, data):
    """Serialize *data* as JSON into *filepath* (returns None)."""
    with open(filepath, 'w') as write_file:
        json.dump(data, write_file)
def addToMyStations(stationuuid, name, url, favicon, bitrate):
    """Store a station in the persistent favourites dict and flush to disk."""
    my_stations[stationuuid] = {'stationuuid': stationuuid, 'name': name, 'url': url, 'bitrate': bitrate, 'favicon': favicon}
    writeFile(mystations_path, my_stations)
def delFromMyStations(stationuuid):
    """Remove a station from favourites (if present), persist and refresh."""
    if stationuuid not in my_stations:
        return
    del my_stations[stationuuid]
    writeFile(mystations_path, my_stations)
    xbmc.executebuiltin('Container.Refresh')
# create storage
# Ensure the add-on profile directory exists, then load the persisted
# favourites, seeding the file with the (empty) defaults on first run.
if not xbmcvfs.exists(profile):
    xbmcvfs.mkdir(profile)
if xbmcvfs.exists(mystations_path):
    my_stations = readFile(mystations_path)
else:
    writeFile(mystations_path, my_stations)
mode = args.get('mode', None)
if mode is None:
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/topclick/100'})
li = xbmcgui.ListItem(LANGUAGE(32000), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/topvote/100'})
li = xbmcgui.ListItem(LANGUAGE(32001), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/lastchange/100'})
li = xbmcgui.ListItem(LANGUAGE(32002), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/lastclick/100'})
li = xbmcgui.ListItem(LANGUAGE(32003), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'tags'})
li = xbmcgui.ListItem(LANGUAGE(32004), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'countries'})
li = xbmcgui.ListItem(LANGUAGE(32005), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'search'})
li = xbmcgui.ListItem(LANGUAGE(32007), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'mystations'})
li = xbmcgui.ListItem(LANGUAGE(32008), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url | =localUrl, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'tags':
data = downloadApiFile('/json/tags', None)
dataDecoded = json.loads(data)
for tag in dataDecoded: |
tagName = tag['name']
if int(tag['stationcount']) > 1:
try:
localUrl = build_url({'mode': 'stations', 'key': 'tag', 'value' : base64.b32encode(tagName.encode('utf-8'))})
li = xbmcgui.ListItem(tagName, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
except Exception as e:
xbmc.err(e)
pass
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'countries':
data = downloadApiFile('/json/countries', None)
dataDecoded = json.loads(data)
for tag in dataDecoded:
countryName = tag['name']
if int(tag['stationcount']) > 1:
try:
localUrl = build_url({'mode': 'states', 'country': base64.b32encode(countryName.encode('utf-8'))})
li = xbmcgui.ListItem(countryName, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
except Exception as e:
xbmc.log("Stationcount is not of type int", xbmc.LOGERROR)
pass
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'states':
country = args['country'][0]
country = base64.b32decode(country)
country = country.decode('utf-8')
data = downloadApiFile('/json/states/'+urllib.quote(country)+'/', None)
dataDecoded = json.loads(data)
localUrl = build_url({'mode': 'stations', 'key': 'country', 'value': base64.b32encode(country.encode('utf-8'))})
li = xbmcgui.ListItem(LANGUAGE(32006), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
for tag in dataDecoded:
stateName = tag['name']
if int(tag['stationcount']) > 1:
try:
localUrl = build_url({'mode': 'stations', 'key': 'state','value':base64.b32encode(stateName.encode('utf-8'))})
li = xbmcgui. |
#!/usr/bin/env python
import os, sys
def touch(path):
    """Create *path* if missing and bump its mtime (like the `touch` command)."""
    open(path, 'a').close()
    os.utime(path, None)
# Build the CMU-Sphinx style fileids / transcription lists from the .wav
# files in the current directory.  NOTE(review): Python 2 script (print
# statement, str written to 'wb' files).
fout = open('sample.fileids', 'wb')
fout2 = open('sample.transcription', 'wb')
for fn in sorted(os.listdir('.')):
    if not fn.endswith('.wav'):
        continue
    base_fn = os.path.splitext(fn)[0]
    txt_fn = base_fn + '.txt'
    # Make sure a (possibly empty) transcript file exists for every wav.
    touch(txt_fn)
    text = open(txt_fn).read().strip()
    # Lines starting with '#' are treated as commented-out transcripts.
    if text and not text.startswith('#'):
        fout.write('samples/%s\n' % base_fn)
        fout2.write('<s> %s <s> (%s)\n' % (text, base_fn))
print 'Done.'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from cron_status import *
class TestChangeDetection(unittest.TestCase):
    """Test if the change detection is operational."""
    # Please note that status_history_list is backwards,
    # i.e., newest entry first.
    def test_all_okay(self):
        # A history of nothing but OKAY must report "no change".
        status_history_list = [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH + 1)
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(changed, status[0].changed) # because there is only 1 container
        self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
        self.assertEqual(status[0].current_status, ContainerStatus.OKAY)
        self.assertTrue(status[0].container_name in status_history_list[0])
        self.assertEqual(status[0].current_msg, status_history_list[0][status[0].container_name][1])
    def test_all_failed(self):
        # Uniform FAILED history: failed overall, but still no *change*.
        status_history_list = [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH + 1)
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(changed, status[0].changed) # because there is only 1 container
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
        self.assertEqual(status[0].current_status, ContainerStatus.FAILED)
    def test_failed_after_starting_short(self):
        # OKAY -> STARTING... -> FAILED (newest first): counts as a change.
        status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
        status_history_list += [
            {'foo': (ContainerStatus.STARTING, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH - 1)
        status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
    def test_failed_after_starting_very_long(self):
        # FAILED after a full window of STARTING is still a change.
        status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
        status_history_list += [
            {'foo': (ContainerStatus.STARTING, 'no msg')}
        ] * STATUS_HISTORY_LENGTH
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
    def test_okay_after_failed(self):
        # Recovery (OKAY newest after FAILED run) is reported as a change.
        status_history_list = [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ]
        status_history_list += [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ] * STATUS_HISTORY_LENGTH
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
    def test_failed_after_okay(self):
        # Fresh failure (FAILED newest after OKAY run) is a change too.
        status_history_list = [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ]
        status_history_list += [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ] * STATUS_HISTORY_LENGTH
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
    def test_missing_data(self):
        # History shorter than the full window must not report a change.
        status_history_list = [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH - 1)
        status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
    def test_too_much_data(self):
        # Entries beyond the window (the old FAILED) must be ignored.
        status_history_list = [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH + 1)
        status_history_list += [{'foo': (ContainerStatus.FAILED, 'no msg')}]
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
# Keep SHARED_INTERMEDIATE_DIR as a literal placeholder token; it is
# substituted per-configuration when include paths are expanded later.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
    '$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Seed *default_variables* from the generator flags and the target OS."""
  for flag_key, flag_val in params.get('generator_flags', {}).items():
    default_variables.setdefault(flag_key, flag_val)
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.
  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """
  gyp_includes_set = set()
  compiler_includes_list = []
  # Find compiler's default include dirs.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # The search-path listing is printed on stderr (communicate()[1]).
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    # ...
    # #include "..." search starts here:
    # #include <...> search starts here:
    #  /usr/include/c++/4.6
    #  /usr/local/include
    # End of search list.
    # ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        # Preserve first-seen order while de-duplicating.
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)
  flavor = gyp.common.GetFlavor(params)
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        generator_flags = params.get('generator_flags', {})
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)
      # Find standard gyp include dirs.
      if 'include_dirs' in config:
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            # Expand the placeholder installed by the module-level setup.
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            # Relative dirs are resolved against the target's build file dir.
            if not os.path.isabs(include_dir):
              base_dir = os.path.dirname(target_name)
              include_dir = base_dir + '/' + include_dir
            include_dir = os.path.abspath(include_dir)
            gyp_includes_set.add(include_dir)
  # Generate a list that has all the include dirs.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  # Compiler defaults go last, after the sorted gyp-specified dirs.
  for compiler_include in compiler_includes_list:
    if not compiler_include in gyp_includes_set:
      all_includes_list.append(compiler_include)
  # All done.
  return all_includes_list
def GetCompilerPath(target_list, data, options):
    """Determine a command that can be used to invoke the compiler.

    Returns:
      If this is a gyp project that has explicit make settings, try to determine
      the compiler from that.  Otherwise, see if a compiler was specified via the
      CC_target environment variable.
    """
    # make_global_settings holds (key, value) pairs; an explicit CC/CXX
    # setting in the gyp file wins over everything else.
    build_file = gyp.common.ParseQualifiedTarget(target_list[0])[0]
    for setting, value in data[build_file].get('make_global_settings', {}):
        if setting in ('CC', 'CXX'):
            return os.path.join(options.toplevel_dir, value)

    # Otherwise fall back to the conventional environment variables,
    # then to plain 'gcc'.
    for env_name in ('CC_target', 'CC', 'CXX'):
        env_compiler = os.environ.get(env_name)
        if env_compiler:
            return env_compiler

    return 'gcc'
def GetAllDefines(target_list, target_dicts, _, config_name, params, compiler_path):
    """Calculate the defines for a project.

    Returns:
      A dict that includes explicit defines declared in gyp files along with all
      of the default defines that the compiler uses.
    """
    # Get defines declared in the gyp files.
    all_defines = {}
    flavor = gyp.common.GetFlavor(params)
    for target_name in target_list:
        target = target_dicts[target_name]

        if flavor == 'win':
            # On Windows the effective defines come from MSVS emulation.
            generator_flags = params.get('generator_flags', {})
            msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
            extra_defines = msvs_settings.GetComputedDefines(config_name)
        else:
            extra_defines = []
        if config_name in target['configurations']:
            config = target['configurations'][config_name]
            target_defines = config['defines']
        else:
            target_defines = []
        for define in target_defines + extra_defines:
            # A define may be 'NAME' or 'NAME=VALUE'; a bare name defaults to 1,
            # mirroring the C preprocessor's behavior.
            split_define = define.split('=', 1)
            if len(split_define) == 1:
                split_define.append('1')
            if split_define[0].strip() in all_defines:
                # Already defined
                continue
            all_defines[split_define[0].strip()] = split_define[1].strip()

    # Get default compiler defines (if possible).
    if flavor == 'win':
        return all_defines  # Default defines already processed in the loop above.
    if compiler_path:
        # '-E -dM' makes the preprocessor dump every predefined macro as
        # '#define NAME VALUE' lines; an empty stdin means "no input file".
        command = shlex.split(compiler_path)
        command.extend(['-E', '-dM', '-'])
        cpp_proc = subprocess.Popen(args=command, cwd='.',
                                    stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        # NOTE(review): communicate() returns bytes on Python 3; splitting on
        # a str here assumes a Python 2 interpreter -- confirm before porting.
        cpp_output = cpp_proc.communicate()[0]
        cpp_lines = cpp_output.split('\n')
        for cpp_line in cpp_lines:
            if not cpp_line.strip():
                continue
            # Each line is '#define KEY [VALUE...]'; split off at most the
            # first two tokens so VALUE keeps any embedded spaces.
            cpp_line_parts = cpp_line.split(' ', 2)
            key = cpp_line_parts[1]
            if len(cpp_line_parts) >= 3:
                val = cpp_line_parts[2]
            else:
                val = '1'
            all_defines[key] = val

    return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
    """Write the includes section of a CDT settings export file."""
    write = out.write
    write(' <section name="org.eclipse.cdt.internal.ui.wizards.settingswizards.IncludePaths">\n')
    # Placeholder language entry required by the CDT import wizard.
    write(' <language name="holder for library settings"></language>\n')
    for lang in eclipse_langs:
        write(' <language name="%s">\n' % lang)
        # Emit one <includepath> element per directory, in order.
        entries = [' <includepath workspace_path="false">%s</includepath>\n' % d
                   for d in include_dirs]
        write(''.join(entries))
        write(' </language>\n')
    write(' </section>\n')
def WriteMacros(out, eclipse_langs, def |
import time
import threading
import subprocess
import pygame.locals
# Absolute path of the external VLC executable used by VideoScene (Windows).
vlc_path = 'C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe'
class Scene(threading.Thread):
    """Base class for a scene that runs on its own thread.

    Subclasses implement run() with whatever the scene plays/draws.
    """
    def __init__(self, screen, games, games_manager):
        threading.Thread.__init__(self)
        self.screen = screen                # pygame display surface (see __main__)
        self.games = games
        self.games_manager = games_manager
class Du | mmyScene(Scene):
def run(self):
time.sleep(5)
class VideoScene(Scene):
    """Scene that plays a video file by launching the external VLC player."""
    def __init__(self, screen, games, games_manager, filename):
        Scene.__init__(self, screen, games, games_manager)
        self.filename = filename  # path of the video file handed to VLC
    def run(self):
        # Blocks until VLC exits; --play-and-exit closes VLC when playback ends.
        subprocess.call([vlc_path, self.filename, '--play-and-exit', '--fullscreen'])#, shell=True)
if __name__ == '__main__':
    # Manual smoke test: open a window and play one video to completion.
    import pygame
    pygame.init()
    pygame.mixer.init()
    screen = pygame.display.set_mode((700, 800))
    s = VideoScene(screen, None, None, 'videos/p2.mpg')
    s.start()
    s.join()
|
import os

# Minimal Django settings module used only for running the test suite.

# Repository root: two levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# NOTE(review): placeholder key -- acceptable for tests, never for production.
SECRET_KEY = 'fake-key'

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'analytics',
    'tests'
]

ROOT_URLCONF = 'tests.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Look for templates inside each installed app's templates/ directory.
        'APP_DIRS': True,
    },
]

DATABASES = {
    'default': {
        # File-backed SQLite database in the repository root.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
'''
Gen | erate all permutations of a given string
'''
def permutations(str):
    """Return a list of all permutations of the given string.

    Always returns a list of strings (the previous version returned a bare
    string for length-1 input, and recursed forever on the empty string).

    >>> sorted(permutations('ab'))
    ['ab', 'ba']
    """
    # Base case: a string of length <= 1 has exactly one permutation, itself.
    # Returning a one-element list keeps the return type consistent and makes
    # the empty string terminate instead of recursing on ''[1:] forever.
    if len(str) <= 1:
        return [str]
    first = str[0]
    result = []
    # Insert the first character at every position of every permutation
    # of the remaining characters.
    for perm in permutations(str[1:]):
        for i in range(len(perm) + 1):  # len+1: also append after the last char
            result.append(perm[:i] + first + perm[i:])
    return result
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add created_at ordering to both models and re-point the FK relations
    (nullable creators, related_name accessors)."""

    dependencies = [
        ('lists', '0002_auto_20150301_1937'),
    ]

    operations = [
        # Default ordering: oldest first, for both todo and todolist.
        migrations.AlterModelOptions(
            name='todo',
            options={'ordering': ('created_at',)},
        ),
        migrations.AlterModelOptions(
            name='todolist',
            options={'ordering': ('created_at',)},
        ),
        migrations.AlterField(
            model_name='todo',
            name='creator',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, related_name='todos'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='todo',
            name='todolist',
            field=models.ForeignKey(to='lists.TodoList', related_name='todos'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='todolist',
            name='creator',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, related_name='todolists'),
            preserve_default=True,
        ),
    ]
|
from collections import Counter
import numpy as np
def run(word_gen, index, window_size, out_file):
    """Count co-occurrence pairs over a sliding window and export them.

    word_gen    -- iterable of tokens
    index       -- mapping from token to integer id
    window_size -- number of context positions on each side of the target
    out_file    -- destination path passed to sparse_io
    """
    context = []
    pair_counts = Counter()
    for word in word_gen:
        context.append(index[word])
        # Keep at most 2*window_size + 1 ids: the target plus its two windows.
        if len(context) > window_size * 2 + 1:
            context.pop(0)
        pair_counts = _process_context(context, pair_counts, window_size)
    # Imported lazily on purpose: pyximport must install its numpy-aware
    # build hook before the compiled sparse_io module can be imported.
    import pyximport
    pyximport.install(setup_args={"include_dirs": np.get_include()})
    from representations import sparse_io
    sparse_io.export_mat_from_dict(pair_counts, out_file)
def _process_context(context, pair_counts, window_size):
if len(context) < window_size + 1:
return pair_counts
target = context[window_size]
indices = range(0, window_size)
indices.extend(range(window_size + 1, 2 * window_size + 1))
for i in indices | :
if i >= len(context):
break
pair_counts[(target, context[i])] += 1
return pair_counts
|
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from zope.interface import Interface, implements
from flumotion.transcoder.admin import admintask
class ITranscoderBalancerListener(Interface):
    """zope interface for objects notified when balancer slots become free."""

    def onSlotsAvailable(self, balancer, count):
        pass
class TranscoderBalancerListener(object):
    """Default no-op implementation of ITranscoderBalancerListener."""

    implements(ITranscoderBalancerListener)

    def onSlotsAvailable(self, balancer, count):
        # Subclasses override this to react when transcoding slots free up.
        pass
class TranscoderBalancer(object):
    """
    Handle the distribution of transcoding tasks to a set of workerPxy.

    NOTE: uses Python 2 dict iteration (itervalues/iteritems, keys().sort()).
    """

    def __init__(self, listener=None):
        self._listener = listener
        self._workerTasks = {} # {workerPxy: [task]}
        self._orphanes = []    # tasks not currently assigned to any worker
        self._current = 0      # total number of tasks known to the balancer
        self._maximum = 0      # sum of every worker's max-task capacity

    ## Public Methods ##

    def getAvailableSlots(self):
        # Free capacity across all workers, never negative.
        return max(self._maximum - self._current, 0)

    def clearTasks(self):
        # Drop every task (orphaned or assigned) but keep the workers.
        self._current = 0
        del self._orphanes[:]
        for tasks in self._workerTasks.itervalues():
            del tasks[:]

    def addWorker(self, workerPxy):
        assert not (workerPxy in self._workerTasks)
        self._workerTasks[workerPxy] = []
        self._maximum += workerPxy.getWorkerContext().getMaxTask()

    def removeWorker(self, workerPxy):
        # The removed worker's tasks become orphans until the next balance().
        assert workerPxy in self._workerTasks
        self._maximum -= workerPxy.getWorkerContext().getMaxTask()
        self._orphanes.extend(self._workerTasks[workerPxy])
        del self._workerTasks[workerPxy]

    def addTask(self, task, workerPxy=None):
        """Add a task, preferring *workerPxy* if given and not already full;
        otherwise the task is queued as an orphan."""
        assert admintask.IAdminTask.providedBy(task)
        assert (workerPxy == None) or (workerPxy in self._workerTasks)
        self._current += 1
        if workerPxy:
            # NOTE(review): 'max' shadows the builtin here and in balance().
            max = workerPxy.getWorkerContext().getMaxTask()
            curr = len(self._workerTasks[workerPxy])
            if max > curr:
                self._workerTasks[workerPxy].append(task)
                task.suggestWorker(workerPxy)
                return
        self._orphanes.append(task)

    def removeTask(self, task):
        # Remove the task from wherever it currently lives (orphans or a worker).
        assert admintask.IAdminTask.providedBy(task)
        if task in self._orphanes:
            self._orphanes.remove(task)
            self._current -= 1
            return
        for tasks in self._workerTasks.itervalues():
            if task in tasks:
                tasks.remove(task)
                self._current -= 1
                return

    def balance(self):
        """Redistribute tasks: trim overloaded workers, then hand orphans to
        the workers with the most free capacity."""

        def getSortedWorkers():
            """
            Return all the workers with at least 1 free slot
            with the ones with the most free slots first.
            """
            # lookup maps worker -> load ratio (assigned / capacity).
            lookup = dict([(w, float(len(t)) / w.getWorkerContext().getMaxTask())
                           for w, t in self._workerTasks.items()
                           if len(t) < w.getWorkerContext().getMaxTask()])
            workerPxys = lookup.keys()
            workerPxys.sort(key=lookup.get)
            return workerPxys

        if self._workerTasks:
            # First remove the exceding tasks
            for workerPxy, tasks in self._workerTasks.iteritems():
                max = workerPxy.getWorkerContext().getMaxTask()
                if len(tasks) > max:
                    diff = len(tasks) - max
                    # NOTE(review): slicing at 'diff' keeps only 'diff' tasks
                    # on the worker; keeping 'max' tasks (slice at 'max')
                    # looks like the intent -- confirm before changing.
                    oldTasks = tasks[diff:]
                    del tasks[diff:]
                    self._orphanes.extend(oldTasks)
                    for task in oldTasks:
                        task.suggestWorker(None)
            # Then distribute the orphanes until there is
            # no more free slots or no more orphane tasks
            while True:
                workerPxys = getSortedWorkers()
                if not workerPxys: break
                for workerPxy in workerPxys:
                    # for/else: only loop again if the inner break (orphans
                    # exhausted) did NOT fire.
                    if not self._orphanes: break
                    tasks = self._workerTasks[workerPxy]
                    task = self._orphanes.pop()
                    tasks.append(task)
                    task.suggestWorker(workerPxy)
                else:
                    continue
                break
        available = self.getAvailableSlots()
        if self._listener and (available > 0):
            self._listener.onSlotsAvailable(self, available)
|
import unittest
import pythran
import os.path
#pythran export a((float,(int,uintp),str list) list list)
#pythran export a(str)
#pythran export a( (str,str), int, intp list list)
#pythran export a( float set )
#pythran export a( bool:str dict )
#pythran export a( float )
#pythran export a( int8[] )
#pythran export a( int8[][] order (F))
#pythran export a( byte )
#pythran export a0( uint8 )
#pythran export a1( int16 )
#pythran export a2( uint16 )
#pythran export a3( int32 )
#pythran export a4( uint32 )
#pythran export a5( int64 )
#pythran export a6( uint64 )
#pythran export a7( float32 )
#pythran export a8( float64 )
#pythran export a9( complex64 )
#pythran export a10( complex128 )
#pythran export a( int8 set )
#pythran export b( int8 set? )
#pythran export a( uint8 list)
#pythran export a( int16 [], slice)
#pythran export a( uint16 [][] order(C))
#pythran export a( uint16 [::][])
#pythran export a( uint16 [:,:,:])
#pythran export a( uint16 [:,::,:])
#pythran export a( uint16 [,,,,])
#pythran export a( (int32, ( uint32 , int64 ) ) )
#pythran export a( uint64:float32 dict )
#pythran export a( float64, complex64, complex128 )
class TestSpecParser(unittest.TestCase):
    """Exercise pythran's '#pythran export' spec parser and its error paths.

    The only code change from the original is replacing the deprecated
    unittest alias assertEquals with assertEqual.
    """

    def test_parser(self):
        # Parse the export specs embedded in this very source file's comments.
        real_path = os.path.splitext(os.path.realpath(__file__))[0]+".py"
        with open(real_path) as fd:
            print(pythran.spec_parser(fd.read()))

    def test_invalid_specs0(self):
        # Exported signature has fewer args than the function: must raise.
        code = '#pythran export foo()\ndef foo(n): return n'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_invalid_specs1(self):
        # Exported signature has more args than the function: must raise.
        code = '#pythran export boo(int)\ndef boo(): pass'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_invalid_specs2(self):
        # Export names a function that does not exist: must raise.
        code = '#pythran export bar(int)\ndef foo(): pass'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_invalid_specs3(self):
        # Optional argument marker in a non-trailing position: must raise.
        code = '#pythran export bar(int, int?, int)\ndef bar(x, y=1, z=1): pass'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_multiline_spec0(self):
        code = '''
#pythran export foo(
#             )
def foo(): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_multiline_spec1(self):
        code = '''
#pythran export foo(int
#, int
#             )
def foo(i,j): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_multiline_spec2(self):
        code = '''
# pythran export foo(int,
#                    float
#, int
#             )
def foo(i,j,k): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_crappy_spec0(self):
        # Trailing junk after a valid export must be tolerated.
        code = '''
# pythran export soo(int) this is an int test
def soo(i): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_crappy_spec1(self):
        code = '''
# pythran export poo(int)
#this is a pythran export test
def poo(i): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_middle_spec0(self):
        # Exports may appear after the function they describe.
        code = '''
def too(i): return
# pythran export too(int)
#this is a pythran export test
def bar(i): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_middle_spec1(self):
        code = '''
def zoo(i): return
#this is a pythran export test
# pythran export zoo(int)
#this is an export test
# pythran export zoo(str)
def bar(i): return
'''
        # One exported function with two distinct signatures.
        self.assertEqual(len(pythran.spec_parser(code).functions), 1)
        self.assertEqual(len(pythran.spec_parser(code).functions['zoo']), 2)

    def test_var_export0(self):
        # Exporting a module-level variable, not a function.
        code = '''
# pythran export coo
coo = 1
'''
        self.assertTrue(pythran.spec_parser(code))
|
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# | but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
import os
import socket
from cloudinit import helpers
from cloudinit import util
# Locations of the puppet configuration file and its SSL material.
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
def _autostart_puppet(log):
    """Arrange for the puppet service to start at boot, using whichever
    service-management mechanism this distribution provides."""
    # Probe each known mechanism in order of preference; the first probe
    # path that exists decides how puppet gets enabled.
    starters = (
        ('/etc/default/puppet',
         ['sed', '-i', '-e', 's/^START=.*/START=yes/', '/etc/default/puppet']),
        ('/bin/systemctl',
         ['/bin/systemctl', 'enable', 'puppet.service']),
        ('/sbin/chkconfig',
         ['/sbin/chkconfig', 'puppet', 'on']),
    )
    for probe, cmd in starters:
        if os.path.exists(probe):
            util.subp(cmd, capture=False)
            return
    log.warn(("Sorry we do not know how to enable"
              " puppet services on this system"))
def handle(name, cfg, cloud, log, _args):
    """cloud-init module entry point: install, configure and start puppet."""
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'puppet' configuration found"), name)
        return
    puppet_cfg = cfg['puppet']
    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
    if not install and version:
        log.warn(("Puppet install set false but version supplied,"
                  " doing nothing."))
    elif install:
        log.debug(("Attempting to install puppet %s,"),
                  version if version else 'latest')
        cloud.distro.install_packages(('puppet', version))
    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(PUPPET_CONF_PATH)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = '\n'.join(cleaned_lines)
        puppet_config.readfp(StringIO(cleaned_contents),
                             filename=PUPPET_CONF_PATH)
        # NOTE(review): the loop variable 'cfg' shadows the function parameter
        # 'cfg'; harmless today (outer cfg unused past this point) but worth
        # renaming.
        for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
            # Cert configuration is a special case
            # Dump the puppet master ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(PUPPET_SSL_DIR, 0771)
                util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
                util.ensure_dir(PUPPET_SSL_CERT_DIR)
                util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
                util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
                util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg.iteritems():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
        # We got all our config as wanted we'll rename
        # the previous puppet.conf and create our new one
        util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
        util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
    # Set it up so it autostarts
    _autostart_puppet(log)
    # Start puppetd
    util.subp(['service', 'puppet', 'start'], capture=False)
|
fro | m jinja2 import Markup, contextfunction
from canvas.templatetags.jinja_base import (global_tag, filter_tag, render_jinja_to_string,
jinja_context_tag, update_context)
@global_tag
def activity_stream_item(activity, viewer):
    """Render the template for this activity's type and return it as safe markup."""
    # Each activity type has its own template under activity/types/.
    template_name = u'activity/types/{0}.html'.format(activity.TYPE)
    rendered = render_jinja_to_string(template_name, {
        'activity': activity,
        'viewer': viewer,
    })
    return Markup(rendered)
|
"""The auto-tuning module of tvm
This module includes:
* Tuning space definition API
* Efficient auto-tuners
* Tuning result and database support
* Distributed measurement to scale up tuning
"""
from . import database
from . import feature
from . import measure
from . import record
from . import task
from . import tu | ner
from . import util
from . import env
from . import tophub
# some shortcuts
from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \
LocalBuilder, LocalRunner, RPCRunner
from .tuner import callback
from .task import template, get_config, create, ConfigSpace, ConfigEntity, \
register_topi_compute, register_topi_schedule, \
DispatchCont | ext, FallbackContext, ApplyHistoryBest as apply_history_best
from .env import GLOBAL_SCOPE
|
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
    """uftrace regression test: trace reconstruction across nested
    setjmp/longjmp jumps (fortified glibc variant)."""
    def __init__(self):
        # Second argument is the expected trace output.
        TestBase.__init__(self, 'longjmp3', """
# DURATION TID FUNCTION
1.164 us [ 4107] | __monstartup();
0.657 us [ 4107] | __cxa_atexit();
[ 4107] | main() {
0.705 us [ 4107] | _setjmp() = 0;
1.823 us [ 4107] | getpid();
0.182 us [ 4107] | _setjmp() = 0;
[ 4107] | foo() {
[ 4107] | __longjmp_chk(1) {
8.790 us [ 4107] | } = 1; /* _setjmp */
0.540 us [ 4107] | getpid();
[ 4107] | bar() {
[ 4107] | baz() {
[ 4107] | __longjmp_chk(2) {
1.282 us [ 4107] | } = 2; /* _setjmp */
0.540 us [ 4107] | getpid();
[ 4107] | foo() {
[ 4107] | __longjmp_chk(3) {
0.578 us [ 4107] | } = 3; /* _setjmp */
[ 4107] | bar() {
[ 4107] | baz() {
[ 4107] | __longjmp_chk(4) {
0.642 us [ 4107] | } = 4; /* _setjmp */
18.019 us [ 4107] | } /* main */
""")

    def build(self, name, cflags='', ldflags=''):
        # _FORTIFY_SOURCE=2 makes glibc use the checked __longjmp_chk symbol
        # (normalized back to 'longjmp' by fixup below).
        return TestBase.build(self, name, cflags + ' -D_FORTIFY_SOURCE=2', ldflags)

    def runcmd(self):
        # Record longjmp's 2nd argument and setjmp's return value.
        args = '-A .?longjmp@arg2 -R .?setjmp@retval'
        return '%s %s %s' % (TestBase.ftrace, args, 't-' + self.name)

    def fixup(self, cflags, result):
        # Normalize the fortified symbol name so expected output matches.
        return result.replace('__longjmp_chk', "longjmp")
|
# -*- coding: utf-8 -*-
"""
Testing WordProcessingDocument
"""
# $Id: test_wordprocessing.py 6355 2007-09-20 17:16:21Z glenfant $
import unittest
import os
from fixures import *
import openxmllib
class WordProcessingTest(unittest.TestCase):
    """Testing querying properties from a document"""

    def setUp(self):
        # Open the first sample document from the test fixtures.
        test_file_path = os.path.join(TEST_FILES_IN, ALL_IN_FILES[0])
        self.doc = openxmllib.openXmlDocument(test_file_path)

    def test_indexableText(self):
        """Indexable text with properties"""
        itext = self.doc.indexableText()
        some_words = (u'A', u'full', u'chàractèrs', u'non', u'custom_value_2', u'title')
        for word in some_words:
            # assertIn replaces the deprecated failUnless alias and yields a
            # clearer failure message.
            self.assertIn(word, itext, "%s was expected" % word)

    def test_indexableTextNoprop(self):
        """Indexable text without properties"""
        itext = self.doc.indexableText(include_properties=False)
        some_words = (u'A', u'full', u'chàractèrs', u'non')
        for word in some_words:
            self.assertIn(word, itext, "%s was expected" % word)

# /class WordProcessingTest
def test_suite():
    """Build and return the suite of this module's tests."""
    return unittest.TestSuite([unittest.makeSuite(WordProcessingTest)])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.TextTestRunner().run(test_suite())
|
import sys
import math
# Don't let the machines win. You are humanity's last hope...
width = int(input()) # the number of cells on the X axis
height = int(input()) # the number of cells on the Y axis

# Matrix[x][y] holds the grid character at column x, row y.
Matrix = [[0 for x in range(height)] for x in range(width)]
# Creating the matrix
for i in range(height):
    line = input() # width characters, each either 0 or .
    for j in range(width):
        Matrix[j][i] = line[j]

cArray = []
# For every node ('0' cell) emit: its own x y, then the nearest node to the
# right, then the nearest node below ('-1 -1' when there is none).
for i in range(width):
    for j in range(height):
        coordinates = ""
        if Matrix[i][j] == '0':
            coordinates = coordinates + str(i)+" "+str(j)+" "
            # Find Next Node to the Right
            k = i+1
            print("k is "+str(k), file=sys.stderr)  # debug output only
            while k != width+1:
                if k != width:
                    if Matrix[k][j] == '0':
                        coordinates = coordinates + str(k)+" "+str(j)+" "
                        break
                else:
                    # Ran off the right edge: no neighbour on this row.
                    coordinates = coordinates + "-1 -1 "
                k += 1
            # Find Next Node to the Bottom
            k = j+1
            while k != height+1:
                if k != height:
                    if Matrix[i][k] == '0':
                        coordinates = coordinates + str(i)+" " +str(k)+" "
                        break
                else:
                    # Ran off the bottom edge: no neighbour in this column.
                    coordinates = coordinates + "-1 -1 "
                k += 1
            cArray.append(coordinates)
for c in cArray:
    print(c)
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
The EditPersonRef module provides the EditPersonRef class. This provides a
mechanism for the user to edit address information.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
from .editsecondary import EditSecondary
from gramps.gen.lib import NoteType
from ..widgets import MonitoredEntry, PrivacyButton
from ..selectors import SelectorFactory
from .displaytabs import CitationEmbedList, NoteTab
from ..glade import Glade
#-------------------------------------------------------------------------
#
# EditPersonRef class
#
#-------------------------------------------------------------------------
class EditPersonRef(EditSecondary):
    """
    Displays a dialog that allows the user to edit a person reference.
    """

    def __init__(self, dbstate, uistate, track, addr, callback):
        """
        Displays the dialog box.

        parent - The class that called the PersonRef editor.
        addr - The address that is to be edited
        """
        EditSecondary.__init__(self, dbstate, uistate, track, addr, callback)

    def _local_init(self):
        # Config keys under which the window geometry is persisted.
        self.width_key = 'interface.person-ref-width'
        self.height_key = 'interface.person-ref-height'
        self.top = Glade()
        self.set_window(self.top.toplevel,
                        self.top.get_object("title"),
                        _('Person Reference Editor'))
        self.person_label = self.top.get_object('person')

    def _setup_fields(self):
        # Show the referenced person's name if a reference is already set.
        if self.obj.ref:
            p = self.dbstate.db.get_person_from_handle(self.obj.ref)
            self.person_label.set_text(name_displayer.display(p))

        # NOTE(review): 'street' looks copied from an address editor; it
        # actually monitors the relationship entry -- consider renaming.
        self.street = MonitoredEntry(
            self.top.get_object("relationship"),
            self.obj.set_relation,
            self.obj.get_relation,
            self.db.readonly)

        self.priv = PrivacyButton(
            self.top.get_object("private"),
            self.obj,
            self.db.readonly)

    def _connect_signals(self):
        #self.define_help_button(self.top.get_object('help'))
        self.define_cancel_button(self.top.get_object('cancel'))
        self.define_ok_button(self.top.get_object('ok'),self.save)
        self.top.get_object('select').connect('clicked',self._select_person)

    def _connect_db_signals(self):
        """
        Connect any signals that need to be connected.
        Called by the init routine of the base class (_EditPrimary).
        """
        self._add_db_signal('person-rebuild', self.close)
        self._add_db_signal('person-delete', self.check_for_close)

    def check_for_close(self, handles):
        """
        Callback method for delete signals.
        If there is a delete signal of the primary object we are editing, the
        editor (and all child windows spawned) should be closed
        """
        if self.obj.ref in handles:
            self.close()

    def _select_person(self, obj):
        # Open the person selector and store the chosen person's handle.
        SelectPerson = SelectorFactory('Person')
        sel = SelectPerson(self.dbstate, self.uistate, self.track)
        person = sel.run()
        if person:
            self.obj.ref = person.get_handle()
            self.person_label.set_text(name_displayer.display(person))

    def _create_tabbed_pages(self):
        """
        Create the notebook tabs and inserts them into the main
        window.
        """
        notebook = Gtk.Notebook()

        self.srcref_list = CitationEmbedList(self.dbstate, self.uistate,
                                             self.track,
                                             self.obj.get_citation_list())
        self._add_tab(notebook, self.srcref_list)
        self.track_ref_for_deletion("srcref_list")

        self.note_tab = NoteTab(self.dbstate, self.uistate, self.track,
                                self.obj.get_note_list(),
                                notetype=NoteType.ASSOCIATION)
        self._add_tab(notebook, self.note_tab)
        self.track_ref_for_deletion("note_tab")

        self._setup_notebook_tabs(notebook)
        notebook.show_all()
        self.top.get_object('vbox').pack_start(notebook, True, True, 0)

    def build_menu_names(self, obj):
        return (_('Person Reference'),_('Person Reference Editor'))

    def save(self,*obj):
        """
        Called when the OK button is pressed. Gets data from the
        form and updates the Address data structure.
        """
        if self.obj.ref:
            if self.callback:
                self.callback(self.obj)
            self.callback = None
            self.close()
        else:
            # Refuse to save a reference that points at nobody.
            from ..dialog import ErrorDialog
            ErrorDialog(
                _('No person selected'),
                _('You must either select a person or Cancel '
                  'the edit'))
|
from __future__ import unicode_literals
from memory.mem import _Memory
class Windows2012ServerR2Memory(_Memory):
    """Memory-analysis wrapper for Windows Server 2012 R2; delegates all
    work to the generic _Memory implementation."""
    def __init__(self, params):
        super(Windows2012ServerR2Memory, self).__init__(params)

    def csv_all_modules_dll(self):
        # Delegates to the shared implementation in _Memory.
        super(Windows2012ServerR2Memory, self)._csv_all_modules_dll()

    def csv_all_modules_opened_files(self):
        super(Windows2012ServerR2Memory, self)._csv_all_modules_opened_files()
, LineStringSeries, \
PolygonSeries, fetch_attr
from geocoon.meta import META_POINT, META_LINE_STRING, META_POLYGON
import unittest
class GeoDataFrameTestCase(unittest.TestCase):
    """
    Basic GIS data frame tests.
    """
    def test_dict_constructor(self):
        """
        Test GIS data frame constructor with dictionary
        """
        data = [Point(v, v * 2) for v in [1, 2]]
        series = PointSeries(data)
        df = GeoDataFrame({'a': series})
        # Column access must preserve the specialized series type.
        self.assertEqual(PointSeries, type(df.a))

    def test_assign_new_col(self):
        """
        Test assigning GIS series as column to GIS data frame
        """
        data = [Point(v, v * 2) for v in [1, 2]]
        series = PointSeries(data)
        df = GeoDataFrame({})
        df['a'] = series
        self.assertEqual(PointSeries, type(df.a))

    def test_grouping(self):
        """
        Test GIS data frame grouping
        """
        data = [Point(v, v * 2) for v in range(5)]
        series = PointSeries(data)
        data = {
            'a': series,
            'b': [4, 5, 5, 4, 5],
        }
        df = GeoDataFrame(data)
        gdf = df.groupby('b')
        # Group b == 4 holds points 0 and 3; type and coords must survive.
        df = gdf.get_group(4)
        self.assertEqual(PointSeries, type(df.a))
        self.assertTrue(all([0, 3] == df.a.x))
        self.assertTrue(all([0, 6] == df.a.y))
        df = gdf.get_group(5)
        self.assertEqual(PointSeries, type(df.a))
        self.assertTrue(all([1, 2, 4] == df.a.x))
        self.assertTrue(all([2, 4, 8] == df.a.y))

    def test_select(self):
        """
        Test selecting from GIS data frame
        """
        data = [Point(v, v * 2) for v in range(5)]
        series = PointSeries(data)
        data = {
            'a': series,
            'b': [4, 5, 5, 4, 5],
        }
        df = GeoDataFrame(data)
        # Boolean-mask selection must also preserve the series type.
        df = df[df.b == 4]
        self.assertEqual(PointSeries, type(df.a))
        self.assertTrue(all([4] * 2 == df.b))
class GeoSeriesTestCase(unittest.TestCase):
    """
    Basic GIS series tests.
    """
    def test_create(self):
        """
        Test GIS series creation
        """
        data = [Point(v, v * 2) for v in [1, 2]]
        series = PointSeries(data)
        self.assertEqual(PointSeries, type(series))

    def test_fetch_attr(self):
        """
        Test fetch GIS properties from GIS series
        """
        data = [Point(v, v * 2) for v in [1, 2]]
        series = PointSeries(data)
        # fetch_attr maps the 'y' property over every geometry in the series.
        y = fetch_attr(series, name='y')
        self.assertTrue(all(y == [2, 4]))

    def test_select(self):
        """
        Test selecting from GIS series
        """
        data = [Point(v, v * 2) for v in [1, 2, 3, 4, 5, 6]]
        series = PointSeries(data)
        sub = series[(series.x < 4) & (series.y > 2)]
        self.assertEqual(PointSeries, type(sub))
        self.assertTrue(all([2, 3] == sub.x))
        self.assertTrue(all([4, 6] == sub.y))

    def test_select_single(self):
        """
        Test selecting single GIS object
        """
        data = [Point(v, v * 2) for v in [1, 2, 3, 4, 5, 6]]
        series = PointSeries(data)
        # Scalar indexing yields the raw geometry, not a series.
        p = series[1]
        self.assertEqual(Point, type(p))

    def test_slice(self):
        """
        Test slicing GIS series
        """
        data = [Point(v, v * 2) for v in [1, 2, 3, 4, 5, 6]]
        series = PointSeries(data)
        sub = series[:3]
        self.assertEqual(PointSeries, type(sub))
        self.assertTrue(all([1, 2, 3] == sub.x))
        self.assertTrue(all([2, 4, 6] == sub.y))
class PointSeriesTestCase(unittest.TestCase):
    """
    Point GIS series unit tests.
    """
    def test_property_adapt(self):
        """
        Test adaptation of point properties
        """
        data = [Point(v, v * 2, v * 3) for v in [5, 2, 4]]
        series = PointSeries(data)
        # Every geometry property declared in the metadata must be adapted.
        attrs = (k for k, v in META_POINT.items() if v.is_property)
        for attr in attrs:
            value = getattr(series, attr) # no error? good
            self.assertEqual(3, len(value))

    def test_method_adapt_buffer(self):
        """
        Test adaptation of point buffer method
        """
        data = [Point(v, v * 2, v * 3) for v in [5, 2, 4]]
        series = PointSeries(data)
        # buffer() on points must yield a series of polygons.
        value = series.buffer(0.2, resolution=3) # no error? good
        self.assertEqual(3, len(value))
        self.assertEqual(PolygonSeries, type(value))

    def test_method_adapt_geom(self):
        """
        Test adaptation of point methods (first param is geometry)
        """
        p1 = [Point(v, v * 2, v * 3) for v in [5, 2, 4]]
        p2 = [Point(v, v * 2, v * 3) for v in [5, 2]] + [Point(4.1, 1, 1)]
        s1 = PointSeries(p1)
        s2 = PointSeries(p2)
        methods = (k for k, v in META_POINT.items() if v.first_is_geom)
        for method in methods:
            mcall = getattr(s1, method) # no error? good
            value = mcall(s2)
            self.assertEqual(3, len(value))
            self.assertTrue(all(not callable(v) for v in value))
        # just in case
        value = s1.equals(s2)
        self.assertTrue(all([True, True, False] == value), value)
class LineStringSeriesTestCase(unittest.TestCase):
    """
    Line string GIS series unit tests.
    """
    def test_property_adapt(self):
        """
        Each adapted line-string property evaluates across the whole series.
        """
        # Three line strings over the same x values with scaled y/z coords.
        lines = [
            LineString(tuple((v, v * m, v * (m + 3)) for v in (5, 2, 4)))
            for m in (1, 2, 3)
        ]
        series = LineStringSeries(lines)
        property_names = [k for k, v in META_LINE_STRING.items() if v.is_property]
        for name in property_names:
            result = getattr(series, name)  # must not raise
            self.assertEqual(3, len(result))
            self.assertTrue(all(not callable(item) for item in result))
    def test_method_adapt(self):
        """
        Adapted line-string methods taking a geometry work element-wise.
        """
        lines = [
            LineString(tuple((v, v * m, v * (m + 3)) for v in (5, 2, 4)))
            for m in (1, 2, 3, 4)
        ]
        s1 = LineStringSeries(lines[:3])
        s2 = LineStringSeries(lines[1:])
        method_names = [k for k, v in META_LINE_STRING.items() if v.first_is_geom]
        for name in method_names:
            bound = getattr(s1, name)  # must not raise
            result = bound(s2)
            self.assertEqual(3, len(result))
            self.assertTrue(all(not callable(item) for item in result))
class PolygonSeriesTestCase(unittest.TestCase):
| """
Polygon GIS series unit tests.
"""
def test_property_adapt(self):
"""
Test adaptation of polygon properties
"""
poly = lambda v: Polygon(((v, v), (v + 0.1, v), (v + 0.2, v + 0.2), (v, v)))
data | = [poly(v) for v in [5, 2, 4]]
series = PolygonSeries(data)
attrs = (k for k, v in META_POLYGON.items() if v.is_property)
for attr in attrs:
value = getattr(series, attr) # no error? good
self.assertEqual(3, len(value))
def test_method_adapt_buffer(self):
"""
Test adaptation of polygon buffer method
"""
poly = lambda v: Polygon(((v, v), (v + 0.1, v), (v + 0.2, v + 0.2), (v, v)))
data = [poly(v) for v in [5, 2, 4]]
series = PolygonSeries(data)
value = series.buffer(0.2, resolution=3) # no error? good
self.assertEqual(3, len(value))
def test_method_adapt_geom(self):
"""
Test adaptation of polygon methods (first param is geometry)
"""
poly = lambda v: Polygon(((v, v), (v + 0.1, v), (v + 0.2, v + 0.2), (v, v)))
p1 = [poly(v) for v in [5, 2, 4]]
p2 = [poly(v) for v in [6, 2, 3]]
s1 = PolygonSeries(p1)
s2 = PolygonSeries(p2)
methods = (k for k, v in META_POLYGON.items() if v.first_is_geom)
for meth |
# -*- coding: utf-8 -*-
"""
***************************************************************************
SplitRGBBands.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.tools.system import *
from processing.tools import dataobjects
from processing.saga.SagaUtils import SagaUtils
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.outputs.OutputRaster import OutputRaster
import os
class SplitRGBBands(GeoAlgorithm):
    """Split an RGB raster layer into three single-band rasters using SAGA."""

    INPUT = "INPUT"
    R = "R"
    G = "G"
    B = "B"

    def getIcon(self):
        return QtGui.QIcon(
            os.path.join(os.path.dirname(__file__), "..", "images", "saga.png"))

    def defineCharacteristics(self):
        self.name = "Split RGB bands"
        self.group = "Grid - Tools"
        self.addParameter(ParameterRaster(SplitRGBBands.INPUT, "Input layer", False))
        self.addOutput(OutputRaster(SplitRGBBands.R, "Output R band layer"))
        self.addOutput(OutputRaster(SplitRGBBands.G, "Output G band layer"))
        self.addOutput(OutputRaster(SplitRGBBands.B, "Output B band layer"))

    def processAlgorithm(self, progress):
        # TODO: check correct number of bands
        # Renamed from 'input' to avoid shadowing the builtin.
        inputLayer = self.getParameterValue(SplitRGBBands.INPUT)
        # SAGA chokes on dots and other special characters in grid file
        # names, so derive a sanitized temporary base name first.
        temp = getTempFilename(None).replace('.', '')
        basename = os.path.basename(temp)
        validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        safeBasename = ''.join(c for c in basename if c in validChars)
        temp = os.path.join(os.path.dirname(temp), safeBasename)
        # SAGA imports the RGB layer as grids _0001.., exported below in the
        # same R, G, B order.
        outputs = [
            self.getOutputValue(SplitRGBBands.R),
            self.getOutputValue(SplitRGBBands.G),
            self.getOutputValue(SplitRGBBands.B),
        ]
        # The SAGA module library name differs between Windows and other
        # platforms ('io_gdal' vs 'libio_gdal').
        prefix = "io_gdal" if isWindows() else "libio_gdal"
        commands = ['%s 0 -GRIDS "%s" -FILES "%s"' % (prefix, temp, inputLayer)]
        for index, output in enumerate(outputs, start=1):
            commands.append(
                '%s 1 -GRIDS "%s_%04d.sgrd" -FORMAT 1 -TYPE 0 -FILE "%s"'
                % (prefix, temp, index, output))
        SagaUtils.createSagaBatchJobFileFromSagaCommands(commands)
        SagaUtils.executeSaga(progress)
|
import subprocess
from subprocess import Popen, PIPE
import os
import re
def compiler_preprocessor_verbose(compiler, extraflags):
    """Run the compiler preprocessor stage in verbose mode and capture it.

    Args:
        compiler: compiler executable name or path.
        extraflags: iterable of extra command-line flags (e.g. ['-x', 'c++']).

    Returns:
        The compiler's stderr output, decoded as UTF-8, as a list of lines.
    """
    cmd = [compiler, '-E'] + list(extraflags) + ['-', '-v']
    with open(os.devnull, 'r') as devnull:
        p = Popen(cmd, stdin=devnull, stdout=PIPE, stderr=PIPE)
        # communicate() drains stdout and stderr concurrently. The previous
        # version called wait() before reading stderr, which can deadlock if
        # the child fills a pipe buffer, and it never read/closed stdout.
        _, stderr_data = p.communicate()
    return stderr_data.decode('utf-8').splitlines()
def system_include_paths(compiler, cpp=True):
    """Return the compiler's system #include <...> search paths as a list."""
    extraflags = ['-x', 'c++'] if cpp else []
    stripped = [line.strip()
                for line in compiler_preprocessor_verbose(compiler, extraflags)]
    # The include paths are listed between these two marker lines.
    start = stripped.index('#include <...> search starts here:')
    end = stripped.index('End of search list.')
    return [
        line.replace('(framework directory)', '').strip()
        for line in stripped[start + 1:end]
    ]
|
# coding: utf-8
"""Client program.
How to collect data:
1. Positions received as a list of points
2. Interpolate function (probably unstructured grid, see scipy.interpolate.griddata)
3. Evaluate function on points coming from the FEniCS mesh
4. Restore the values onto the array of a FEniCS function in the proper order
Hint: http://fenicsproject.org/qa/3975/interpolating-vector-function-from-python-code-to-fenics#a3976
fe.vector()[V.dofmap().dofs()] = f(x, y)
"""
import sys
import zmq
import pprint
# Socket to talk to server
context = zmq.Context()
# REQ socket: strict send/recv alternation with the REP server.
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5556")
print("Collecting data...")
dt = 0.1  # s
t_aim = 0.0  # s
# Collect all
# NOTE(review): loops forever; relies on the user interrupting the process.
while True:
    # We first initialize the server
    socket.send_pyobj({'time': t_aim})
    t_aim += dt
    data = socket.recv_pyobj()
    print(data['time'], data.keys())
|
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import PaypalProvider
class PaypalTests(OAuth2TestsMixin, TestCase):
    # Runs the shared allauth OAuth2 provider test suite against PayPal.
    provider_id = PaypalProvider.id

    def get_mocked_response(self):
        # Canned 200 response imitating PayPal's identity profile endpoint.
        return MockedResponse(
            200,
            """
        {
        "user_id":
 "https://www.paypal.com/webapps/auth/server/64ghr894040044",
        "name": "Jane Doe",
        "given_name": "Jane",
        "family_name": "Doe",
        "email": "janedoe@example.com"
        }
        """,
        )
|
import mock
import json
import pytest
import prf.exc
def fake_resp(code):
    """Build a stand-in HTTP response object for exercising prf.exc helpers."""
    attrs = dict(
        code=str(code),
        status_code=code,
        headers=[],
        title='title',
        detail='detail',
        explanation='explanation',
    )
    return mock.MagicMock(**attrs)
class TestExc(object):
    """Unit tests for prf.exc response creation and exception logging."""

    @mock.patch('prf.exc.log_exception')
    def test_create_response(self, fake_log_exception):
        # A 2xx response must not be routed through log_exception.
        out = prf.exc.create_response(fake_resp(200), {})
        assert fake_log_exception.call_count == 0
        # Temporarily pop timestamp, we need to freeze time to test it
        d = json.loads(out.text)
        d.pop('timestamp')
        assert {
            'explanation': 'explanation',
            'code': '200',
            'detail': 'detail',
            'title': 'title'
        } == d
        assert out.content_type == 'application/json'

    @mock.patch('prf.exc.add_stack')
    @mock.patch('prf.exc.logger')
    def test_log_exception(self, fake_logger, fake_add_stack):
        # log_exception should capture a stack trace and log at error level.
        request = mock.MagicMock(
            url = 'url',
            remote_user = 'remote_user',
            client_addr = 'client_addr',
            remote_addr = 'remote_addr'
        )
        out = prf.exc.log_exception(fake_resp(400),
            params = dict(
                headers = ['Header'],
                request = request,
                extra = {'a': 123},
                detail = 'param detail'
            )
        )
        assert fake_add_stack.called
        assert fake_logger.error.called

    @mock.patch('prf.exc.log_exception')
    def test_create_response_w_log(self, fake_log_exception):
        # A 4xx response gets an error_id and is passed to log_exception.
        in_resp = mock.MagicMock()
        in_resp.code = '400'
        in_resp.status_code = 400
        out = prf.exc.create_response(in_resp, {})
        assert 'error_id' in json.loads(out.text)
        assert fake_log_exception.call_count == 1

    def test_exception_response(self):
        # Success responses carry no error_id; error responses do.
        out = prf.exc.exception_response(200)
        assert out.code == 200
        assert out.content_type == 'application/json'
        assert 'error_id' not in out.json
        out = prf.exc.exception_response(400, extra={'a':123})
        assert 'error_id' in out.json

    def test_statuses(self):
        # HTTPCreated merges the Location header into the resource payload.
        res = {'id':1}
        out = prf.exc.HTTPCreated(
            location = 'http://location',
            resource = res
        )
        assert out.json['resource'] == {'self': 'http://location', 'id': 1}
|
rmat
from time import clock, sleep
try:
import unittest2 as unittest
except ImportError:
import unittest
import config
from event_stack import TimeOutReached
from database_reception import Database_Reception
from static_agent_pools import Receptionists, Customers
logging.basicConfig (level = logging.INFO)
class Test_Case (unittest.TestCase):
Caller = None
Receptionist = None
Receptionist_2 = None
Callee = None
Reception_Database = None
Reception = None
Start_Time = None
Next_Step = 1
def Preconditions (self, Reception):
self.Start_Time = clock ()
self.Next_Step = 1
self.Log ("Incoming calls test case: Setting up preconditions...")
self.Log ("Requesting a customer (caller)...")
self.Caller = Customers.request ()
self.Log ("Requesting a receptionist...")
self.Receptionist = Receptionists.request ()
self.Log ("Requesting a second receptionist...")
self.Receptionist_2 = Receptionists.request ()
self.Log ("Requesting a customer (callee)...")
self.Callee = Customers.request ()
self.Log ("Select which reception to test...")
self.Reception = Reception
self.Log ("Select a reception database connection...")
self.Reception_Database = Database_Reception (uri = config.reception_server_uri,
authtoken = self.Receptionist.call_control.authtoken)
def Postprocessing (self):
self.Log ("Incoming calls test case: Cleaning up after test...")
if not self.Caller is None:
self.Caller.release ()
if not self.Receptionist is None:
self.Receptionist.release ()
if not self.Receptionist_2 is None:
self.Receptionist_2.release ()
if not self.Callee is None:
self.Callee.release ()
def Step (self,
Message,
Delay_In_Seconds = 0.0):
if self.Next_Step is None:
self.Next_Step = 1
if self.Start_Time is None:
self.Start_Time = clock ()
logging.info ("Step " + str (self.Next_Step) + ": " + Message)
sleep (Delay_In_Seconds)
self.Next_Step += 1
    def Log (self,
             Message,
             Delay_In_Seconds = 0.0):
        # Log an informational message under the current (previous) step
        # number; optionally pause afterwards. Counters are lazily
        # initialised in case Preconditions was skipped.
        if self.Next_Step is None:
            self.Next_Step = 1
        if self.Start_Time is None:
            self.Start_Time = clock ()
        logging.info ("      " + str (self.Next_Step - 1) + ": " + Message)
        sleep (Delay_In_Seconds)
def Caller_Places_Call (self, Number):
self.Step (Message = "Caller places call to " + str (Number) + "...")
self.Log (Message = "Dialling through caller agent...")
self.Caller.dial (Number)
def Receptionist_Places_Call (self, Number):
self.Step (Message = "Receptionist places call to " + str (Number) + "...")
self.Log (Message = "Dialling through receptionist agent...")
self.Receptionist.dial (Number)
def Caller_Hears_Dialtone (self):
self.Step (Message = "Caller hears dial-tone...")
self.Log (Message = "Caller agent waits for dial-tone...")
self.Caller.sip_phone.Wait_For_Dialtone ()
def Receptionist_Hears_Dialtone (self):
self.Step (Message = "Receptionist hears dial-tone...")
self.Log (Message = "Receptionist agent waits for dial-tone...")
self.Receptionist.sip_phone.Wait_For_Dialtone ()
def Call_Announced (self):
self.Step (Message = "Receptionist's client waits for 'call_offer'...")
try:
self.Receptionist.event_stack.WaitFor ("call_offer")
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("Call offer didn't arrive from Call-Flow-Control.")
if not self.Receptionist.event_stack.stack_contains (event_type="call_offer",
destination=self.Reception):
logging.critical (self.Receptionist.event_stack.dump_s | tack ())
self.fail ("The arrived call offer was not for the expected reception (destination).")
return self.Receptionist.event_stack.Get_Latest_Event (Event_Type="call_offer", Destination=self.Reception)['call']['id'],\
self.Receptionist.event_stack.Get_Latest_Event (Event_Type="call_offer", Destination=self.Reception)['call']['reception_id']
def Call_Announced_As_Locked | (self, Call_ID):
self.Step (Message = "Call-Flow-Control sends out 'call_lock'...")
try:
self.Receptionist.event_stack.WaitFor (event_type = "call_lock",
call_id = Call_ID,
timeout = 20.0)
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("No 'call_lock' event arrived from Call-Flow-Control.")
if not self.Receptionist.event_stack.stack_contains (event_type = "call_lock",
destination = self.Reception,
call_id = Call_ID):
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("The arrived 'call_lock' event was not for the expected reception (destination).")
def Call_Announced_As_Unlocked (self, Call_ID):
self.Step (Message = "Call-Flow-Control sends out 'call_unlock'...")
try:
self.Receptionist.event_stack.WaitFor (event_type = "call_unlock",
call_id = Call_ID)
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("No 'call_unlock' event arrived from Call-Flow-Control.")
if not self.Receptionist.event_stack.stack_contains (event_type = "call_unlock",
destination = self.Reception,
call_id = Call_ID):
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("The arrived 'call_unlock' event was not for the expected reception (destination).")
def Request_Information (self, Reception_ID):
self.Step (Message = "Requesting (updated) information about reception " + str (Reception_ID))
Data_On_Reception = self.Reception_Database.Single (Reception_ID)
self.Step (Message = "Received information on reception " + str (Reception_ID))
return Data_On_Reception
def Offer_To_Pick_Up_Call (self, Call_Flow_Control, Call_ID):
self.Step (Message = "Client offers to answer call...")
try:
Call_Flow_Control.PickupCall (call_id = Call_ID)
except:
self.Log (Message = "Pick-up call returned an error of some kind.")
def Call_Allocation_Acknowledgement (self, Call_ID, Receptionist_ID):
self.Step (Message = "Receptionist's client waits for 'call_pickup'...")
try:
self.Receptionist.event_stack.WaitFor (event_type = "call_pickup",
call_id = Call_ID)
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("No 'call_pickup' event arrived from Call-Flow-Control.")
try:
Event = self.Receptionist.event_stack.Get_Latest_Event (Event_Type = "call_pickup",
Call_ID = Call_ID)
except:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("Could not extract the received 'call_pickup' event from the Call-Flow-Control client.")
try:
if not Event['call']['assigned_to'] == Receptionist_ID:
|
"""
WSGI config for puuch project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a c | ustom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "puuch.settings"
os.environ.setdefault("DJANGO_SE | TTINGS_MODULE", "puuch.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016, Zato Source s.r.o. https://zato.io
Licensed u | nder LGPLv3, see LICENSE.txt fo | r terms and conditions.
"""
|
to use in
the request.
Returns:
A list of keys whose values were NOT set because they already existed
in memcache. On total success, this list should be empty.
"""
return self._set_multi_with_policy(MemcacheSetRequest.REPLACE, mapping,
time=time, key_prefix=key_prefix,
namespace=namespace)
def replace_multi_async(self, mapping, time=0, key_prefix='',
min_compress_len=0, namespace=None, rpc=None):
"""Async version of replace_multi() -- note different return value.
Returns:
See _set_multi_async_with_policy().
"""
return self._set_multi_async_with_policy(MemcacheSetRequest.REPLACE,
mapping,
time=time, key_prefix=key_prefix,
namespace=namespace, rpc=rpc)
  def cas_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                namespace=None):
    """Compare-And-Set update for multiple keys.
    See cas() docstring for an explanation.
    Args:
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure. Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      min_compress_len: Unimplemented compatibility option.
      namespace: a string specifying an optional namespace to use in
        the request.
    Returns:
      A list of keys whose values were NOT set because the compare
      failed. On total success, this list should be empty.
    """
    # Delegates to the shared multi-set implementation with the CAS policy.
    return self._set_multi_with_policy(MemcacheSetRequest.CAS, mapping,
                                       time=time, key_prefix=key_prefix,
                                       namespace=namespace)
def cas_multi_async(self, mapping, time=0, key_prefix='',
min_compress_len=0, namespace=None, rpc=None):
"""Async version of cas_multi() -- note different return value.
Returns:
See _set_multi_async_with_policy().
"""
return self._set_multi_async_with_policy(MemcacheSetRequest.CAS, mapping,
time=time, key_prefix=key_prefix,
namespace=namespace, rpc=rpc)
  def incr(self, key, delta=1, namespace=None, initial_value=None):
    """Atomically increments a key's value.
    Internally, the value is a unsigned 64-bit integer. Memcache
    doesn't check 64-bit overflows. The value, if too large, will
    wrap around.
    Unless an initial_value is specified, the key must already exist
    in the cache to be incremented. To initialize a counter, either
    specify initial_value or set() it to the initial value, as an
    ASCII decimal integer. Future get()s of the key, post-increment,
    will still be an ASCII decimal value.
    Args:
      key: Key to increment. If an iterable collection, each one of the keys
        will be offset. See Client's docstring for details.
      delta: Non-negative integer value (int or long) to increment key by,
        defaulting to 1.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist. The default value, None, will not create a cache
        entry if it doesn't already exist.
    Returns:
      If key was a single value, the new long integer value, or None if key
      was not in the cache, could not be incremented for any other reason, or
      a network/RPC/server error occurred.
      If key was an iterable collection, a dictionary will be returned
      mapping supplied keys to values, with the values having the same meaning
      as the singular return value of this method.
    Raises:
      ValueError: If number is negative.
      TypeError: If delta isn't an int or long.
    """
    # is_negative=False selects increment in the shared implementation.
    return self._incrdecr(key, False, delta, namespace=namespace,
                          initial_value=initial_value)
def incr_async(self, key, delta=1, namespace=None, initial_value=None,
rpc=None):
"""Async version of incr().
Returns:
A UserRPC instance whose get_result() method returns the same
kind of value as incr() returns.
"""
return self._incrdecr_async(key, False, delta, namespace=namespace,
initial_value=initial_value, rpc=rpc)
  def decr(self, key, delta=1, namespace=None, initial_value=None):
    """Atomically decrements a key's value.
    Internally, the value is a unsigned 64-bit integer. Memcache
    caps decrementing below zero to zero.
    The key must already exist in the cache to be decremented. See
    docs on incr() for details.
    Args:
      key: Key to decrement. If an iterable collection, each one of the keys
        will be offset. See Client's docstring for details.
      delta: Non-negative integer value (int or long) to decrement key by,
        defaulting to 1.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist. The default value, None, will not create a cache
        entry if it doesn't already exist.
    Returns:
      If key was a single value, the new long integer value, or None if key
      was not in the cache, could not be decremented for any other reason, or
      a network/RPC/server error occurred.
      If key was an iterable collection, a dictionary will be returned
      mapping supplied keys to values, with the values having the same meaning
      as the singular return value of this method.
    Raises:
      ValueError: If number is negative.
      TypeError: If delta isn't an int or long.
    """
    # is_negative=True selects decrement in the shared implementation.
    return self._incrdecr(key, True, delta, namespace=namespace,
                          initial_value=initial_value)
def decr_async(self, key, delta=1, namespace=None, initial_value=None,
rpc=None):
"""Async version of decr().
Returns:
A UserRPC instance whose get_result() method returns the same
kind of value as decr() returns.
"""
return self._incrdecr_async(key, True, delta, namespace=namespace,
initial_value=initial_value, rpc=rpc)
def _incrdecr(self, key, is_negative, delta, namespace=None,
initial_value=None):
"""Increment or decrement a key by a provided delta.
Args:
key: Key to increment or decrement. If an iterable collection, each
one of the keys will be offset.
is_negative: Boolean, if this is a decrement.
delta: Non-negative integer amount (int or long) to increment
or decrement by.
namespace: a string specifying an optional namespace to use in
the request.
initial_value: initial value to put in the cache, if it doesn't
already exist. The default value, None, will not create a cache
entry if it doesn't already exist.
Returns:
New long integer value, or None on cache miss or network/RPC/server
error.
Raises:
ValueError: If delta is negative.
TypeError: If delta isn't an int or long.
"""
rpc = self._incrdecr_async(key, is_negative, delta, namespace,
initial_value)
return rpc.get_result()
def _incrdecr_async(self, key, is_negative, delta, namespace=None,
initial_value=None, rpc=None):
"""Async version of _incrdecr().
Returns:
A UserRPC instance whose get_result() method returns the same
kind of value as _incrdecr() returns.
"""
if not isinstance(delta, (int, long)):
raise TypeError('Delta must be an integer or long, received %r' % delta)
if delta < 0:
raise ValueError('Delta must not be negative.')
|
# coding: utf8
# Copyright 2015 Vincent Jacques <vincent@vincent-jacques.net>
import unittest
import MockMockMock
from ..bus import Bus
from .ping import Ping
from .read_data import ReadData
from .write_data import WriteData
from .reg_write import RegWrite
from .action import Action
from .reset import Reset
from .sync_write import SyncWrite
class InstructionsOnBusTestCase(unittest.TestCase):
    """Verify the exact byte frames each instruction puts on the Dynamixel bus.

    The hardware is mocked; each test pins the request frame sent and the
    canned status frame received back.
    """
    def setUp(self):
        super(InstructionsOnBusTestCase, self).setUp()
        self.mocks = MockMockMock.Engine()
        self.hardware = self.mocks.create("hardware")
        self.bus = Bus(self.hardware.object)

    def tearDown(self):
        # Verifies all expectations set on the mocked hardware were met.
        self.mocks.tearDown()
        super(InstructionsOnBusTestCase, self).tearDown()

    def test_ping(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x07, 0x02, 0x01, 0xF5])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
        self.hardware.expect.receive(2).and_return([0x00, 0xF6])
        self.bus.send(0x07, Ping())

    def test_read_data(self):
        # http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm (example 1)
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2B, 0x01, 0xCC])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x01, 0x03])
        self.hardware.expect.receive(3).and_return([0x00, 0x20, 0xDB])
        ident, error, response = self.bus.send(0x01, ReadData(0x2B))
        self.assertEqual(response.data, [0x20])

    def test_write_data(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x06, 0x03, 0x2B, 0x10, 0x11, 0x12, 0x97])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
        self.hardware.expect.receive(2).and_return([0x00, 0xF6])
        self.bus.send(0x01, WriteData(0x2B, [0x10, 0x11, 0x12]))

    def test_reg_write(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x06, 0x04, 0x2B, 0x10, 0x11, 0x12, 0x96])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
        self.hardware.expect.receive(2).and_return([0x00, 0xF6])
        self.bus.send(0x01, RegWrite(0x2B, [0x10, 0x11, 0x12]))

    def test_action(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x07, 0x02, 0x05, 0xF1])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
        self.hardware.expect.receive(2).and_return([0x00, 0xF6])
        self.bus.send(0x07, Action())

    def test_reset(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x07, 0x02, 0x06, 0xF0])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
        self.hardware.expect.receive(2).and_return([0x00, 0xF6])
        self.bus.send(0x07, Reset())

    def test_sync_write(self):
        # http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm (example 5)
        # Broadcast (id 0xFE): no status frame is expected back.
        self.hardware.expect.send([
            0xFF, 0xFF, 0xFE, 0x18, 0x83, 0x1E, 0x04,
            0x00, 0x10, 0x00, 0x50, 0x01,
            0x01, 0x20, 0x02, 0x60, 0x03,
            0x02, 0x30, 0x00, 0x70, 0x01,
            0x03, 0x20, 0x02, 0x80, 0x03,
            0x12,
        ])
        self.bus.broadcast(SyncWrite(
            0x1E,
            {
                0: [0x10, 0x00, 0x50, 0x01],
                1: [0x20, 0x02, 0x60, 0x03],
                2: [0x30, 0x00, 0x70, 0x01],
                3: [0x20, 0x02, 0x80, 0x03],
            }
        ))
|
# coding: utf-8
from unittest import TestCase
#import os
#import sys
#root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
#sys.path.insert(0, root)
from test.util import GRAB_TRANSPORT, ignore_transport, only_transport
from test.server import SERVER
from grab.selector import KitSelector
from grab import Grab
from grab.util.py3k_support import *
HTML = """
<html>
<body>
<h1>test</h1>
<ul>
<li>one</li>
<li>two</li>
<li>three</li>
<li class="zzz" id="6">z 4 foo</li>
</ul>
<ul id="second-list">
<li class="li-1">yet one</li>
<li class="li-2">yet two</li>
</ul>
</body>
</html>
"""
class KitSelectorTestCase(TestCase):
    """Tests for KitSelector querying a WebKit-rendered document."""

    def setUp(self):
        # Serve the fixture HTML and load it through the kit transport.
        g = Grab(transport='grab.transport.kit.KitTransport')
        SERVER.RESPONSE['get'] = HTML
        g.go(SERVER.BASE_URL)
        self.qt_doc = g.transport.kit.page.mainFrame().documentElement()

    def test_in_general(self):
        # Constructing a selector over the document must not raise.
        sel = KitSelector(self.qt_doc)

    def test_select_node(self):
        sel = KitSelector(self.qt_doc).select('h1')[0]
        # assertEquals is a deprecated alias of assertEqual; modernized.
        self.assertEqual('test', sel.node.toInnerXml())

    def test_html(self):
        sel = KitSelector(self.qt_doc).select('h1')[0]
        self.assertEqual('<h1>test</h1>', sel.html())

    def test_textselector(self):
        self.assertEqual('one', KitSelector(self.qt_doc).select('li').text())

    def test_number(self):
        self.assertEqual(4, KitSelector(self.qt_doc).select('li.zzz').number())

    # TODO
    # test the ID selector (#6)
    #def test_text_selector(self):
        #sel = KitSelector(self.qt_doc).select('//li/text()').one()
        #self.assertTrue(isinstance(sel, TextSelector))

    ## TODO: add --pyquery flag to runtest script
    ##def test_select_pyquery(self):
        ##root = Selector(self.qt_doc)
        ##self.assertEquals('test', root.select(pyquery='h1')[0].node.text)
        ##self.assertEquals('z 4 foo', root.select(pyquery='body')[0].select(pyquery='#6')[0].node.text)

    def test_select_select(self):
        root = KitSelector(self.qt_doc)
        self.assertEqual(set(['one', 'yet one']),
            set([x.text() for x in root.select('ul').select('li:first-child')]),
        )

    def test_text_list(self):
        root = KitSelector(self.qt_doc)
        self.assertEqual(set(['one', 'yet one']),
            set(root.select('ul > li:first-child').text_list()),
        )

    def test_attr_list(self):
        root = KitSelector(self.qt_doc)
        self.assertEqual(set(['li-1', 'li-2']),
            set(root.select('ul[id=second-list] > li')\
                    .attr_list('class'))
        )
class TestSelectorList(TestCase):
    """Tests for list-level operations on KitSelector results."""

    def setUp(self):
        # Serve the fixture HTML and load it through the kit transport.
        g = Grab(transport='grab.transport.kit.KitTransport')
        SERVER.RESPONSE['get'] = HTML
        g.go(SERVER.BASE_URL)
        self.qt_doc = g.transport.kit.page.mainFrame().documentElement()

    def test_one(self):
        sel = KitSelector(self.qt_doc).select('ul > li')
        # assertEquals is a deprecated alias of assertEqual; modernized.
        self.assertEqual('one', unicode(sel.one().node.toPlainText()))
        self.assertEqual('one', sel.text())

    def test_number(self):
        sel = KitSelector(self.qt_doc).select('li:nth-child(4)')
        self.assertEqual(4, sel.number())

    def test_exists(self):
        sel = KitSelector(self.qt_doc).select('li:nth-child(4)')
        self.assertEqual(True, sel.exists())
        sel = KitSelector(self.qt_doc).select('li:nth-child(5)')
        self.assertEqual(False, sel.exists())
|
# syft absolute
import syft as | sy
from syft.lib.python.string import String
from syft.proto.lib.python.string_pb2 import String as String_PB
def test_string_serde() -> None:
    """Round-trip a syft String through its protobuf representation."""
    original = String("Hello OpenMined")
    proto = original._object2proto()
    assert isinstance(proto, String_PB)
    restored = String._proto2object(proto=proto)
    assert isinstance(restored, String)
    # The syft object id must survive the round trip.
    assert restored.id == original.id
def test_string_send(client: sy.VirtualMachineClient) -> None:
    """Send a syft String to a VM client and fetch it back unchanged."""
    value = String("Hello OpenMined!")
    pointer = value.send(client)
    # Check pointer type
    assert pointer.__class__.__name__ == "StringPointer"
    # Check that we can get back the object
    assert pointer.get() == value
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
from __future__ import pr | int_function
import TestGyp
import TestMac
import os
import plistlib
import subprocess
import sys
if sys.platform == 'darwin':
print("This test is currently disabled: https://crbug.com/483696.")
sys.exit(0)
def ExpectEq(expected, actual):
  '''Fail the gyp test (without raising) when actual differs from expected.'''
  if expected == actual:
    return
  print('Expected "%s", got "%s"' % (expected, actual), file=sys.stderr)
  test.fail_test()
def | ls(path):
'''Returns a list of all files in a directory, relative to the directory.'''
result = []
for dirpath, _, files in os.walk(path):
for f in files:
result.append(os.path.join(dirpath, f)[len(path) + 1:])
return result
# Xcode support for assets catalogs was introduced in Xcode 6.0.
# NOTE(review): the darwin early-exit above means this branch can never run
# while the test is disabled — confirm before relying on any of it.
if sys.platform == 'darwin' and TestMac.Xcode.Version() >= '0600':
  test_gyp_path = 'test-assets-catalog.gyp'
  test_app_path = 'Test App Assets Catalog Gyp.app'
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  test.run_gyp(test_gyp_path, chdir='app-bundle')
  test.build(test_gyp_path, test.ALL, chdir='app-bundle')
  # Binary
  test.built_file_must_exist(
      os.path.join(test_app_path, 'Contents/MacOS/Test App Assets Catalog Gyp'),
      chdir='app-bundle')
  # Info.plist
  info_plist = test.built_file_path(
      os.path.join(test_app_path, 'Contents/Info.plist'),
      chdir='app-bundle')
  test.must_exist(info_plist)
  test.must_contain(
      info_plist,
      'com.google.Test-App-Assets-Catalog-Gyp')  # Variable expansion
  test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}');
  if test.format != 'make':
    # TODO: Synthesized plist entries aren't hooked up in the make generator.
    # NOTE(review): this is Python-2-era code — under Python 3,
    # check_output returns bytes and rstrip('\n') would raise; and
    # plistlib.readPlist was removed in 3.9. Confirm target interpreter.
    machine = subprocess.check_output(['sw_vers', '-buildVersion']).rstrip('\n')
    plist = plistlib.readPlist(info_plist)
    ExpectEq(machine, plist['BuildMachineOSBuild'])
    expected = ''
    version = TestMac.Xcode.SDKVersion()
    expected = 'macosx' + version
    ExpectEq(expected, plist['DTSDKName'])
    sdkbuild = TestMac.Xcode.SDKBuild()
    if not sdkbuild:
      # Above command doesn't work in Xcode 4.2.
      sdkbuild = plist['BuildMachineOSBuild']
    ExpectEq(sdkbuild, plist['DTSDKBuild'])
    ExpectEq(TestMac.Xcode.Version(), plist['DTXcode'])
    ExpectEq(TestMac.Xcode.Build(), plist['DTXcodeBuild'])
  # Resources: each localized .strings file must exist and start with a
  # UTF-16LE BOM, matching what Xcode emits.
  strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
  for f in strings_files:
    strings = test.built_file_path(
        os.path.join(test_app_path, 'Contents/Resources/English.lproj', f),
        chdir='app-bundle')
    test.must_exist(strings)
    # Xcode writes UTF-16LE with BOM.
    contents = open(strings, 'rb').read()
    if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
      test.fail_test()
  test.built_file_must_exist(
      os.path.join(
          test_app_path, 'Contents/Resources/English.lproj/MainMenu.nib'),
      chdir='app-bundle')
  # make does not support .xcassets files
  extra_content_files = []
  if test.format != 'make':
    extra_content_files = ['Contents/Resources/Assets.car']
  for f in extra_content_files:
    test.built_file_must_exist(
        os.path.join(test_app_path, f),
        chdir='app-bundle')
  # Packaging: PkgInfo must exist and carry the APPL type/creator code.
  test.built_file_must_exist(
      os.path.join(test_app_path, 'Contents/PkgInfo'),
      chdir='app-bundle')
  test.built_file_must_match(
      os.path.join(test_app_path, 'Contents/PkgInfo'), 'APPLause',
      chdir='app-bundle')
  # Check that no other files get added to the bundle.
  if set(ls(test.built_file_path(test_app_path, chdir='app-bundle'))) != \
     set(['Contents/MacOS/Test App Assets Catalog Gyp',
          'Contents/Info.plist',
          'Contents/Resources/English.lproj/MainMenu.nib',
          'Contents/PkgInfo',
          ] + extra_content_files +
         [os.path.join('Contents/Resources/English.lproj', f)
          for f in strings_files]):
    test.fail_test()
  test.pass_test()
|
import threading
import Queue
from random import random
# Shared counter incremented by worker threads (Python 2 script).
theVar = 0
class MyThread ( threading.Thread ):
    # Worker thread: consumes items from the shared queue until it receives
    # the 'stop' sentinel, doing some busy-work per item.
    def run ( self ):
        global theVar
        while True:
            client=myPool.get()
            print '- iniciando thread-'
            if client=='stop':
                print 'Thread terminou <-'
                return 0
            elif client!=None:
                # Busy-loop of random length to simulate CPU work.
                for x in xrange(int(random()*1000000)): continue
                # NOTE(review): this increment is not lock-protected, so
                # concurrent updates may race — confirm whether exact counts
                # matter here.
                theVar = theVar + 1
                print '- Thread rodou -'
# ## Driver script: start workers, feed them jobs, then send stop sentinels.
print '#####################################################'
# number of worker threads
NUMTHREADS = 3
NUMPROCESSES=20
# create the shared work queue (maxsize 0 = unbounded)
myPool = Queue.Queue(0)
# start NUMTHREADS worker threads (original comment said 2, but NUMTHREADS is 3)
for x in xrange (NUMTHREADS):
    print '-> Iniciando thread', x
    MyThread().start()
# pass data into the thread pool and run the workers a couple of times
for x in xrange(NUMPROCESSES):
    print '- passando dados para thread -'
    myPool.put('dummy')
# stop the threads: one 'stop' sentinel per worker
for x in xrange(NUMTHREADS):
    print '- Stopping thread -'
    myPool.put('stop')
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` | [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAP | I document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.attributes_mandatory import AttributesMandatory # noqa: E501
from talon_one.rest import ApiException
class TestAttributesMandatory(unittest.TestCase):
    """AttributesMandatory unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a test AttributesMandatory instance.

        When ``include_optional`` is False only required parameters are
        supplied; when True both required and optional parameters are set.
        """
        # model = talon_one.models.attributes_mandatory.AttributesMandatory()  # noqa: E501
        if not include_optional:
            return AttributesMandatory()
        return AttributesMandatory(
            campaigns=['0'],
            coupons=['0'],
        )

    def testAttributesMandatory(self):
        """Test AttributesMandatory"""
        # Construction must succeed both with and without optional params.
        self.make_instance(include_optional=False)
        self.make_instance(include_optional=True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
energy_import",
"start": period2.isoformat(),
"end": (period2 + timedelta(hours=1)).isoformat(),
"max": None,
"mean": None,
"min": None,
"last_reset": None,
"state": approx(1.0),
"sum": approx(3.0),
},
]
}
# Update the previously inserted statistics
external_statistics = {
"start": period1,
"max": 1,
"mean": 2,
"min": 3,
"last_reset": None,
"state": 4,
"sum": 5,
}
async_add_external_statistics(hass, external_metadata, (external_statistics,))
wait_recording_done(hass)
stats = statistics_during_period(hass, zero, period="hour")
assert stats == {
"test:total_energy_import": [
{
"statistic_id": "test:total_energy_import",
"start": period1.isoformat(),
"end": (period1 + timedelta(hours=1)).isoformat(),
"max": approx(1.0),
"mean": approx(2.0),
"min": approx(3.0),
"last_reset": None,
"state": approx(4.0),
"sum": approx(5.0),
},
{
"statistic_id": "test:total_energy_import",
"start": period2.isoformat(),
"end": (period2 + timedelta(hours=1)).isoformat(),
"max": None,
"mean": None,
"min": None,
"last_reset": None,
"state": approx(1.0),
"sum": approx(3.0),
},
]
}
def test_external_statistics_errors(hass_recorder, caplog):
    """Test validation of external statistics.

    Each scenario below copies the valid template metadata/statistics, breaks
    exactly one field, and verifies the insert raises HomeAssistantError and
    leaves the statistics tables untouched.
    """
    hass = hass_recorder()
    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text
    zero = dt_util.utcnow()
    period1 = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
    # Valid template payloads; copied and mutated per scenario.
    _external_statistics = {
        "start": period1,
        "last_reset": None,
        "state": 0,
        "sum": 2,
    }
    _external_metadata = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import",
        "unit_of_measurement": "kWh",
    }
    # Attempt to insert statistics for an entity (entity ids are reserved for
    # internally recorded statistics, so "sensor." must be rejected)
    external_metadata = {
        **_external_metadata,
        "statistic_id": "sensor.total_energy_import",
    }
    external_statistics = {**_external_statistics}
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("sensor.total_energy_import",)) == {}
    # Attempt to insert statistics for the wrong domain (source must match the
    # statistic_id prefix)
    external_metadata = {**_external_metadata, "source": "other"}
    external_statistics = {**_external_statistics}
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("test:total_energy_import",)) == {}
    # Attempt to insert statistics for a naive (timezone-less) starting time
    external_metadata = {**_external_metadata}
    external_statistics = {
        **_external_statistics,
        "start": period1.replace(tzinfo=None),
    }
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("test:total_energy_import",)) == {}
    # Attempt to insert statistics for an invalid starting time (not aligned
    # to the top of the hour)
    external_metadata = {**_external_metadata}
    external_statistics = {**_external_statistics, "start": period1.replace(minute=1)}
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("test:total_energy_import",)) == {}
@pytest.mark.parametrize("timezone", ["America/Regina", "Europe/Vienna", "UTC"])
@pytest.mark.freeze_time("2021-08-01 00:00:00+00:00")
def test_monthly_statistics(hass_recorder, caplog, timezone):
    """Test inserting external statistics and aggregating them by month.

    Four hourly statistics (first/last hour of September and October) are
    inserted; querying with period="month" must return one row per month,
    carrying each month's last state/sum. Parametrized over time zones to
    check month-boundary handling.
    """
    dt_util.set_default_time_zone(dt_util.get_time_zone(timezone))
    hass = hass_recorder()
    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text
    zero = dt_util.utcnow()
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
    external_statistics = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 2,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 3,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 4,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
    )
    external_metadata = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import",
        "unit_of_measurement": "kWh",
    }
    async_add_external_statistics(hass, external_metadata, external_statistics)
    wait_recording_done(hass)
    stats = statistics_during_period(hass, zero, period="month")
    sep_start = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    sep_end = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    oct_start = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    oct_end = dt_util.as_utc(dt_util.parse_datetime("2021-11-01 00:00:00"))
    assert stats == {
        "test:total_energy_import": [
            {
                "statistic_id": "test:total_energy_import",
                "start": sep_start.isoformat(),
                "end": sep_end.isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(1.0),
                "sum": approx(3.0),
            },
            {
                "statistic_id": "test:total_energy_import",
                "start": oct_start.isoformat(),
                "end": oct_end.isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(3.0),
                "sum": approx(5.0),
            },
        ]
    }
    # Restore the default time zone so later tests are unaffected.
    dt_util.set_default_time_zone(dt_util.get_time_zone("UTC"))
def record_states(hass):
"""Record some test states.
We inject a bunch of state updates temperature sensors.
"""
mp = "media_player.test"
sns1 = "sensor.test1"
sns2 = "sensor.test2"
sns3 = "sensor.test3"
sns4 = "sensor.test4"
sns1_attr = {
"device_class": "temperature",
"state_class": "measurement",
"unit_of_measurement": TEMP_CELSIUS,
}
sns2_attr = {
"device_class": "humidity",
"state_class": "measurement",
"unit_of_measurement |
on in parser.sections():
# validate section name against the 'section "foo"' pattern
section_pattern = r'submodule "(.*)"'
if re.match(section_pattern, section):
# parse the submodule name, URL and path
name = re.sub(section_pattern, r'\1', section)
url = parser.get(section, 'url')
path = parser.get(section, 'path')
try:
sha1 = gd.get_submodule_commit(self.ref, path)
except morphlib.gitdir.MissingSubmoduleCommitError:
# Ignore submodules listed in .gitmodules file that are
# not pointing to a git commit object. If you try to clone
# a repo with submodules without a commit object (using
# Git), nothing will happen, and Git will not even complain
continue
# create a submodule object
submodule = Submodule(name, url, sha1, path)
self.submodules.append(submodule)
else:
raise InvalidSectionError(self.repo, self.ref, section)
def __iter__(self):
for submodule in self.submodules:
yield submodule
    def __len__(self):
        # Number of submodules parsed from the .gitmodules file.
        return len(self.submodules)
def update_submodules(app, repo_dir):  # pragma: no cover
    '''Set up repo submodules, rewriting the URLs to expand prefixes

    We do this automatically rather than leaving it to the user so that they
    don't have to worry about the prefixed URLs manually.
    '''
    if os.path.exists(os.path.join(repo_dir, '.gitmodules')):
        resolver = morphlib.repoaliasresolver.RepoAliasResolver(
            app.settings['repo-alias'])
        gitcmd(app.runcmd, 'submodule', 'init', cwd=repo_dir)
        submodules = Submodules(repo_dir, 'HEAD')
        submodules.load()
        # Point each registered submodule at its alias-expanded pull URL
        # before fetching, so clones go through the configured mirrors.
        for submodule in submodules:
            gitcmd(app.runcmd, 'config', 'submodule.%s.url' % submodule.name,
                   resolver.pull_url(submodule.url), cwd=repo_dir)
        gitcmd(app.runcmd, 'submodule', 'update', cwd=repo_dir)
class ConfigNotSetException(cliapp.AppException):
    """Raised when required git configuration keys have no value set."""

    def __init__(self, missing, defaults):
        self.missing = missing
        self.defaults = defaults
        if len(missing) == 1:
            self.preamble = ('Git configuration for %s has not been set. '
                             'Please set it with:' % missing[0])
        else:
            self.preamble = ('Git configuration for keys %s and %s '
                             'have not been set. Please set them with:'
                             % (', '.join(missing[:-1]), missing[-1]))

    def __str__(self):
        # Preamble followed by one indented `git config` command per key.
        parts = [self.preamble]
        for key in self.missing:
            parts.append('git config --global %s \'%s\''
                         % (key, self.defaults[key]))
        return '\n    '.join(parts)
class IdentityNotSetException(ConfigNotSetException):
    """Raised when the git user.name/user.email identity is incomplete."""

    preamble = 'Git user info incomplete. Please set your identity, using:'

    def __init__(self, missing):
        self.missing = missing
        self.defaults = {"user.name": "My Name",
                         "user.email": "me@example.com"}
def get_user_name(runcmd):
    '''Get user.name configuration setting. Complain if none was found.'''
    # The GIT_AUTHOR_NAME environment variable overrides git config.
    env_name = os.environ.get('GIT_AUTHOR_NAME')
    if env_name is not None:
        return env_name.strip()
    try:
        return check_config_set(runcmd,
                                keys={"user.name": "My Name"})['user.name']
    except ConfigNotSetException as e:
        raise IdentityNotSetException(e.missing)
def get_user_email(runcmd):
    '''Get user.email configuration setting. Complain if none was found.'''
    # The GIT_AUTHOR_EMAIL environment variable overrides git config.
    env_email = os.environ.get('GIT_AUTHOR_EMAIL')
    if env_email is not None:
        return env_email.strip()
    try:
        return check_config_set(
            runcmd, keys={"user.email": "me@example.com"})['user.email']
    except ConfigNotSetException as e:
        raise IdentityNotSetException(e.missing)
def check_config_set(runcmd, keys, cwd='.'):
    ''' Check whether the given keys have values in git config. '''
    found = {}
    missing = []
    for key in keys:
        # gitcmd raises cliapp.AppException when the key is unset.
        try:
            found[key] = gitcmd(runcmd, 'config', key, cwd=cwd,
                                print_command=False).strip()
        except cliapp.AppException:
            missing.append(key)
    if missing:
        raise ConfigNotSetException(missing, keys)
    return found
def copy_repository(runcmd, repo, destdir, is_mirror=True):
    '''Copies a cached repository into a directory using cp.

    This also fixes up the repository afterwards, so that it can contain
    code etc. It does not leave any given branch ready for use.

    This is slightly faster than `git clone` for large repositories,
    as of Git 2.3.0. Long term, we should fix `git clone` to be as fast
    as possible, and use that.
    '''
    # Idiom fix: was `if is_mirror == False:` — compare truthiness, not
    # identity/equality with the False singleton.
    if not is_mirror:
        # Non-mirror cache: it already has a .git directory, just copy it.
        runcmd(['cp', '-a', os.path.join(repo, '.git'),
                os.path.join(destdir, '.git')])
        return
    runcmd(['cp', '-a', repo, os.path.join(destdir, '.git')])
    runcmd(['chown', '-R', '%s:%s' % (os.getuid(), os.getgid()), destdir])
    # core.bare should be false so that git believes work trees are possible
    gitcmd(runcmd, 'config', 'core.bare', 'false', cwd=destdir)
    # we do not want the origin remote to behave as a mirror for pulls
    gitcmd(runcmd, 'config', '--unset', 'remote.origin.mirror', cwd=destdir)
    # we want a traditional refs/heads -> refs/remotes/origin ref mapping
    gitcmd(runcmd, 'config', 'remote.origin.fetch',
           '+refs/heads/*:refs/remotes/origin/*', cwd=destdir)
    # set the origin url to the cached repo so that we can quickly clean up
    gitcmd(runcmd, 'config', 'remote.origin.url', repo, cwd=destdir)
    # by packing the refs, we can then edit them en-masse easily
    gitcmd(runcmd, 'pack-refs', '--all', '--prune', cwd=destdir)
    # turn refs/heads/* into refs/remotes/origin/* in the packed refs
    # so that the new copy behaves more like a traditional clone.
    logging.debug("Adjusting packed refs for %s" % destdir)
    with open(os.path.join(destdir, ".git", "packed-refs"), "r") as ref_fh:
        pack_lines = ref_fh.read().split("\n")
    with open(os.path.join(destdir, ".git", "packed-refs"), "w") as ref_fh:
        # The first line is the packed-refs header; keep it verbatim.
        ref_fh.write(pack_lines.pop(0) + "\n")
        for refline in pack_lines:
            if ' refs/remotes/' in refline:
                continue
            if ' refs/heads/' in refline:
                # Lines look like "<40-char sha> <ref>".
                sha, ref = refline[:40], refline[41:]
                if ref.startswith("refs/heads/"):
                    ref = "refs/remotes/origin/" + ref[11:]
                refline = "%s %s" % (sha, ref)
            ref_fh.write("%s\n" % (refline))
    # Finally run a remote update to clear up the refs ready for use.
    gitcmd(runcmd, 'remote', 'update', 'origin', '--prune', cwd=destdir)
def reset_workdir(runcmd, gitdir):
    '''Remove any differences between the current commit
    and the status of the working directory.
    '''
    # Fix: the second line of the original "docstring" was a separate bare
    # string literal (a no-op statement), not part of the docstring; the two
    # have been folded into one docstring.
    gitcmd(runcmd, 'clean', '-fxd', cwd=gitdir)
    gitcmd(runcmd, 'reset', '--hard', 'HEAD', cwd=gitdir)
def is_valid_sha1(ref):
    '''Checks whether a string is a valid SHA1.'''
    # Exactly 40 characters, all hexadecimal digits (either case).
    return len(ref) == 40 and set(ref).issubset(string.hexdigits)
def gitcmd(runcmd, *args, **kwargs):
'''Run git commands safely'''
if 'env' not in kwargs:
kwargs['env'] = dict(os.environ)
# git replace means we can't trust that just the sha1 of the branch
# is enough to say what it contains, so we turn it off by setting
# the right flag in an environment variable.
kwargs['env']['GIT_NO_REPLACE_OBJECTS'] = '1'
cmdline = ['git']
echo_stderr = kwargs.pop('echo_stderr', False)
if echo_stderr:
if 'stderr' not in kwargs:
# Ensure status output is visible. Git will hide it if stderr is
# redirected somewhere else (the --progress flag overrides this
# behaviour for the 'clone' command, but not others).
kwargs['stderr'] = sys.stderr
cmdline.extend(args)
re |
from __future__ import print_function
import time
import numpy as np
from mpi4py import MPI
from python_compat import range
comm = MPI.COMM_WORLD
def r_print(*args):
    """
    Print a message, prefixed with 'ROOT:', on the root node (rank 0) only.
    :param args:
    :return:
    """
    if comm.rank != 0:
        return
    # Space-separated args with a trailing space, matching the original
    # per-arg end=' ' output.
    # noinspection PyArgumentList
    print('ROOT:', *args, end=' \n')
def l_print(*args):
    """
    Print a message on each node, synchronized so output appears in
    rank order.
    :param args:
    :return:
    """
    for turn in range(comm.size):
        comm.Barrier()
        if turn == comm.rank:
            l_print_no_barrier(*args)
    comm.Barrier()
def l_print_no_barrier(*args):
    """
    Print a message on this node, prefixed with its rank (no
    synchronization).
    :param args:
    :return:
    """
    # Space-separated args with a trailing space, matching the original
    # per-arg end=' ' output.
    # noinspection PyArgumentList
    print(comm.rank, ':', *args, end=' \n')
def get_chunks(num_items, num_steps):
    """
    Divide items into n=num_steps chunks; earlier chunks get one extra
    item when the division is uneven.
    :param num_items:
    :param num_steps:
    :return: chunk sizes, chunk offsets
    """
    base, extra = divmod(num_items, num_steps)
    sizes = np.full(num_steps, base, dtype=int)
    sizes[:extra] += 1
    # Offsets are the exclusive prefix sum of the sizes.
    offsets = np.concatenate(([0], np.cumsum(sizes)[:-1]))
    return sizes, offsets
def barrier_sleep(mpi_comm=comm, tag=1747362612, sleep=0.1, use_yield=False):
    """
    As suggested by Lisandro Dalcin at:
    https://groups.google.com/forum/?fromgroups=#!topic/mpi4py/nArVuMXyyZI

    NOTE(review): because this function body contains ``yield`` statements it
    is a generator for *both* values of ``use_yield`` — calling
    ``barrier_sleep()`` without iterating the result performs no
    communication at all. Callers must exhaust the returned generator;
    confirm this is the intended contract.
    """
    size = mpi_comm.Get_size()
    if size == 1:
        return
    rank = mpi_comm.Get_rank()
    mask = 1
    # Each round: send to (rank + mask), wait for (rank - mask), then double
    # mask, so all ranks are coupled after ~log2(size) rounds.
    while mask < size:
        dst = (rank + mask) % size
        src = (rank - mask + size) % size
        req = mpi_comm.isend(None, dst, tag)
        # Poll instead of blocking so we can sleep (and optionally yield
        # control to the caller) between probes.
        while not mpi_comm.Iprobe(src, tag):
            if use_yield:
                yield False
            time.sleep(sleep)
        mpi_comm.recv(None, src, tag)
        req.Wait()
        mask <<= 1
    if use_yield:
        yield True
|
'''
page_with_rectangle = coord_element.split('inpage="')[1].split('"')[0]
coordinates_start = coord_element.split('<coord')[1]
coordinates_clean_start = '>'.join(coordinates_start.split('>')[1:])
clean_coordinates = coordinates_clean_start.split('</coord>')[0]
try:
return [int(i) for i in clean_coordinates.split(':')], page_with_rectangle
except:
print 'coordinate parsing failed; exiting'
sys.exit()
###############################
# Generate Master XML Mapping #
###############################
def generate_issue_page_rectangle_mapping(root_data_directory):
    '''
    Read in a path to a directory that has subdirectories, each of
    which has files for a single newspaper issue. Iterate over those
    issue directories and create a dictionary that maps the following
    hierarchy:
        issue directory -> multiple pages
        each page id -> multiple rectangles
    For each rectangle, store that rectangle's coordinates
    and the page and article to which the rectangle belongs
    (e.g. if we have a rectangle printed on page 2 that is continued
    from page 1, store an indicate that the rectangle belongs
    to an article with a particular index position on page 1).
    Then write this mapping to disk.

    (Python 2 code: note the print statement below.)
    '''
    # unique identifier given to each rectangle created
    rect_id = 0
    # d[issue_directory][article_xml_filename][article_index] = [{img_with_rect:, rect_coords:}]
    rects_to_articles = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    # d[issue_directory][img_file] = [{rect_id: , rect_coords: }]
    imgs_to_crop = defaultdict(lambda: defaultdict(list))
    # each issue_directory contains a single issue of the newspaper
    issue_directories = get_issue_directories(root_data_directory)
    for issue_directory in issue_directories:
        page_id_to_page_file, page_file_to_page_id = get_page_mappings(issue_directory)
        # each xml/jp2 file combination in the issue_directory
        # contains a single newspaper page
        xml_pages = get_article_xml_files(issue_directory)
        # iterate over each xml file and get all articles on that page
        for page_index, xml_page in enumerate(xml_pages):
            # xml_content is a unicode string with the xml content
            xml_content = read_xml_file(xml_page)
            # page_articles is an array of the article elements in the xml
            xml_articles = get_xml_articles(xml_content)
            # loop over each article and get all 'clips' for that article
            for article_index, xml_article in enumerate(xml_articles):
                # store a boolean indicating whether this article includes rects from
                # multiple images. This is used to prevent us from duplicating rects
                # in articles with multiple pages (as the ALTO XML does)
                rects_already_stored = False
                # xml_clips is an array of the clips within the current article
                xml_clips = get_article_clips(xml_article)
                # each xml_clip contains a sequence of coordinates that define
                # a rectangle a user drew on a jp2 image
                for clip_index, xml_clip in enumerate(xml_clips):
                    # xml_coords is an array of the coords in the current clip
                    xml_coords = get_clip_coords(xml_clip)
                    # each xml_coord is an array of 4 coordinates that describe the
                    # x_offset, y_offset, width, height of a rectangle to be
                    # extracted from the jp2. This rectangle is described in xml
                    # units and must be converted into pixel units for extraction.
                    # The coord_inpage_value indicates the integer used in the <coord
                    # inpage={int} value, which must be mapped to a proper file
                    for xml_coord_index, xml_coord in enumerate(xml_coords):
                        xml_coordinate_array, coord_inpage_value = get_coordinate_array(xml_coord)
                        # now use the coord_inpage_value to identify the file with the rectangle
                        try:
                            img_with_rect = page_id_to_page_file[int(coord_inpage_value)]
                        # handle case where issue references a page that doesn't exist:
                        # log the miss and skip the rectangle
                        except KeyError:
                            with open('missing_page_articles.txt', 'a') as missing_out:
                                msg = issue_directory + ' ' + coord_inpage_value + '\n'
                                missing_out.write(msg)
                            continue
                        # also parse out the xml file that references the image
                        article_xml_filename = os.path.basename(xml_page)
                        # identify the image for the current xml file
                        current_img = os.path.basename(article_xml_filename)
                        current_img = current_img.replace('.articles.xml','.jp2')
                        # d[issue_directory][img_file] = [{rect_id: , rect_coords: }]
                        imgs_to_crop[issue_directory][img_with_rect].append({
                            'coords': xml_coordinate_array,
                            'rect_id': rect_id
                        })
                        # articles that appear on multiple pages have their XML coordinates
                        # expressed on each page where they occur--this is part of the ALTO
                        # XML schema. To ensure we don't create duplicate outputs for those
                        # articles, examine the first XML coordinate for the given rect.
                        # If the image for that coord set occurs on the current image, store
                        # the mapping, else continue
                        if xml_coord_index == 0:
                            if current_img != img_with_rect:
                                rects_already_stored = True
                                print article_index, xml_coord_index, img_with_rect, rect_id
                        if not rects_already_stored:
                            rects_to_articles[issue_directory][article_xml_filename][article_index].append({
                                'img_with_rect': img_with_rect,
                                'rect_coords': xml_coordinate_array,
                                'rect_id': rect_id
                            })
                        rect_id +=1
    with open('rects_to_articles.json', 'w') as out:
        json.dump(rects_to_articles, out)
    with open('imgs_to_crop.json', 'w') as out:
        json.dump(imgs_to_crop, out)
####################################
# Store the tit | les of each article #
####################################
def store_article_titles():
    '''
    Persist to disk a mapping from the path to an article to the given
    article's title
    '''
    with open('rects_to_articles.json') as f:
        rects_to_articles = json.load(f)
    article_to_title = {}
    for issue, pages in rects_to_articles.items():
        for page, articles in pages.items():
            # e.g. "page1.articles.xml" -> "page1"
            page_number = page.split('.')[0]
            for article_index, rects in articles.items():
                article_path = os.path.join(issue, page_number, article_index)
                # map the article path to the path of its first rect image
                first_image = rects[0]
                first_image_name = str(first_image['rect_id']) + '.png'
                article_to_title[article_path] = os.path.join(
                    article_path, first_image_name)
    with open('articles_to_titles.json', 'w') as out:
        json.dump(article_to_title, out)
##################
# Segment Images #
##################
def segment_images(process_id):
'''
Read in a process id that will indicate which enumerated issues
the current process is responsible for. Then read into memory
the imgs_to_crop JSON, read in the
numpy array that corresponds to each image file identified in
that JSON file, pluck out the appropriate rectangles, and save
them to disk. To ensure equal division of labor among processors,
only allow the current process to work on an issue directory if
issue directory index position % total number of processes ==
process id
'''
with open('imgs_to_crop.json') as f:
rectangle_mappings = json.load(f)
for c, issue_directory in enumerate(rectangle_mappings.iterkeys()):
# to distribute work among available processors, enumerate the issue directories,
# take the current issue directory number modulo the total number of processes,
# and check if that value == the current process id; if not, continue
if c % n_processes != process_id:
continue
for page in rectangle_mappings[issue_directory].iterkeys():
# fetch the numpy array for cropping
jp2_array = jp2_pa |
# django imports
from django.contrib.sites.models import Site
from django.template import Library
from django.utils.safestring import mark_safe
from lfs_downloadable_products import views
register = Library | ()
@register.simple_tag(takes_context=True)
def manage_attachments(context, product):
    """Render the attachment-management markup for the given product."""
    request = context.get('request', None)
    rendered = views.manage_attachments(request, product, True)
    return mark_safe(rendered)
@register.inclusion_tag('lfs_downloadable_products/display_attachments.html', takes_context=True)
def downloadable_attachments(context, order):
    """Collect the order's product URLs, newest first, one per attachment."""
    from lfs_downloadable_products.models import ProductUrl
    seen_attachments = set()
    urls = []
    for url in ProductUrl.objects.filter(order=order).order_by("-creation_date"):
        # Keep only the most recent URL per attachment.
        if url.attachment.id in seen_attachments:
            continue
        seen_attachments.add(url.attachment.id)
        urls.append(url)
    return {
        "urls": urls,
        "domain": Site.objects.get_current().domain,
    }
|
se_id': course_id.to_deprecated_string(),
'user_id': request.user.id,
}
if not request.GET.get('sort_key'):
# If the user did not select a sort key, use their last used sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.retrieve()
# TODO: After the comment service is updated this can just be user.default_sort_key because the service returns the default value
default_query_params['sort_key'] = cc_user.get('default_sort_key') or default_query_params['sort_key']
else:
# If the user clicked a sort key, update their default sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.default_sort_key = request.GET.get('sort_key')
cc_user.save()
#there are 2 dimensions to consider when executing a search with respect to group id
#is user a moderator
#did the user request a group
#if the user requested a group explicitly, give them that group, othewrise, if mod, show all, else if student, use cohort
group_id = request.GET.get('group_id')
if group_id == "all":
group_id = None
if not group_id:
if not cached_has_permission(request.user, "see_all_cohorts", course_id):
group_id = get_cohort_id(request.user, course_id)
if group_id:
default_query_params["group_id"] = group_id
#so by default, a moderator sees all items, and a student sees his cohort
query_params = merge_dict(default_query_params,
strip_none(extract(request.GET,
['page', 'sort_key',
'sort_order', 'text',
'commentable_ids', 'flagged'])))
threads, page, num_pages = cc.Thread.search(query_params)
#now add the group name if the thread has a group id
for thread in threads:
if thread.get('group_id'):
thread['group_name'] = get_cohort_by_id(course_id, thread.get('group_id')).name
thread['group_string'] = "This post visible only to Group %s." % (thread['group_name'])
else:
thread['group_name'] = ""
thread['group_string'] = "This post visible to everyone."
#patch for backward compatibility to comments service
if not 'pinned' in thread:
thread['pinned'] = False
query_params['page'] = page
query_params['num_pages'] = num_pages
return threads, query_params
@login_required
def inline_discussion(request, course_id, discussion_id):
    """
    Renders JSON for DiscussionModules: the discussion's threads plus the
    user/cohort context the client-side module needs.
    """
    nr_transaction = newrelic.agent.current_transaction()
    course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load_forum', course_id)
    threads, query_params = get_threads(request, course_id, discussion_id, per_page=INLINE_THREADS_PER_PAGE)
    cc_user = cc.User.from_django_user(request.user)
    user_info = cc_user.to_dict()
    with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
        annotated_content_info = utils.get_metadata_for_threads(course_id, threads, request.user, user_info)
    allow_anonymous = course.allow_anonymous
    allow_anonymous_to_peers = course.allow_anonymous_to_peers
    # since inline is all one commentable, only show or allow the choice of
    # cohorts if the commentable is cohorted, otherwise everything is not
    # cohorted and no one has the option of choosing a cohort
    is_cohorted = is_course_cohorted(course_id) and is_commentable_cohorted(course_id, discussion_id)
    is_moderator = cached_has_permission(request.user, "see_all_cohorts", course_id)
    cohorts_list = list()
    if is_cohorted:
        cohorts_list.append({'name': _('All Groups'), 'id': None})
        # if you're a mod, send all cohorts and let you pick
        if is_moderator:
            cohorts = get_course_cohorts(course_id)
            for cohort in cohorts:
                cohorts_list.append({'name': cohort.name, 'id': cohort.id})
    else:
        # students don't get to choose
        cohorts_list = None
    return utils.JsonResponse({
        'discussion_data': map(utils.safe_content, threads),
        'user_info': user_info,
        'annotated_content_info': annotated_content_info,
        'page': query_params['page'],
        'num_pages': query_params['num_pages'],
        'roles': utils.get_role_ids(course_id),
        'allow_anonymous_to_peers': allow_anonymous_to_peers,
        'allow_anonymous': allow_anonymous,
        'cohorts': cohorts_list,
        'is_moderator': is_moderator,
        'is_cohorted': is_cohorted
    })
@login_required
def forum_form_discussion(request, course_id):
    """
    Renders the main Discussion page, potentially filtered by a search query
    """
    course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    nr_transaction = newrelic.agent.current_transaction()
    course = get_course_with_access(request.user, 'load_forum', course_id)
    with newrelic.agent.FunctionTrace(nr_transaction, "get_discussion_category_map"):
        category_map = utils.get_discussion_category_map(course)
    try:
        unsafethreads, query_params = get_threads(request, course_id)   # This might process a search query
        # Strip fields that must not leak to the client (e.g. anonymous authors).
        threads = [utils.safe_content(thread) for thread in unsafethreads]
    except cc.utils.CommentClientMaintenanceError:
        # Comments service is down for maintenance -- render a static page.
        log.warning("Forum is in maintenance mode")
        return render_to_response('discussion/maintenance.html', {})
    user = cc.User.from_django_user(request.user)
    user_info = user.to_dict()
    with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
        annotated_content_info = utils.get_metadata_for_threads(course_id, threads, request.user, user_info)
    with newrelic.agent.FunctionTrace(nr_transaction, "add_courseware_context"):
        add_courseware_context(threads, course)
    if request.is_ajax():
        # AJAX pagination/search requests only need the thread payload.
        return utils.JsonResponse({
            'discussion_data': threads,   # TODO: Standardize on 'discussion_data' vs 'threads'
            'annotated_content_info': annotated_content_info,
            'num_pages': query_params['num_pages'],
            'page': query_params['page'],
        })
    else:
        # Full-page render: also gather cohort data for the filter UI.
        with newrelic.agent.FunctionTrace(nr_transaction, "get_cohort_info"):
            cohorts = get_course_cohorts(course_id)
            cohorted_commentables = get_cohorted_commentables(course_id)
            user_cohort_id = get_cohort_id(request.user, course_id)
        # JSON blobs are escaped before being embedded into the template.
        context = {
            'csrf': csrf(request)['csrf_token'],
            'course': course,
            #'recent_active_threads': recent_active_threads,
            'staff_access': has_access(request.user, 'staff', course),
            'threads': saxutils.escape(json.dumps(threads), escapedict),
            'thread_pages': query_params['num_pages'],
            'user_info': saxutils.escape(json.dumps(user_info), escapedict),
            'flag_moderator': cached_has_permission(request.user, 'openclose_thread', course.id) or has_access(request.user, 'staff', course),
            'annotated_content_info': saxutils.escape(json.dumps(annotated_content_info), escapedict),
            'course_id': course.id.to_deprecated_string(),
            'category_map': category_map,
            'roles': saxutils.escape(json.dumps(utils.get_role_ids(course_id)), escapedict),
            'is_moderator': cached_has_permission(request.user, "see_all_cohorts", course_id),
            'cohorts': cohorts,
            'user_cohort': user_cohort_id,
            'cohorted_commentables': cohorted_commentables,
            'is_course_cohorted': is_course_cohorted(course_id)
        }
        # print "start rendering.."
        return render_to_response('discussion/index.html', context)
@require_GET
@login_required
def single_thread(request, course_id, discussion_id, thread_id):
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
nr_transaction = newrelic.agent.current_transaction()
course = get |
import numpy
from matplotlib import pyplot as plot
import matplotlib.cm as cm
def | iter_count(C, max_iter):
X = C
for n in range(max_iter):
if abs(X) > 2.:
return n
X = X ** 2 + C
return max_iter
# Grid resolution and the escape-iteration cap.
N = 512
max_iter = 64
# Window of the complex plane being sampled.
xmin, xmax, ymin, ymax = -0.32, -0.22, 0.8, 0.9
X = numpy.linspace(xmin, xmax, N)
Y = numpy.linspace(ymin, ymax, N)
Z = numpy.empty((N, N))
# Escape-time value for every grid point (row i corresponds to imag part y).
for i, y in enumerate(Y):
    for j, x in enumerate(X):
        Z[i, j] = iter_count(complex(x, y), max_iter)
plot.imshow(Z,
            cmap = cm.binary,
            interpolation = 'bicubic',
            origin = 'lower',
            extent=(xmin, xmax, ymin, ymax))
# Overlay labeled contour lines at selected iteration counts.
levels = [8, 12, 16, 20]
ct = plot.contour(X, Y, Z, levels, cmap = cm.gray)
plot.clabel(ct, fmt='%d')
plot.show()
|
# -*- coding: utf-8 -*-
"""
Display vnstat statistics.
Coloring rules.
If the value is bigger than a dict key, the status string will turn to the
color specified in the value.
Example:
coloring = {
800: "#dddd00",
900: "#dd0000",
| }
(0 - 800: white, 800-900: yellow, >900 - red)
Format of status string placeh | olders:
{down} download
{total} total
{up} upload
Requires:
- external program called `vnstat` installed and configured to work.
@author shadowprince
@license Eclipse Public License
"""
from __future__ import division # python2 compatibility
from time import time
from subprocess import check_output
def get_stat(statistics_type):
    """
    Return vnstat transfer statistics for the given period type.

    Args:
        statistics_type: "d" for daily or "m" for monthly dump records.
    Returns:
        dict with byte counts: {"up": ..., "down": ..., "total": ...}.
    Raises:
        OSError: if the `vnstat` binary cannot be executed.
        RuntimeError: if vnstat output cannot be parsed.
    """
    def filter_stat():
        # Find the most recent record line ("<type>;0;...") in the dump.
        out = check_output(["vnstat", "--dumpdb"]).decode("utf-8").splitlines()
        for x in out:
            if x.startswith("{};0;".format(statistics_type)):
                return x

    try:
        line = filter_stat()
        if line is None:
            # Previously this fell through to an AttributeError on .split();
            # route it into the same "wrong output" error path instead.
            raise ValueError("no statistics line found")
        # Renamed from `type` to avoid shadowing the builtin.
        rec_type, number, ts, rxm, txm, rxk, txk, fill = line.split(";")
    except OSError as e:
        print("Looks like you haven't installed or configured vnstat!")
        raise e
    except ValueError:
        err = "vnstat returned wrong output, "
        err += "maybe it's configured wrong or module is outdated"
        raise RuntimeError(err)

    # Values are dumped as MiB + KiB pairs; convert everything to bytes.
    up = (int(txm) * 1024 + int(txk)) * 1024
    down = (int(rxm) * 1024 + int(rxk)) * 1024

    return {
        "up": up,
        "down": down,
        "total": up + down
    }
class Py3status:
    """
    py3status module that renders vnstat transfer totals in the status bar.
    """
    # available configuration parameters
    cache_timeout = 180  # seconds to cache the rendered status text
    coloring = {}  # MiB threshold -> color string (see module docstring)
    format = "{total}"  # status template; may use {down}, {up}, {total}
    # initial multiplier, if you want to get rid of first bytes, set to 1 to
    # disable
    initial_multi = 1024
    left_align = 0
    # if value is greater, divide it with unit_multi and get next unit from
    # units
    multiplier_top = 1024
    precision = 1
    statistics_type = "d" # d for daily, m for monthly
    unit_multi = 1024 # value to divide if rate is greater than multiplier_top
    def __init__(self, *args, **kwargs):
        """
        Format of total, up and down placeholders under FORMAT.
        As default, substitutes left_align and precision as %s and %s
        Placeholders:
            value - value (float)
            unit - unit (string)
        """
        # NOTE(review): fetches stats once at startup; raises if vnstat is
        # missing or misconfigured (see get_stat).
        self.last_stat = get_stat(self.statistics_type)
        self.last_time = time()
        self.last_interface = None
        # e.g. "{value:0.1f} {unit}" for left_align=0, precision=1
        self.value_format = "{value:%s.%sf} {unit}" % (
            self.left_align, self.precision
        )
        # list of units, first one - value/initial_multi, second - value/1024,
        # third - value/1024^2, etc...
        self.units = ["kb", "mb", "gb", "tb", ]
    def _divide_and_format(self, value):
        """
        Divide a value and return formatted string
        """
        value /= self.initial_multi
        # Walk up the unit ladder while the value still exceeds the threshold.
        for i, unit in enumerate(self.units):
            if value > self.multiplier_top:
                value /= self.unit_multi
            else:
                break
        return self.value_format.format(value=value, unit=unit)
    def currentSpeed(self, i3s_output_list, i3s_config):
        """
        py3status callback: build the response dict with formatted totals.
        """
        stat = get_stat(self.statistics_type)
        # Choose the color of the highest MiB threshold the total crossed.
        color = None
        keys = list(self.coloring.keys())
        keys.sort()
        for k in keys:
            if stat["total"] < k * 1024 * 1024:
                break
            else:
                color = self.coloring[k]
        response = {
            'cached_until': time() + self.cache_timeout,
            'full_text': self.format.format(
                total=self._divide_and_format(stat['total']),
                up=self._divide_and_format(stat['up']),
                down=self._divide_and_format(stat['down']),
            ),
            'transformed': True
        }
        if color:
            response["color"] = color
        return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
config = {
'color_good': '#00FF00',
'color_bad': '#FF0000',
}
while True:
print(x.currentSpeed([], config))
sleep(1)
|
= ""
for alignment in alignments_to_d_resized:
# Get the information for each alignment file
l_stat, significant, left_counts_res, right_counts_res, num_ignored, chisq, pval = alignments_to_d_resized[alignment]
output_resized = [("Final Overall D value using Block Resizing method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
l_stat, significant, left_counts_pcoeff, right_counts, num_ignored, chisq, pval = alignments_to_d_pattern_coeff[alignment]
output_pattern_coeff = [("Final Overall D value using Pattern Weighting method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
l_stat, significant, left_counts_ocoeff, right_counts, num_ignored, chisq, pval, coeff = alignments_to_d_ovr_coeff[alignment]
output_overall_coeff = [("Final Overall D value using Overall Weighting method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
# Create the output string
s += "\n"
s += "\n"
s += alignment + ": "
s += "\n"
n += "\n" + "\n" + alignment + ": " + "\n"
# Print output for resizing method
for output in output_resized:
s += str(output[0]) + str(output[1]) + "\n"
n += str(output[0]) + str(output[1]) + "\n"
s += "Left term counts: " + "\n"
for pattern in left_counts_res:
s += pattern + ": {0}".format(left_counts_res[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts_res:
s += pattern + ": {0}".format(right_counts_res[pattern]) + "\n"
s += "\n"
s += "\n"
# Print output for pattern coefficient method
for output in output_pattern_coeff:
s += str(output[0]) + str(output[1]) + "\n"
s += "Left term counts weighted by pattern probability: " + "\n"
for pattern in left_counts_pcoeff:
s += pattern + ": {0}".format(left_counts_pcoeff[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
s += "\n"
s += "\n"
# Print output for overall coefficient method
for output in output_overall_coeff:
s += str(output[0]) + str(output[1]) + "\n"
s += "Overall Coefficient for weighting: {0}".format(coeff) + "\n"
s += "Left term counts after weighting: " + "\n"
for pattern in left_counts_ocoeff:
s += pattern + ": {0}".format(left_counts_ocoeff[pattern] * coeff) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
return s
def plot_formatting(info_tuple, name, meta):
    """
    Reformats and writes the dictionary output to a text file to make plotting it in Excel easy

    Input:
    info_tuple --- a tuple from the calculate_generalized output
    name       --- base name of the output file ("_<n>.txt" is appended)
    meta       --- metadata string written into each output line
    """
    alignments_to_d, alignments_to_windows_to_d = info_tuple
    # Pick the first "<name>_<num>.txt" that does not already exist.
    num = 0
    file_name = "{0}_{1}.txt".format(name, num)
    while os.path.exists(file_name):
        num += 1
        file_name = "{0}_{1}.txt".format(name, num)
    # The `with` block closes the file; the old explicit close() after it
    # was redundant, and the per-alignment window lookup was unused.
    with open(file_name, "w") as text_file:
        for alignment in alignments_to_d:
            l_stat, significant = alignments_to_d[alignment][0], alignments_to_d[alignment][1]
            significant = str(significant).upper()
            output_str = "{0}, {1}, {2} \n".format(l_stat, meta, significant)
            text_file.write(output_str)
if __name__ == '__main__':
    # Hybridization (reticulation) events to test, as (source, target) pairs.
    r = [('P3', 'P2')]
    # Newick species tree the generalized D-statistic is computed against.
    species_tree = '(((P1,P2),P3),O);'
    # species_tree = '((P1,P2),(P3,O));'
    # species_tree = '(((P1,P2),(P3,P4)),O);' # DFOIL tree
    # species_tree = '((((P1,P2),P3),P4),O);' # Smallest asymmetrical tree
    # species_tree = '(((P1,P2),(P3,(P4,P5))),O);'
    # n = '((P2,(P1,P3)),O);'
    # n = '(((P1,P3),P2),O);'
    # n = '((P1,(P2,(P3,P4))),O);'
    # t = ["P1", "P2", "P3", "P4", "O"]
    # o = "O"
    # print site_pattern_generator(t, n, o, False)
    # Developer-machine sample alignment path, chosen per platform.
    if platform == "darwin":
        alignments = ["/Users/Peter/PycharmProjects/ALPHA/exampleFiles/seqfile.txt"]
    else:
        alignments = ["C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim2\\seqfile.txt"]
    # Run the statistic with outgroup "O" and 500 kbp windows/offsets.
    print calculate_generalized(alignments, species_tree, r, "O", 500000, 500000,
                                alpha=0.01, verbose=False, use_inv=False)
# alignments = ["C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames"]
#
#
# species_tree, r = '((((P1,P4),P3),P2),O);', [('P3', 'P2'),('P1', 'P2')]
#
# # 3 to 2
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2')], 50000, 50000, True, save=True, f='stat_6tax_sub_3to2.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_3to2.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # 4 to 3
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P4', 'P3')], 50000, 50000, True, save=True, f='stat_6tax_sub_4to3.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P4', 'P3')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_4to3.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # both
# calculate_generalized(['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2'),('P4', 'P3')], 50000, 50000, True, save=True, f= | 'stat_6tax_sub_3to2_4to3.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2'),('P4', 'P3')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_3to2_4to3.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# playsound.playsound("C:\\Users\\travi\\ | Downloads\\app- |
"""
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import json
import os
from django.utils.functional import cached_property
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(__file__), 'data')
def tuplize(seq):
    "Recursively convert every nested list/tuple in the given sequence to a tuple."
    if not isinstance(seq, (list, tuple)):
        return seq
    return tuple(tuplize(item) for item in seq)
def strconvert(d):
    "Return a copy of the dictionary with every key coerced to str."
    return dict((str(key), value) for key, value in d.items())
def get_ds_file(name, ext):
    "Return the path of the data file <name>.<ext> inside TEST_DATA/<name>/."
    filename = name + '.%s' % ext
    return os.path.join(TEST_DATA, name, filename)
class TestObj:
    """
    Base testing object, turns keyword args into attributes.
    """
    def __init__(self, **kwargs):
        # Promote every keyword argument to an instance attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class TestDS(TestObj):
    """
    Object for testing GDAL data sources.
    """
    def __init__(self, name, *, ext='shp', **kwargs):
        # Unless told otherwise, the data source is assumed to be a shapefile.
        data_path = get_ds_file(name, ext)
        self.ds = data_path
        super().__init__(**kwargs)
class TestGeom(TestObj):
    """
    Wraps reference geometry data for GEOS/GDAL tests, normalizing
    nested JSON lists into tuples so coordinate comparisons match.
    """
    def __init__(self, *, coords=None, centroid=None, ext_ring_cs=None, **kwargs):
        # JSON has no tuple type, so convert the list-based test data
        # before storing it for comparison against GEOS/GDAL output.
        if coords:
            self.coords = tuplize(coords)
        if centroid:
            self.centroid = tuple(centroid)
        if ext_ring_cs:
            self.ext_ring_cs = tuplize(ext_ring_cs)
        super().__init__(**kwargs)
class TestGeomSet:
    """
    Each attribute of this object is a list of `TestGeom` instances.
    """
    def __init__(self, **kwargs):
        for attr_name, entries in kwargs.items():
            geoms = [TestGeom(**strconvert(entry)) for entry in entries]
            setattr(self, attr_name, geoms)
class TestDataMixin:
    """
    Mixin for GEOS/GDAL test cases that provides a lazily-loaded
    `geometries` property backed by the JSON reference fixture.
    """
    @cached_property
    def geometries(self):
        # Parse the fixture once; cached_property memoizes the result.
        fixture_path = os.path.join(TEST_DATA, 'geometries.json')
        with open(fixture_path) as f:
            data = json.load(f)
        return TestGeomSet(**strconvert(data))
|
""" A location-aware script to manage ringer volume """
__author__ = 'Marco Bonifacio <bonifacio.marco@gmail.com>'
__licens | e__ = 'MIT License'
import android
import time
# Parameters
SSID = {'bonix-lan': 'casa',
'ZENIT SECURED WPA': 'lavoro'}
RINGER = {'casa': 5,
'lavoro': 2,
'sconosciuto': 5}
# Functions
def check_ssid(droid):
    """ Check if wireless network SSID is known.

    Args:
        droid: an Android instance.
    Returns:
        a string representing a known or unknown environment. """
    state = 'sconosciuto'
    try:
        lwifi = droid.wifiGetScanResults().result
        lssid = [w['ssid'] for w in lwifi]
        for s in lssid:
            if s in SSID:
                state = SSID[s]
    except Exception as e:
        # Scanning can fail (wifi off, permissions): report it and fall
        # through to the unknown state instead of crashing the main loop.
        droid.notify('PyLocale', 'Errore: {}'.format(e))
    # BUG FIX: the return used to live in a `finally` block, which silently
    # swallowed any exception not caught above (e.g. KeyboardInterrupt).
    return state
def check_state(droid, state, stateold):
    """ Check if environment has changed.

    Args:
        droid: an Android instance.
        state: a string, the present state.
        stateold: a string, the former state.
    Returns:
        a binary true if environment has changed. """
    if state == stateold:
        return False
    # Environment changed: buzz and announce the transition.
    droid.vibrate()
    if state == 'sconosciuto':
        droid.makeToast('Sei uscito da {}'.format(stateold))
    else:
        droid.makeToast('Sei a {}'.format(state))
    return True
def set_ringer(droid, state):
    """ Set the ringer volume depending on state.

    Args:
        droid: an Android instance.
        state: a string, the present state.
    Returns:
        nothing. """
    volume = RINGER[state]
    droid.setRingerVolume(volume)
    droid.makeToast('Volume: {}'.format(volume))
if __name__ == '__main__':
    droid = android.Android()
    state = 'sconosciuto'
    # Poll the wifi environment every 5 minutes and adjust the ringer
    # volume whenever the detected location changes.
    while True:
        stateold = state
        state = check_ssid(droid)
        changed = check_state(droid, state, stateold)
        if changed is True:
            set_ringer(droid, state)
        time.sleep(300)
import json
from datetime import date, datetime, time, timedelta
from datetime import date as dt_date

from fitbit import Fitbit
# '''merges two response dicts based on the keys'''
# def combine(a, b):
# c = {}
# for key in a.keys():
# if (key.endswith('-intraday')):
# c[key] = a[key]
# c[key]['dataset'].extend(b[key]['dataset'])
# else:
# c[key] = a[key]
# c[key].extend(b[key])
# return c
class FitbitIntraDay():
    """
    Convenience wrapper around a Fitbit client for fetching minute-level
    ("intraday") time series, splitting a datetime range into the per-day
    API calls the Fitbit API requires.
    """
    def __init__(self, fitbit):
        self.fitbit = fitbit
        # Lazily authenticate the underlying client if needed.
        if self.fitbit.token is None:
            self.fitbit.get_token()

    def get_intraday_time_series(self, resource_path, from_datetime, to_datetime, format='json'):
        """
        Fetch 1-minute data for resource_path between two datetimes.

        Returns a list of parsed per-day responses:
        - [] when from_datetime is None;
        - one whole-day response when to_datetime is None;
        - otherwise partial first/last days plus whole days in between.
        Use the fitbit_timeseries helpers for valid resource paths.
        """
        if from_datetime is None:
            return []
        if to_datetime is None:
            return [self.get_day_time_series(resource_path, from_datetime.date(), format)]
        if to_datetime.date() == from_datetime.date():
            return [self.get_time_interval_time_series(
                resource_path, to_datetime.date(),
                from_datetime.time(), to_datetime.time(), format)]
        # First (partial) day, then every whole day, then the last (partial) day.
        out = [self.get_time_interval_time_series(
            resource_path, from_datetime.date(),
            from_datetime.time(), time(23, 59), format)]
        one_day = timedelta(days=1)
        day = from_datetime.date() + one_day
        while day < to_datetime.date():
            out.append(self.get_day_time_series(resource_path, day, format))
            day += one_day
        out.append(self.get_time_interval_time_series(
            resource_path, to_datetime.date(),
            time(0, 0), to_datetime.time(), format))
        return out

    def get_day_time_series(self, resource_path, date=None, format='json'):
        """Fetch one whole day of 1-minute data (defaults to today)."""
        # BUG FIX: the old default `date=date.today()` was evaluated once at
        # import time, pinning "today" to the process start date forever.
        if date is None:
            date = dt_date.today()
        url = "/1/user/-/{0}/date/{1}/1d/1min.{2}".format(resource_path, date.isoformat(), format)
        data = self.fitbit.call_get_api(url)
        return json.loads(data)

    def get_time_interval_time_series(self, resource_path, date=None, from_time=time(0, 0), to_time=time(23, 59), format='json'):
        """Fetch a [from_time, to_time] slice of one day (defaults to today)."""
        if date is None:
            date = dt_date.today()
        url = "/1/user/-/{0}/date/{1}/1d/1min/time/{2}/{3}.{4}".format(resource_path, date.isoformat(), from_time.strftime("%H:%M"), to_time.strftime("%H:%M"), format)
        data = self.fitbit.call_get_api(url)
        return json.loads(data)

    def get_calories(self, from_datetime, to_datetime=None, format='json'):
        return self.get_intraday_time_series("activities/calories", from_datetime, to_datetime, format)

    def get_steps(self, from_datetime, to_datetime=None, format='json'):
        return self.get_intraday_time_series("activities/steps", from_datetime, to_datetime, format)

    def get_distance(self, from_datetime, to_datetime=None, format='json'):
        return self.get_intraday_time_series("activities/distance", from_datetime, to_datetime, format)

    def get_floors(self, from_datetime, to_datetime=None, format='json'):
        return self.get_intraday_time_series("activities/floors", from_datetime, to_datetime, format)

    def get_elevation(self, from_datetime, to_datetime=None, format='json'):
        return self.get_intraday_time_series("activities/elevation", from_datetime, to_datetime, format)
|
is_virtual=True)
## li-ion-energy-source.h: ns3::Time ns3::LiIonEnergySource::GetEnergyUpdateInterval() const [member function]
cls.add_method('GetEnergyUpdateInterval',
'ns3::Time',
[],
is_const=True)
## li-ion-energy-source.h: double ns3::LiIonEnergySource::GetInitialEnergy() const [member function]
cls.add_method('GetInitialEnergy',
'double',
[],
is_const=True, is_virtual=True)
## li-ion-energy-source.h: double ns3::LiIonEnergySource::GetRemainingEnergy() [member function]
cls.add_method('GetRemainingEnergy',
'double',
[],
is_virtual=True)
## li-ion-energy-source.h: double ns3::LiIonEnergySource::GetSupplyVoltage() const [member function]
cls.add_method('GetSupplyVoltage',
'double',
[],
is_const=True, is_virtual=True)
## li-ion-energy-source.h: static ns3::TypeId ns3::LiIonEnergySource::GetTypeId() [member function]
cls.add_method | ('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## li-ion-energy-source.h: void ns3::LiIonEnergySource::IncreaseRemainingEnergy(double energyJ) [member function]
cls.add_method('IncreaseRemainingEnergy | ',
'void',
[param('double', 'energyJ')],
is_virtual=True)
## li-ion-energy-source.h: void ns3::LiIonEnergySource::SetEnergyUpdateInterval(ns3::Time interval) [member function]
cls.add_method('SetEnergyUpdateInterval',
'void',
[param('ns3::Time', 'interval')])
## li-ion-energy-source.h: void ns3::LiIonEnergySource::SetInitialEnergy(double initialEnergyJ) [member function]
cls.add_method('SetInitialEnergy',
'void',
[param('double', 'initialEnergyJ')])
## li-ion-energy-source.h: void ns3::LiIonEnergySource::SetInitialSupplyVoltage(double supplyVoltageV) [member function]
cls.add_method('SetInitialSupplyVoltage',
'void',
[param('double', 'supplyVoltageV')])
## li-ion-energy-source.h: void ns3::LiIonEnergySource::UpdateEnergySource() [member function]
cls.add_method('UpdateEnergySource',
'void',
[],
is_virtual=True)
## li-ion-energy-source.h: void ns3::LiIonEnergySource::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
## li-ion-energy-source.h: void ns3::LiIonEnergySource::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3RvBatteryModel_methods(root_module, cls):
## rv-battery-model.h: ns3::RvBatteryModel::RvBatteryModel(ns3::RvBatteryModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RvBatteryModel const &', 'arg0')])
## rv-battery-model.h: ns3::RvBatteryModel::RvBatteryModel() [constructor]
cls.add_constructor([])
## rv-battery-model.h: double ns3::RvBatteryModel::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetBatteryLevel() [member function]
cls.add_method('GetBatteryLevel',
'double',
[])
## rv-battery-model.h: double ns3::RvBatteryModel::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetCutoffVoltage() const [member function]
cls.add_method('GetCutoffVoltage',
'double',
[],
is_const=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetEnergyFraction() [member function]
cls.add_method('GetEnergyFraction',
'double',
[],
is_virtual=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetInitialEnergy() const [member function]
cls.add_method('GetInitialEnergy',
'double',
[],
is_const=True, is_virtual=True)
## rv-battery-model.h: ns3::Time ns3::RvBatteryModel::GetLifetime() const [member function]
cls.add_method('GetLifetime',
'ns3::Time',
[],
is_const=True)
## rv-battery-model.h: int ns3::RvBatteryModel::GetNumOfTerms() const [member function]
cls.add_method('GetNumOfTerms',
'int',
[],
is_const=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetOpenCircuitVoltage() const [member function]
cls.add_method('GetOpenCircuitVoltage',
'double',
[],
is_const=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetRemainingEnergy() [member function]
cls.add_method('GetRemainingEnergy',
'double',
[],
is_virtual=True)
## rv-battery-model.h: ns3::Time ns3::RvBatteryModel::GetSamplingInterval() const [member function]
cls.add_method('GetSamplingInterval',
'ns3::Time',
[],
is_const=True)
## rv-battery-model.h: double ns3::RvBatteryModel::GetSupplyVoltage() const [member function]
cls.add_method('GetSupplyVoltage',
'double',
[],
is_const=True, is_virtual=True)
## rv-battery-model.h: static ns3::TypeId ns3::RvBatteryModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## rv-battery-model.h: void ns3::RvBatteryModel::SetAlpha(double alpha) [member function]
cls.add_method('SetAlpha',
'void',
[param('double', 'alpha')])
## rv-battery-model.h: void ns3::RvBatteryModel::SetBeta(double beta) [member function]
cls.add_method('SetBeta',
'void',
[param('double', 'beta')])
## rv-battery-model.h: void ns3::RvBatteryModel::SetCutoffVoltage(double voltage) [member function]
cls.add_method('SetCutoffVoltage',
'void',
[param('double', 'voltage')])
## rv-battery-model.h: void ns3::RvBatteryModel::SetNumOfTerms(int num) [member function]
cls.add_method('SetNumOfTerms',
'void',
[param('int', 'num')])
## rv-battery-model.h: void ns3::RvBatteryModel::SetOpenCircuitVoltage(double voltage) [member function]
cls.add_method('SetOpenCircuitVoltage',
'void',
[param('double', 'voltage')])
## rv-battery-model.h: void ns3::RvBatteryModel::SetSamplingInterval(ns3::Time interval) [member function]
cls.add_method('SetSamplingInterval',
'void',
[param('ns3::Time', 'interval')])
## rv-battery-model.h: void ns3::RvBatteryModel::UpdateEnergySource() [member function]
cls.add_method('UpdateEnergySource',
'void',
[],
is_virtual=True)
## rv-battery-model.h: void ns3::RvBatteryModel::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
## rv-battery-model.h: void ns3::RvBatteryModel::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='private', is_virtual=True)
retu |
ted = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
module_execs = []
# read children
for child in node.getchildren():
if child.tag == 'module_exec':
child.tag = 'moduleExec'
if child.tag == 'moduleExec':
_data = self.getDao('module_exec').fromXML(child)
module_execs.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBWorkflowExec(id=id,
user=user,
ip=ip,
session=session,
vt_version=vt_version,
ts_start=ts_start,
ts_end=ts_end,
parent_id=parent_id,
parent_type=parent_type,
parent_version=parent_version,
completed=completed,
name=name,
module_execs=module_execs)
obj.is_dirty = False
return obj
    def toXML(self, workflow_exec, node=None):
        """Serialize *workflow_exec* into a 'workflowExec' ElementTree
        element, creating the element when *node* is None; returns the node."""
        if node is None:
            node = ElementTree.Element('workflowExec')
        # set attributes
        node.set('id',self.convertToStr(workflow_exec.db_id, 'long'))
        node.set('user',self.convertToStr(workflow_exec.db_user, 'str'))
        node.set('ip',self.convertToStr(workflow_exec.db_ip, 'str'))
        node.set('session',self.convertToStr(workflow_exec.db_session, 'long'))
        node.set('vtVersion',self.convertToStr(workflow_exec.db_vt_version, 'str'))
        node.set('tsStart',self.convertToStr(workflow_exec.db_ts_start, 'datetime'))
        node.set('tsEnd',self.convertToStr(workflow_exec.db_ts_end, 'datetime'))
        node.set('parentId',self.convertToStr(workflow_exec.db_parent_id, 'long'))
        node.set('parentType',self.convertToStr(workflow_exec.db_parent_type, 'str'))
        node.set('parentVersion',self.convertToStr(workflow_exec.db_parent_version, 'long'))
        node.set('completed',self.convertToStr(workflow_exec.db_completed, 'int'))
        node.set('name',self.convertToStr(workflow_exec.db_name, 'str'))
        # set elements
        module_execs = workflow_exec.db_module_execs
        for module_exec in module_execs:
            childNode = ElementTree.SubElement(node, 'module_exec')
            self.getDao('module_exec').toXML(module_exec, childNode)
        return node
class DBConnectionXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBConnection objects."""
    def __init__(self, daoList):
        self.daoList = daoList
    def getDao(self, dao):
        # Look up a sibling DAO by name (e.g. 'port').
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBConnection from a 'connection' element;
        returns None when the tag does not match."""
        if node.tag != 'connection':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        ports = []
        # read children
        for child in node.getchildren():
            if child.tag == 'port':
                _data = self.getDao('port').fromXML(child)
                ports.append(_data)
            elif child.text is None or child.text.strip() == '':
                # whitespace-only text nodes are expected and ignored
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBConnection(id=id,
                           ports=ports)
        obj.is_dirty = False
        return obj
    def toXML(self, connection, node=None):
        """Serialize *connection* into a 'connection' element;
        creates the element when *node* is None and returns it."""
        if node is None:
            node = ElementTree.Element('connection')
        # set attributes
        node.set('id',self.convertToStr(connection.db_id, 'long'))
        # set elements
        ports = connection.db_ports
        for port in ports:
            childNode = ElementTree.SubElement(node, 'port')
            self.getDao('port').toXML(port, childNode)
        return node
class DBActionXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBAction version-tree nodes."""
    def __init__(self, daoList):
        self.daoList = daoList
    def getDao(self, dao):
        # Look up a sibling DAO by name (e.g. 'annotation', 'add').
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBAction from an 'action' element;
        returns None when the tag does not match."""
        if node.tag != 'action':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('prevId', None)
        prevId = self.convertFromStr(data, 'long')
        data = node.get('date', None)
        date = self.convertFromStr(data, 'datetime')
        data = node.get('session', None)
        session = self.convertFromStr(data, 'long')
        data = node.get('user', None)
        user = self.convertFromStr(data, 'str')
        data = node.get('prune', None)
        prune = self.convertFromStr(data, 'int')
        annotations = []
        operations = []
        # read children
        for child in node.getchildren():
            if child.tag == 'annotation':
                _data = self.getDao('annotation').fromXML(child)
                annotations.append(_data)
            elif child.tag == 'add':
                _data = self.getDao('add').fromXML(child)
                operations.append(_data)
            elif child.tag == 'delete':
                _data = self.getDao('delete').fromXML(child)
                operations.append(_data)
            elif child.tag == 'change':
                _data = self.getDao('change').fromXML(child)
                operations.append(_data)
            elif child.text is None or child.text.strip() == '':
                # whitespace-only text nodes are expected and ignored
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBAction(operations=operations,
                       id=id,
                       prevId=prevId,
                       date=date,
                       session=session,
                       user=user,
                       prune=prune,
                       annotations=annotations)
        obj.is_dirty = False
        return obj
    def toXML(self, action, node=None):
        """Serialize *action* into an 'action' element;
        creates the element when *node* is None and returns it."""
        if node is None:
            node = ElementTree.Element('action')
        # set attributes
        node.set('id',self.convertToStr(action.db_id, 'long'))
        node.set('prevId',self.convertToStr(action.db_prevId, 'long'))
        node.set('date',self.convertToStr(action.db_date, 'datetime'))
        node.set('session',self.convertToStr(action.db_session, 'long'))
        node.set('user',self.convertToStr(action.db_user, 'str'))
        node.set('prune',self.convertToStr(action.db_prune, 'int'))
        # set elements
        annotations = action.db_annotations
        for annotation in annotations:
            childNode = ElementTree.SubElement(node, 'annotation')
            self.getDao('annotation').toXML(annotation, childNode)
        operations = action.db_operations
        for operation in operations:
            # The operation subtype determines the element tag.
            if operation.vtType == 'add':
                childNode = ElementTree.SubElement(node, 'add')
                self.getDao('add').toXML(operation, childNode)
            elif operation.vtType == 'delete':
                childNode = ElementTree.SubElement(node, 'delete')
                self.getDao('delete').toXML(operation, childNode)
            elif operation.vtType == 'change':
                childNode = ElementTree.SubElement(node, 'change')
                self.getDao('change').toXML(operation, childNode)
        return node
class DBDeleteXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'delete':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('objectId', None)
objectId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
|
import collections
class Solution:
    def arrangeWords(self, text: str) -> str:
        """Reorder words by increasing length (ties keep input order),
        then capitalize only the first letter of the sentence."""
        buckets = collections.defaultdict(list)
        for word in text.split():
            buckets[len(word)].append(word)
        ordered = [w for length in sorted(buckets) for w in buckets[length]]
        return ' '.join(ordered).capitalize()
# Sort is stable
class Solution2:
    def arrangeWords(self, text: str) -> str:
        """Variant relying on sort stability: stable sort by word length,
        join, and capitalize the sentence."""
        words = text.split()
        words.sort(key=len)
        return ' '.join(words).capitalize()
|
# EMU code from https://github.com/rainforestautomation/Emu-Serial-API
from emu import *
import sys
import json
import msgpack
from xbos import get_client
from bw2python.bwtypes import PayloadObject
import time
# Load runtime configuration; abort with a message on malformed JSON.
with open("params.json") as f:
    try:
        params = json.loads(f.read())
    except ValueError as e:
        print "Invalid parameter file"
        sys.exit(1)
# Open the serial link to the EMU-2 energy monitor.
emu_instance = emu(params["port"])
emu_instance.start_serial()
# get network info
emu_instance.get_network_info()
# NetworkInfo is filled in asynchronously by the EMU library, so poll for it.
# NOTE(review): this loops forever if the device never answers - confirm a
# timeout is not wanted.
while not hasattr(emu_instance, 'NetworkInfo'):
    time.sleep(10)
macid = emu_instance.NetworkInfo.DeviceMacId
# Connect to the message bus and build the publish URI from the meter MAC id.
c = get_client(agent=params["agent"], entity=params["entity"])
PONUM = (2,0,9,1)  # payload object type number used for meter signals
baseuri = params["baseuri"]
signaluri = "{0}/s.emu2/{1}/i.meter/signal/meter".format(baseuri, macid)
print ">",signaluri
def send_message(msg):
    """Publish *msg* on the meter signal URI as a msgpack payload.

    Expected keys:
      current_demand, current_price, current_tier,
      current_summation_delivered, current_summation_received
    """
    packed = msgpack.packb(msg)
    payload = PayloadObject(PONUM, None, packed)
    c.publish(signaluri, payload_objects=(payload,))
msg = {}
while True:
#print emu_instance.get_instantaneous_demand()
emu_instance.get_current_summation_delivered()
emu_instance.get_instantaneous_demand('Y')
emu_instance.get_current_price('Y')
time.sleep(10)
msg['current_time'] = time.time()#int(pc.TimeStamp) + 00:00:00 1 Jan 2000
# handle PriceCluster
if hasattr(emu_instance, "PriceCluster"):
pc = emu_instance.PriceCluster
| print dir(emu_instance.PriceCluster)
msg['current_price'] = float(int( | pc.Price, 16)) / (10**int(pc.TrailingDigits,16))
msg['current_tier'] = int(pc.Tier, 16)
# handle demand
if hasattr(emu_instance, "InstantaneousDemand"):
d = emu_instance.InstantaneousDemand
msg['current_demand'] = int(d.Demand, 16)
print dir(emu_instance)
# handle current summation
if hasattr(emu_instance, "CurrentSummationDelivered"):
d = emu_instance.CurrentSummationDelivered
multiplier = int(d.Multiplier, 16)
divisor = float(int(d.Divisor, 16))
msg['current_summation_delivered'] = int(d.SummationDelivered, 16) * multiplier / divisor
msg['current_summation_received'] = int(d.SummationReceived, 16) * multiplier / divisor
send_message(msg)
emu_instance.stop_serial()
|
"""
Contains methods for accessing weekly course highlights. Weekly highlights is a
schedule experience built on the Schedules app.
"""
from __future__ import absolute_import
import logging
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from openedx.core.djangoapps.schedules.config import COURSE_UPDATE_WAFFLE_FLAG
from openedx.core.djangoapps.schedules.exceptions import CourseUpdateDoesNotExist
from openedx.core.lib.request_utils import get_request_or_stub
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
def course_has_highlights(course_key):
    """
    Does the course have any highlights for any section/week in it?

    This ignores access checks, since highlights may be lurking in currently
    inaccessible content.
    """
    try:
        course = _get_course_with_highlights(course_key)
    except CourseUpdateDoesNotExist:
        return False
    has_highlights = any(
        section.highlights
        for section in course.get_children()
        if not section.hide_from_toc
    )
    if not has_highlights:
        # The feature was switched on but no content exists - flag loudly.
        log.error(
            "Course team enabled highlights and provided no highlights."
        )
    return has_highlights
def get_week_highlights(user, course_key, week_num):
    """
    Get highlights (list of unicode strings) for a given week.

    week_num starts at 1.

    Raises:
        CourseUpdateDoesNotExist: if highlights do not exist for
            the requested week_num.
    """
    descriptor = _get_course_with_highlights(course_key)
    course_module = _get_course_module(descriptor, user)
    sections = _get_sections_with_highlights(course_module)
    return _get_highlights_for_week(sections, week_num, course_key)
def _get_course_with_highlights(course_key):
    """
    Return the course descriptor for ``course_key`` when highlight messaging
    is enabled; raise CourseUpdateDoesNotExist otherwise.
    """
    if not COURSE_UPDATE_WAFFLE_FLAG.is_enabled(course_key):
        # Bug fix: the message and course_key were previously passed as two
        # separate constructor arguments (logging-style "%s" + arg), so the
        # key was never interpolated into the exception message. Use .format
        # like the sibling _get_course_descriptor does.
        raise CourseUpdateDoesNotExist(
            u"{} Course Update Messages waffle flag is disabled.".format(
                course_key
            )
        )
    course_descriptor = _get_course_descriptor(course_key)
    if not course_descriptor.highlights_enabled_for_messaging:
        raise CourseUpdateDoesNotExist(
            u"{} Course Update Messages are disabled.".format(course_key)
        )
    return course_descriptor
def _get_course_descriptor(course_key):
    """Fetch the course from the modulestore, raising
    CourseUpdateDoesNotExist when it is not found."""
    descriptor = modulestore().get_course(course_key, depth=1)
    if descriptor is None:
        raise CourseUpdateDoesNotExist(
            u"Course {} not found.".format(course_key)
        )
    return descriptor
def _get_course_module(course_descriptor, user):
    """Inflate *course_descriptor* with user-specific state for *user*
    and return the resulting course module."""
    # Fake a request to fool parts of the courseware that want to inspect it.
    request = get_request_or_stub()
    request.user = user
    # Now evil modulestore magic to inflate our descriptor with user state and
    # permissions checks.
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course_descriptor.id, user, course_descriptor, depth=1, read_only=True,
    )
    return get_module_for_descriptor(
        user, request, course_descriptor, field_data_cache, course_descriptor.id, course=course_descriptor,
    )
def _get_sections_with_highlights(course_module):
return [
section for section in course_module.get_children()
if section.highlights and not section.hide_from_toc
]
def _get_highlights_for_week(sections, week_num, course_key):
# assume each provided section maps to a single week
num_sections = len(sections)
if not (1 <= week_num <= num_sections):
raise CourseUpdateDoesNotExist(
u"Requested week {} but {} has only {} sections.".format(
week_num, course_key, num_sections
)
)
section = sections[week_num - 1]
return section.highlights
|
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_get
version_added: "3.8.0"
short_description: Hashicorp Vault approle role get module
description:
- Module to get a approle role from Hashicorp Vault.
options:
name:
description:
- role name.
mount_point:
description:
- mount point for role
default: approle
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_approle_role_get:
name: 'ashley'
register: 'vault_approle_role_get'
- debug: msg="Role is {{vault_approle_role_get.role}}"
'''
def main():
    """Ansible entry point: build the argument spec, run the lookup and
    report success or failure through the module object."""
    argspec = hashivault_argspec()
    argspec.update(
        name=dict(required=True, type='str'),
        mount_point=dict(required=False, type='str', default='approle'),
    )
    module = hashivault_init(argspec)
    result = hashivault_approle_role_get(module.params)
    if result.get('failed'):
        module.fail_json(**result)
    else:
        module.exit_json(**result)
@hashiwrapper
def hashivault_approle_role_get(params):
    """Fetch the named approle role from Vault and wrap it for Ansible."""
    client = hashivault_auth_client(params)
    role = client.get_role(
        params.get('name'), mount_point=params.get('mount_point')
    )
    return {'role': role}
# Standard Ansible module entry point guard.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the stories app: Entity, EntityType and Story."""
    # No prior migrations: this is the app's first migration.
    dependencies = [
    ]
    operations = [
        # Named entity (e.g. a person/place referenced by stories).
        migrations.CreateModel(
            name='Entity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entity_name', models.CharField(max_length=255)),
                ('entity_name_slug', models.SlugField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Category for entities; not yet linked to Entity in this migration.
        migrations.CreateModel(
            name='EntityType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entity_type', models.CharField(max_length=255)),
                ('entity_type_slug', models.SlugField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A story with its text and a many-to-many link to entities.
        migrations.CreateModel(
            name='Story',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('headline', models.CharField(max_length=255)),
                ('byline', models.CharField(max_length=255)),
                ('pubdate', models.DateTimeField()),
                ('description', models.TextField()),
                ('full_text', models.TextField()),
                ('word_count', models.IntegerField()),
                ('entities', models.ManyToManyField(to='stories.Entity')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
#! /usr/bin/python
class A1(object):
    # Ordinary (bound) method: the instance arrives as the first
    # positional argument, so it shows up at the front of the tuple.
    def method(*args):
        return args
# Demonstrate calls through an instance: the instance is the implicit
# first argument of the bound method.
a = A1()
print 'Call a.method() ->', a.method()
print 'Call a.method(1,2,3) ->', a.method(1,2,3)
print '''
!! Note that the \'a\' instance is passed (implicitely) as the first
!! function parameter (something like <__main__.A1 object at 0x7f...>
'''
''' The following will throw as A1.method is a bound method (i.e. non-static)
    of the A1 class:
      TypeError: unbound method method() must be called with A1 instance as
                 first argument (got int instance instead)
'''
# print 'Call A.method(1,2,3) ->', A1.method(1,2,3)
''' A static method does not receive an implicit first argument.
    It (i.e. the static method) can be called either on the class
    (such as A.method()) or on an instance (such as A().method()). The
    instance is ignored except for its class.
'''
class A2(object):
    # Static method: no implicit first argument; only the explicit
    # arguments appear in the returned tuple.
    @staticmethod
    def method(*args):
        return args
# Demonstrate static-method calls via the instance and via the class:
# no implicit argument is passed in either case.
a = A2()
print 'Call a.method() ->', a.method()
print 'Call a.method(1,2,3) ->', a.method(1,2,3)
print 'Call A.method(1,2,3) ->', A2.method(1,2,3) ## static call
print '''
!! Note that no class instance is is passed to the call
'''
''' So in a normal (class) bound method call, the instance is passed implicitely
    as the first argument, whereas for a static method no implicit arguments are
    passed; the method is invoked via scoping the class name.
    There is a third option, where we do pass an implicit first argument, but
    NOT an instance; instead the argument is the class type itself. That is,
    A class method receives the class as implicit first argument, just like an
    instance method receives the instance.
    It can be called either on the class (such as A3.method()) or on an
    instance (such as A3().method()). The instance is ignored except for its
    class. If a class method is called for a derived class, the derived class
    object is passed as the implied first argument.
'''
class A3(object):
    # Class method: the class object (not the instance) is prepended
    # to the argument tuple.
    @classmethod
    def method(*args):
        return args
# Demonstrate class-method calls: the class object itself is the
# implicit first argument, whether called on the instance or the class.
a = A3()
print 'Call a.method() ->', a.method()
print 'Call a.method(1,2,3) ->', a.method(1,2,3)
print 'Call A.method(1,2,3) ->', A3.method(1,2,3)
print '''
!! Note that The class object (i.e. something like <class '__main__.A3'>)
!! is (implicitely) passed as the first argument.
'''
|
import m | fr
import json
from tests im | port utils
from tornado import testing
class TestRenderHandler(utils.HandlerTestCase):
    """Tests for the /render endpoint."""
    @testing.gen_test
    def test_options_skips_prepare(self):
        # An OPTIONS request must succeed without the usual prepare()
        # setup; if prepare() ran, this request
        # Would crash b/c lack of mocks
        yield self.http_client.fetch(
            self.get_url('/render'),
            method='OPTIONS'
        )
|
)
from applications.api import get_or_create_bootcamp_application
from applications.filters import ApplicationStepSubmissionFilterSet
from applications.models import (
ApplicantLetter,
ApplicationStepSubmission,
BootcampApplication,
)
from cms.models import LetterTemplatePage
from ecommerce.models import Order
from klasses.models import BootcampRun
from main.permissions import UserIsOwnerPermission, UserIsOwnerOrAdminPermission
from main.utils import serializer_date_format
class BootcampApplicationViewset(
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    viewsets.GenericViewSet,
):
    """
    View for fetching users' serialized bootcamp application(s)
    """
    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated, UserIsOwnerOrAdminPermission)
    # consumed by the owner/admin permission class to locate the owning user
    owner_field = "user"
    def get_queryset(self):
        """Return the applications visible for the current action."""
        if self.action == "retrieve":
            # object-level access is enforced by the permission class
            return BootcampApplication.objects.prefetch_state_data()
        else:
            # list/create: only the requesting user's applications, with the
            # relations the list serializer touches prefetched up front
            return (
                BootcampApplication.objects.prefetch_related(
                    Prefetch(
                        "orders", queryset=Order.objects.filter(status=Order.FULFILLED)
                    )
                )
                .filter(user=self.request.user)
                .select_related("bootcamp_run__bootcamprunpage", "user")
                .prefetch_related("bootcamp_run__certificates", "user__enrollments")
                .order_by("-created_on")
            )
    def get_serializer_context(self):
        """Add list-only flags on top of the default serializer context."""
        added_context = {}
        if self.action == "list":
            added_context = {"include_page": True, "filtered_orders": True}
        return {**super().get_serializer_context(), **added_context}
    def get_serializer_class(self):
        """Pick the detail or summary serializer based on the action."""
        if self.action == "retrieve":
            return BootcampApplicationDetailSerializer
        elif self.action in {"list", "create"}:
            return BootcampApplicationSerializer
        raise MethodNotAllowed("Cannot perform the requested action.")
    def create(self, request, *args, **kwargs):
        """Create (or fetch) the user's application for a bootcamp run.

        Returns 201 when a new application was created, 200 when one
        already existed, 404 when the run does not exist.
        """
        bootcamp_run_id = request.data.get("bootcamp_run_id")
        if not bootcamp_run_id:
            raise ValidationError("Bootcamp run ID required.")
        if not BootcampRun.objects.filter(id=bootcamp_run_id).exists():
            return Response(
                data={"error": "Bootcamp does not exist"},
                status=status.HTTP_404_NOT_FOUND,
            )
        application, created = get_or_create_bootcamp_application(
            user=request.user, bootcamp_run_id=bootcamp_run_id
        )
        serializer_cls = self.get_serializer_class()
        return Response(
            data=serializer_cls(instance=application).data,
            status=(status.HTTP_201_CREATED if created else status.HTTP_200_OK),
        )
class ReviewSubmissionPagination(LimitOffsetPagination):
    """Pagination class for ReviewSubmissionViewSet"""
    default_limit = 10
    max_limit = 1000
    # class-level default; replaced per-request in paginate_queryset
    facets = {}
    def paginate_queryset(self, queryset, request, view=None):
        """Paginate the queryset"""
        # compute facets on the *unpaginated* queryset before slicing
        self.facets = self.get_facets(queryset)
        return super().paginate_queryset(queryset, request, view=view)
    def get_paginated_response(self, data):
        """Return a paginationed response, including facets"""
        return Response(
            OrderedDict(
                [
                    ("count", self.count),
                    ("next", self.get_next_link()),
                    ("previous", self.get_previous_link()),
                    ("results", data),
                    ("facets", self.facets),
                ]
            )
        )
    def get_facets(self, queryset):
        """Return a dictionary of facets"""
        # counts per review_status value
        statuses = (
            queryset.values("review_status")
            .annotate(count=Count("review_status"))
            .order_by("count")
        )
        # correlated subquery: submission count per bootcamp run
        qs = (
            queryset.values("bootcamp_application__bootcamp_run")
            .filter(bootcamp_application__bootcamp_run=OuterRef("pk"))
            .order_by()
            .annotate(count=Count("*"))
            .values("count")
        )
        # runs that have at least one submission in the queryset
        bootcamp_runs = (
            BootcampRun.objects.values("id", "title", "start_date", "end_date")
            .annotate(count=Subquery(qs, output_field=IntegerField()))
            .filter(count__gte=1)
            .distinct()
        )
        return {"review_statuses": statuses, "bootcamp_runs": bootcamp_runs}
class ReviewSubmissionViewSet(
    SerializerExtensionsAPIViewMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    mixins.ListModelMixin,
    viewsets.GenericViewSet,
):
    """
    Admin view for managing application submissions
    """
    authentication_classes = (SessionAuthentication,)
    serializer_class = SubmissionReviewSerializer
    permission_classes = (IsAdminUser,)
    # Only submitted submissions on reviewable applications whose run has
    # not yet ended; user data is pulled eagerly for the review UI.
    queryset = (
        ApplicationStepSubmission.objects.filter(
            Q(submission_status=SUBMISSION_STATUS_SUBMITTED)
            & Q(bootcamp_application__state__in=REVIEWABLE_APP_STATES)
            & Q(bootcamp_application__bootcamp_run__end_date__gte=now_in_utc())
        )
        .select_related(
            "bootcamp_application__user__profile",
            "bootcamp_application__user__legal_address",
        )
        .prefetch_related("content_object")
    )
    filterset_class = ApplicationStepSubmissionFilterSet
    filter_backends = [DjangoFilterBackend, OrderingFilter]
    pagination_class = ReviewSubmissionPagination
    ordering_fields = ["created_on"]
    ordering = "created_on"
class UploadResumeView(GenericAPIView):
    """
    View for uploading resume and linkedin URL
    """
    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated, UserIsOwnerPermission)
    lookup_field = "pk"
    # consumed by UserIsOwnerPermission to locate the owning user
    owner_field = "user"
    queryset = BootcampApplication.objects.all()
    serializer_class = BootcampApplicationDetailSerializer
    def post(self, request, *args, **kwargs):
        """
        Update the application with resume and/or linkedin URL
        """
        application = self.get_object()
        linkedin_url = request.data.get("linkedin_url")
        resume_file = request.FILES.get("file")
        # at least one of: new file, new URL, or a previously uploaded resume
        if linkedin_url is None and resume_file is None and not application.resume_file:
            raise ValidationError("At least one form of resume is required.")
        if linkedin_url:
            self.validate_linkedin_url(linkedin_url)
        application.add_resume(resume_file=resume_file, linkedin_url=linkedin_url)
        # when state transition happens need to save manually
        application.save()
        return Response(
            {
                "resume_url": (
                    application.resume_file.url if application.resume_file else None
                ),
                "linkedin_url": application.linkedin_url,
                "resume_upload_date": serializer_date_format(
                    application.resume_upload_date
                ),
            },
            status=status.HTTP_200_OK,
        )
    def validate_linkedin_url(self, linkedin_url):
        """
        Validate that a LinkedIn URL has the right format and length

        Args:
            linkedin_url (string): LinkedIn URL of a user

        Raises:
            ValidationError: when the URL is too long or malformed
        """
        if len(linkedin_url) > 200:
            raise ValidationError(
                {"errors": "The URL should be less than 200 characters."}
            )
        regex = re.compile(
            "^(http|https)://"  # Support for both http and https
            "([a-zA-Z]{2,3}[.]|)"  # Support for global or localized prefix
            "linkedin[.]"  # Contains the linkedin domain
            "([a-zA-Z]{2,3})/"  # Support for .com or localized postfix
            "+([a-zA-Z0-9-_])"  # Support for /<in or org>
            "+/+([a-zA-Z0-9-_])+.*$",  # Any type of username
            re.I,
        )
        if not regex.match(str(linkedin_url)):
            raise ValidationError({"errors": "Please enter a valid LinkedIn URL"})
class LettersView(TemplateView):
"""
Render a l |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ---------------------------------------------------- | ----------------
"""Unit tests tools."""
from unittest import TestCase
from six import string_types, PY2
from .version import PY26
from re import match
__all__ = ['UTCase']
def _subset(subset, superset):
"""True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
:return: True iif all pairs (key, value) of subset are in superset.
:rtype: bool
"""
result = True
| for k in subset:
result = k in superset and subset[k] == superset[k]
if not result:
break
return result
class UTCase(TestCase):
    """Class which enriches TestCase with python version compatibilities."""
    def __init__(self, *args, **kwargs):
        super(UTCase, self).__init__(*args, **kwargs)
    # Methods are defined conditionally at class-body level, so each
    # interpreter version only gets the shims it is missing.
    if PY2:  # python 3 compatibility
        if PY26:  # python 2.7 compatibility
            # unittest on Python 2.6 lacks these assertions; emulate the
            # 2.7/3.x behaviour on top of assertTrue/assertFalse/assertEqual.
            def assertIs(self, first, second, msg=None):
                return self.assertTrue(first is second, msg=msg)
            def assertIsNot(self, first, second, msg=None):
                return self.assertTrue(first is not second, msg=msg)
            def assertIn(self, first, second, msg=None):
                return self.assertTrue(first in second, msg=msg)
            def assertNotIn(self, first, second, msg=None):
                return self.assertTrue(first not in second, msg=msg)
            def assertIsNone(self, expr, msg=None):
                return self.assertTrue(expr is None, msg=msg)
            def assertIsNotNone(self, expr, msg=None):
                return self.assertFalse(expr is None, msg=msg)
            def assertIsInstance(self, obj, cls, msg=None):
                return self.assertTrue(isinstance(obj, cls), msg=msg)
            def assertNotIsInstance(self, obj, cls, msg=None):
                return self.assertTrue(not isinstance(obj, cls), msg=msg)
            def assertGreater(self, first, second, msg=None):
                return self.assertTrue(first > second, msg=msg)
            def assertGreaterEqual(self, first, second, msg=None):
                return self.assertTrue(first >= second, msg=msg)
            def assertLess(self, first, second, msg=None):
                self.assertTrue(first < second, msg=msg)
            def assertLessEqual(self, first, second, msg=None):
                return self.assertTrue(first <= second, msg=msg)
            def assertRegexpMatches(self, text, regexp, msg=None):
                # string patterns are matched from the start (re.match),
                # compiled patterns are searched anywhere
                return self.assertTrue(
                    match(regexp, text) if isinstance(regexp, string_types)
                    else regexp.search(text),
                    msg=msg
                )
            def assertNotRegexpMatches(self, text, regexp, msg=None):
                return self.assertIsNone(
                    match(regexp, text) if isinstance(regexp, string_types)
                    else regexp.search(text),
                    msg=msg
                )
            def assertItemsEqual(self, actual, expected, msg=None):
                return self.assertEqual(
                    sorted(actual), sorted(expected), msg=msg
                )
            def assertDictContainsSubset(self, expected, actual, msg=None):
                return self.assertTrue(_subset(expected, actual), msg=msg)
        # Python 3 names backported to any Python 2.
        # NOTE(review): this shim only compares lengths, which is weaker
        # than Python 3's assertCountEqual (and the sorted-based variant in
        # the else branch below) - confirm this is intended.
        def assertCountEqual(self, first, second, msg=None):
            return self.assertEqual(len(first), len(second), msg=msg)
        def assertRegex(self, text, regexp, msg=None):
            return self.assertRegexpMatches(text, regexp, msg)
        def assertNotRegex(self, text, regexp, msg=None):
            return self.assertNotRegexpMatches(text, regexp, msg)
    else:  # python 2 compatibility
        # Python 2 names provided on Python 3.
        def assertRegexpMatches(self, *args, **kwargs):
            return self.assertRegex(*args, **kwargs)
        def assertNotRegexpMatches(self, *args, **kwargs):
            return self.assertNotRegex(*args, **kwargs)
        def assertItemsEqual(self, actual, expected, msg=None):
            return self.assertEqual(sorted(actual), sorted(expected), msg=msg)
        def assertCountEqual(self, actual, expected, msg=None):
            return self.assertEqual(sorted(actual), sorted(expected), msg=msg)
        def assertDictContainsSubset(self, expected, actual, msg=None):
            return self.assertTrue(_subset(expected, actual), msg=msg)
|
n
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and c | ustomize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_op | tions = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'javauserguidedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'javauserguide.tex', u'javauserguide Documentation',
u'Patrick Baird', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'javauserguide', u'javauserguide Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'javauserguide', u'javauserguide Documentation',
author, 'javauserguide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'def |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the spec | ific language | governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.tcm.v20210413 import models
class TcmClient(AbstractClient):
    """Client for the Tencent Cloud Service Mesh (TCM) API, version 2021-04-13."""
    _apiVersion = '2021-04-13'
    _endpoint = 'tcm.tencentcloudapi.com'
    _service = 'tcm'

    def _call_and_deserialize(self, action, request, response_model):
        """Serialize *request*, invoke *action*, and deserialize the reply.

        :param action: API action name, e.g. ``"DescribeMesh"``.
        :param request: request model providing ``_serialize()``.
        :param response_model: response model class to instantiate on success.
        :raises TencentCloudSDKException: on an API-level error response or
            any transport/parsing failure.
        """
        try:
            params = request._serialize()
            body = self.call(action, params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = response_model()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                # BUGFIX: Python 3 exceptions have no `.message` attribute, so
                # the original `e.message` raised AttributeError and hid the
                # real cause.  Report the exception type and str(e) instead.
                raise TencentCloudSDKException(type(e).__name__, str(e))

    def DescribeMesh(self, request):
        """Query the details of a mesh.

        :param request: Request instance for DescribeMesh.
        :type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshRequest`
        :rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshResponse`
        """
        return self._call_and_deserialize(
            "DescribeMesh", request, models.DescribeMeshResponse)

    def DescribeMeshList(self, request):
        """Query the list of meshes.

        :param request: Request instance for DescribeMeshList.
        :type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListRequest`
        :rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListResponse`
        """
        return self._call_and_deserialize(
            "DescribeMeshList", request, models.DescribeMeshListResponse)
from __future__ import division
import myhdl
from myhdl import instance, delay
ClockList = []
class Clock(myhdl.SignalType):
    """Boolean clock signal; every instance is appended to module-level ClockList."""
    def __init__(self, val, frequency=1, timescale='1ns'):
        """Create the clock with initial boolean value `val`.

        `frequency` and `timescale` are recorded, but the half-period in
        simulation ticks is currently hard-coded (see _set_hticks).
        """
        self._frequency = frequency
        self._period = 1/frequency
        self._timescale = timescale
        self._hticks = 0
        self._set_hticks()
        myhdl.SignalType.__init__(self, bool(val))
        # Track every Clock created so testbenches can find them all.
        ClockList.append(self)
    @property
    def timescale(self):
        """Simulation timescale string, e.g. '1ns'."""
        return self._timescale
    @timescale.setter
    def timescale(self, t):
        self._timescale = t
    @property
    def frequency(self):
        """Clock frequency; assigning it also refreshes period and tick count."""
        return self._frequency
    @frequency.setter
    def frequency(self, f):
        self._frequency = f
        self._period = 1/f
        self._set_hticks()
    @property
    def period(self):
        """Clock period (1/frequency); read-only."""
        return self._period
    def _set_hticks(self):
        # self._nts = self._convert_timescale(self._timescale)
        # self._hticks = int(round(self._period/self._nts))
        # Half-period hard-coded to 5 ticks until timescale conversion is done.
        self._hticks = 5
    def _convert_timescale(self, ts):
        # @todo: need to complete this, ts is in the form
        # "[0-9]*["ms","us","ns","ps"], parse the text
        # format and retrieve a numerical value
        # separate the numerical and text
        nts = 1e9
        return nts
    def gen(self, hticks=None):
        """Return a myhdl generator instance that toggles this clock forever.

        If `hticks` is given it overrides (and stores) the half-period in
        simulation ticks; otherwise the cached value is used.
        """
        if hticks is None:
            hticks = self._hticks
        else:
            self._hticks = hticks
        # print('hticks %d'%(hticks))
        @instance
        def gclock():
            # Start low, then toggle every `hticks` ticks.
            self.next = False
            while True:
                yield delay(hticks)
                self.next = not self.val
        return gclock
class Reset(myhdl.ResetSignal):
    """A myhdl reset signal with a convenience generator to pulse it."""

    def __init__(self, val, active, isasync):
        # BUGFIX: the parameter was originally named `async`, which became a
        # reserved keyword in Python 3.7 and is now a SyntaxError.  Renamed
        # to `isasync`, matching myhdl's own ResetSignal argument name.
        myhdl.ResetSignal.__init__(self, val, active, isasync)

    def pulse(self, delays=10):
        """Yield a reset pulse (use inside a myhdl simulation).

        delays: int   -- assert reset for `delays` ticks, then deassert.
        delays: tuple -- 1 to 3 delay values; start deasserted when three
                delays are given (otherwise asserted), toggle after each
                delay, and finish deasserted.
        Raises ValueError for any other type.
        """
        if isinstance(delays, int):
            self.next = self.active
            yield delay(delays)
            self.next = not self.active
        elif isinstance(delays, tuple):
            assert len(delays) in (1, 2, 3), "Incorrect number of delays"
            self.next = not self.active if len(delays) == 3 else self.active
            for dd in delays:
                yield delay(dd)
                self.next = not self.val
            self.next = not self.active
        else:
            raise ValueError("{} type not supported".format(type(delays)))
|
"""
model.py
by Ted Morin
contains a function to predict 10-year Atrial Fibrillation risks using beta coefficients from
10.1016:S0140-6736(09)60443-8
2010 Development of a Risk Score for Atrial Fibrillation in the Community
Framingham Heart Study
translated and optimized from FHS online risk calculator's javascript
function expects parameters of
"Male Sex" "Age" "BMI" "Systolic BP" "Antihypertensive Medication Use" "PR Interval" "Sig. Murmur" "Prev Heart Fail"
years kg/m^2 mm Hg mSec
bool int/float int/float int/float bool int/float bool bool
"""
"""
# originally part of the function, calculates xbar_value
xbar_values = np.array([
0.4464, # gender
60.9022, # age
26.2861, # bmi
136.1674, # sbp
0.2413, # hrx
16.3901, # pr_intv
0.0281, # vhd
0.0087, # hxchf
3806.9000, # age2
1654.6600, # gender_age2
1.8961, # age_vhd
0.6100 # age_hxchf
])
xbar_value = np.dot(xbar_values,betas) # this constant should be hard coded like s0!
# (and now it is)
"""
def model(ismale, age, bmi, sbp, antihyp, pr_intv, sigmurm, phf):
    """Predict 10-year atrial-fibrillation risk (Framingham Heart Study).

    Cox proportional-hazards risk score from 10.1016/S0140-6736(09)60443-8,
    translated from the FHS online calculator's javascript.

    Parameters
    ----------
    ismale : bool -- male sex
    age : int/float -- years
    bmi : int/float -- kg/m^2
    sbp : int/float -- systolic blood pressure, mm Hg
    antihyp : bool -- antihypertensive medication use
    pr_intv : int/float -- PR interval in seconds
    sigmurm : bool -- significant murmur
    phf : bool -- prevalent heart failure

    Returns
    -------
    float -- estimated 10-year risk in [0, 1].
    """
    import numpy as np

    # convert seconds to milliseconds as used in regression
    pr_intv = pr_intv * 1000.0
    # Inexplicable extra /10 carried over from the FHS javascript; the output
    # looks much more realistic with it.  The published coefficient may simply
    # be per 10 ms rather than per ms -- TODO confirm against the paper.
    pr_intv = pr_intv / 10.0

    # Beta coefficients, in the same order as `values` below.
    betas = np.array([
        1.994060,   # gender
        0.150520,   # age
        0.019300,   # bmi          Body Mass Index
        0.00615,    # sbp          Systolic Blood Pressure
        0.424100,   # hrx          Treatment for hypertension
        0.070650,   # pr_intv      PR interval
        3.795860,   # vhd          Significant Murmur
        9.428330,   # hxchf        Prevalent Heart Failure
        -0.000380,  # age2         age squared
        -0.000280,  # gender_age2  male gender times age squared
        -0.042380,  # age_vhd      age times murmur
        -0.123070,  # age_hxchf    age times prevalent heart failure
    ])
    s0 = 0.96337               # baseline survival ("const is from the spreadsheet")
    xbar_value = 10.785528582  # precomputed dot(betas, cohort mean covariates)

    # Raw covariates followed by the derived interaction terms.
    values = np.array([
        ismale, age, bmi, sbp, antihyp, pr_intv, sigmurm, phf,
        age * age,           # age squared
        ismale * age * age,  # gender times age squared
        sigmurm * age,       # age times significant murmur
        phf * age,           # age times prevalent heart failure
    ], dtype=float)

    # Cox regression model: risk = 1 - S0 ** exp(x.b - xbar.b)
    risk = 1.0 - np.power(s0, np.exp(np.dot(values, betas) - xbar_value))
    # The original JS capped risk at 0.3; not obviously justified, left uncapped.
    return risk
|
import xml.etree.ElementTree as ET
import numpy as np
import openmc
import pytest
from tests.unit_tests import assert_unbounded
def test_basic():
    """Exercise basic Universe cell management and type checking."""
    cell_a, cell_b, cell_c = (openmc.Cell() for _ in range(3))
    univ = openmc.Universe(name='cool', cells=(cell_a, cell_b, cell_c))
    assert univ.name == 'cool'
    assert set(univ.cells.values()) == {cell_a, cell_b, cell_c}
    # __repr__ should not raise
    repr(univ)
    # Only Cell instances may be added; add_cells expects an iterable.
    with pytest.raises(TypeError):
        univ.add_cell(openmc.Material())
    with pytest.raises(TypeError):
        univ.add_cells(cell_a)
    univ.remove_cell(cell_c)
    assert set(univ.cells.values()) == {cell_a, cell_b}
    univ.clear_cells()
    assert not set(univ.cells)
def test_bounding_box():
    """Nested Z-cylinders are bounded in x/y but unbounded along z."""
    inner = openmc.ZCylinder(r=1.0)
    outer = openmc.ZCylinder(r=2.0)
    core = openmc.Cell(region=-inner)
    shell = openmc.Cell(region=+inner & -outer)
    univ = openmc.Universe(cells=[core, shell])
    lower_left, upper_right = univ.bounding_box
    assert lower_left == pytest.approx((-2., -2., -np.inf))
    assert upper_right == pytest.approx((2., 2., np.inf))
    # An empty universe is unbounded in every direction.
    assert_unbounded(openmc.Universe())
def test_plot(run_in_tmpdir, sphere_model):
    """Universe.plot succeeds for every 2-D basis."""
    material = sphere_model.materials[0]
    root = sphere_model.geometry.root_universe
    color_map = {material: 'limegreen'}
    for view in ('xy', 'yz', 'xz'):
        root.plot(basis=view, pixels=(10, 10), color_by='material',
                  colors=color_map)
def test_get_nuclides(uo2):
    """A UO2-filled universe reports its nuclides."""
    fuel_cell = openmc.Cell(fill=uo2)
    universe = openmc.Universe(cells=[fuel_cell])
    assert universe.get_nuclides() == ['U235', 'O16']
def test_cells():
    """get_all_cells recurses into universes used as cell fills."""
    outer_cells = [openmc.Cell() for _ in range(5)]
    inner_cells = [openmc.Cell() for _ in range(3)]
    outer_cells[0].fill = openmc.Universe(cells=inner_cells)
    universe = openmc.Universe(cells=outer_cells)
    assert set(universe.cells.values()) == set(outer_cells)
    assert set(universe.get_all_cells().values()) == set(outer_cells + inner_cells)
def test_get_all_materials(cell_with_lattice):
    """Every material reachable through the universe is reported."""
    cells, mats, univ, lattice = cell_with_lattice
    assert set(univ.get_all_materials().values()) == set(mats)
def test_get_all_universes():
    """get_all_universes finds universes used as fills one level down."""
    leaf_a = openmc.Universe(cells=[openmc.Cell()])
    leaf_b = openmc.Universe(cells=[openmc.Cell()])
    root = openmc.Universe(cells=[openmc.Cell(fill=leaf_a),
                                  openmc.Cell(fill=leaf_b)])
    assert set(root.get_all_universes().values()) == {leaf_a, leaf_b}
def test_create_xml(cell_with_lattice):
    """create_xml_subelement writes one <cell> element per cell."""
    cells = [openmc.Cell() for _ in range(5)]
    universe = openmc.Universe(cells=cells)
    root = ET.Element('geom')
    universe.create_xml_subelement(root)
    cell_elems = root.findall('cell')
    assert len(cell_elems) == len(cells)
    assert all(elem.get('universe') == str(universe.id) for elem in cell_elems)
    assert {elem.get('id') for elem in cell_elems} == {str(c.id) for c in cells}
|
"""
.. _ex-inverse-source-power:
==========================================
Compute source power using DICS beamformer
==========================================
Compute a Dynamic Imaging of Coherent Sources (DICS) :footcite:`GrossEtAl2001`
filter from single-trial activity to estimate source power across a frequency
band. This example demonstrates how to source localize the event-related
synchronization (ERS) of beta band activity in the
:ref:`somato dataset <somato-dataset>`.
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Roman Goj <roman.goj@gmail.com>
# Denis Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
import mne
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
print(__doc__)
# %%
# Reading the raw data and creating epochs:
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Use a shorter segment of raw just for speed here
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(0, 120) # one minute for speed (looks similar to using all ~800 sec)
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, preload=True)
del raw
# Paths to forward operator and FreeSurfer subject directory
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects')
# %%
# We are interested in the beta band. Define a range of frequencies, using a
# log scale, from 12 to 30 Hz.
freqs = np.logspace(np.log10(12), np.log10(30), 9)
# %%
# Computing the cross-spectral density matrix for the beta frequency band, for
# different time intervals. We use a decim value of 20 to speed up the
# computation in this example at the loss of accuracy.
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)
info = epochs.info
del epochs
# %%
# To compute the source power for a frequency band, rather than each frequency
# separately, we average the CSD objects across frequencies.
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
# %%
# Computing DICS spatial filters using the CSD that was computed on the entire
# timecourse.
fwd = mne.read_forward_solution(fname_fwd)
filters = make_dics(info, fwd, csd, noise_csd=csd_baseline,
pick_ori='max-power', reduce_rank=True, real_filter=True)
del fwd
# %%
# Applying DICS spatial filters separately to the CSD computed using the
# baseline an | d the CSD computed during the ERS activity.
baseline_source_power, freqs = apply_dics_csd(csd_baseline, fi | lters)
beta_source_power, freqs = apply_dics_csd(csd_ers, filters)
# %%
# Visualizing source power during ERS activity relative to the baseline power.
stc = beta_source_power / baseline_source_power
message = 'DICS source power in the 12-30 Hz frequency band'
brain = stc.plot(hemi='both', views='axial', subjects_dir=subjects_dir,
subject=subject, time_label=message)
# %%
# References
# ----------
# .. footbibliography::
|
"""
This is for taking a json file and putting it into a shapefile.
It uses the generic functions written by mfoley.
"""
from geo_utils import get_data_from_geoserver
import json
import six
from fiona import collection
import pyproj
from shapely.geometry import Point, mapping
# Output shapefile name and the WFS layer to pull the museum points from.
op_file = "museums.shp"
server = "mf2.dit.ie:8080"
dbase = "dit:dublin_museums"
# Reproject WGS84 lon/lat (EPSG:4326) to Irish Transverse Mercator (EPSG:2157).
# NOTE(review): the "+init=EPSG:xxxx" syntax and pyproj.transform are
# deprecated in pyproj >= 2 -- confirm the installed pyproj still accepts them.
crs_from = pyproj.Proj("+init=EPSG:4326")
crs_to = pyproj.Proj("+init=EPSG:2157")
museums = get_data_from_geoserver(server, dbase)
# Map each museum name to its coordinate pair from the GeoJSON features.
pts = {}
for place in museums['features']:
    pts[place['properties']['name']] = (place['geometry']['coordinates'])
# One point geometry with a single 'name' attribute per record.
schema = { 'geometry': 'Point', 'properties': { 'name': 'str' } }
with collection(
    op_file, "w", "ESRI Shapefile", schema) as output:
    for k, v in pts.items():
        x, y = pyproj.transform(crs_from, crs_to, v[0], v[1])
        point = Point(x, y)
        output.write({'properties': {'name': k},'geometry': mapping(point)})
def make_a_shapefile(source, *dest):
    """Inspect *source* and report what kind of GeoJSON-like input it is.

    NOTE(review): unfinished stub -- it only prints diagnostics for dict
    FeatureCollections and silently ignores lists; no shapefile is written
    and *dest* is currently unused.
    """
    if isinstance(source, dict) and source["type"] == "FeatureCollection":
        print('This is a FC')
        if len(source["features"]):
            print('There is gj_stack')
    if isinstance(source, list):
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.