prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
impor | t _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="layout.ternary.baxis", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min= | kwargs.pop("min", 1),
**kwargs
)
|
from pydatastream import Datastream
import json
import datetime
import sys
import os.path
#hardcoded directories
dir_input = "input/"
dir_output = "output/"
#check that the login credentials and input file location are being passed in
numOfArgs = len(sys.argv) - 1
if numOfArgs != 3:
print "Please run this python script with username,password and input file location in that order respectively."
exit()
#Setup login credentials and input file location
username = str(sys.argv[1])
pw = str(sys.argv[2])
input_f | ile_loc = dir_input + str(sys.argv[3])
#Ensure that the input file location exists
if ( not os.path.isfile(str(input_file_loc)) ):
print "The file " + str(input_file_loc) + " does not exist."
exit()
#login credentials to datastream
DWE = Datastream(username=username,password=pw)
#other info from datastream
info = DWE.system_info()
subscribed_sources = DWE.sources()
#replace missing data with NaNs
DWE.raise_on_error = False
#get all codes, groups, start dates from input file |
with open(input_file_loc,'r') as input_file:
symbol_ref = json.load(input_file)
#download timestamp
download_date = {'Custom_Download_Date' : datetime.datetime.now().isoformat()}
#calculate time taken for entire process
time_taken = datetime.datetime.now()
time_taken = time_taken - time_taken
for desc,desc_value in symbol_ref.iteritems():
for group,group_value in desc_value.iteritems():
#create list for custom fields
custom_fields = list()
for code_key,code_value in group_value.iteritems():
for key,value in code_value.iteritems():
if(key == 'code'):
search_code = value
search_symbol = {'Custom_Ticker' : value}
if(key == 'start_date'):
start_date = value
if(key == 'custom_field'):
custom_fields[:] = []
custom_fields.append(value)
startTime = datetime.datetime.now()
#send request to retrieve the data from Datastream
req = DWE.fetch(str(search_code),custom_fields,date_from=str(start_date),only_data=False)
time_taken = time_taken + datetime.datetime.now() - startTime
#format date and convert to json
raw_json = req[0].to_json(date_format='iso')
raw_metadata = req[1].to_json()
#Data cleaning and processing
#remove the time component including the '.' char from the key values of datetime in the data
raw_json = raw_json.replace("T00:00:00.000Z","")
#replace the metadata's keys from "0" to "default_ws_key"
raw_metadata = raw_metadata.replace("\"0\"","\"Custom_WS_Key\"")
#combine the data and the metadata about the code
allData_str = json.loads(raw_json)
metadata_str = json.loads(raw_metadata)
datastream_combined = {key : value for (key,value) in (allData_str.items() + metadata_str.items())}
#create symbol json string and append to data
data_with_symbol = {key : value for (key,value) in (search_symbol.items() + datastream_combined.items())}
#append group
group_code = {'Custom_Group' : group}
data_with_group = {key : value for (key,value) in (group_code.items() + data_with_symbol.items())}
#append category
category = {'Custom_Description' : desc}
data_with_category = {key : value for (key,value) in (category.items() + data_with_group.items())}
#append download timestamp
final_data = {key : value for (key,value) in (download_date.items() + data_with_category.items())}
final_data_json = json.dumps(final_data)
#decode to the right format for saving to disk
json_file = json.JSONDecoder().decode((final_data_json))
#save to json file on server
if(len(group_value) > 1):
filename = dir_output + desc + '_' + group + '_' + code_key + '.json'
else:
filename = dir_output + desc + '_' + group + '.json'
with open(filename,'w') as outfile:
json.dump(json_file,outfile,sort_keys=True)
print "time taken for " + str(sys.argv[3]) + " to be retrieved: " + str(time_taken)
|
import unittest
from .Weather_analyzer import is_not_number
class BtcPriceTestCase(unittest.TestCase):
d | ef test_checking_of_input_in_form(self):
input = 46
answer = is_not_n | umber(input) # The bitcoin returned changes over time!
self.assertEqual(answer, False) |
import datetime
from django.shortcuts import render_to_response, get_object_or_404, HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from articles.models import Article
from taxonomy.models import TaxonomyMap
from core.views import update_online_users
@update_online_users
def index(request):
articles = Article.objects.all()[:10]
return render_to_response('articles/index.html', {'articles': articles}, context_instance = RequestContext(request))
@update_online_users
def category(request, category_id):
article_ids = TaxonomyMap.objects.filter(term__id = category_id, type_ | _type = 'Category', content_type__model = 'article').values_list('object_id', flat = | True)
category_title = TaxonomyMap.objects.filter(term__id = category_id, type__type = 'Category', content_type__model = 'article')[0].term.term
articles = Article.objects.filter(id__in = article_ids)
return render_to_response('articles/category.html', {'category_id': category_id, 'category_title': category_title, 'articles': articles}, context_instance = RequestContext(request))
@update_online_users
def details(request, title_slug):
article = get_object_or_404(Article, title_slug = title_slug)
return render_to_response('articles/details.html', {'article': article}, context_instance = RequestContext(request)) |
_name') or \
'weechat'
completions[plugin][completion_item]['description'] = \
weechat.infolist_string(infolist, 'description')
weechat.infolist_free(infolist)
return completions
def get_url_options():
"""
Get list of completions hooked by plugins in a dict with 3 indexes:
plugin, item, xxx.
"""
url_options = []
infolist = weechat.infolist_get('url_options', '', '')
while weechat.infolist_next(infolist):
url_options.append({
'name': weechat.infolist_string(infolist, 'name').lower(),
'option': weechat.infolist_integer(infolist, 'option'),
'type': weechat.infolist_string(infolist, 'type'),
'constants': weechat.infolist_string(
infolist, 'constants').lower().replace(',', ', ')
})
weechat.infolist_free(infolist)
return url_options
def update_file(oldfile, newfile, num_files, num_files_updated, obj):
"""Update a doc file."""
try:
with open(oldfile, 'r') as _file:
shaold = hashlib.sha224(_file.read()).hexdigest()
except IOError:
shaold = ''
try:
with open(newfile, 'r') as _file:
shanew = hashlib.sha224(_file.read()).hexdigest()
except IOError:
shanew = ''
if shaold != shanew:
if os.path.exists(oldfile):
os.unlink(oldfile)
os.rename(newfile, oldfile)
num_files_updated['total1'] += 1
num_files_updated['total2'] += 1
num_files_updated[obj] += 1
else:
if os.path.exists(oldfile):
os.unlink(newfile)
num_files['total1'] += 1
num_files['total2'] += 1
num_files[obj] += 1
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def docgen_cmd_cb(data, buf, args):
"""Callback for /docgen command."""
if args:
locales = args.split(' ')
else:
locales = LOCALE_LIST
commands = get_commands()
options = get_options()
infos = get_infos()
infos_hashtable = get_infos_hashtable()
infolists = get_infolists()
hdata = get_hdata()
completions = get_completions()
url_options = get_url_options()
# get path and replace ~ by home if needed
path = weechat.config_get_plugin('path')
if path.startswith('~'):
path = os.environ['HOME'] + path[1:]
# write to doc files, by locale
num_files = defaultdict(int)
num_files_updated = defaultdict(int)
# pylint: disable=undefined-variable
translate = lambda s: (s and _(s)) or s
escape = lambda s: s.replace('|', '\\|')
for locale in locales:
for key in num_files:
if key != 'total2':
num_files[key] = 0
num_files_updated[key] = 0
trans = gettext.translation('weechat',
weechat.info_get('weechat_localedir', ''),
languages=[locale + '.UTF-8'],
fallback=True)
trans.install()
directory = path + '/' + locale[0:2] + '/autogen'
if not os.path.isdir(directory):
weechat.prnt('',
'{0}docgen error: directory "{1}" does not exist'
''.format(weechat.prefix('error'), directory))
continue
# write commands
for plugin in commands:
filename = directory + '/user/' + plugin + '_commands.asciidoc'
tmpfilename = filename + '.tmp'
_file = open(tmpfilename, 'w')
for command in sorted(commands[plugin]):
_cmd = commands[plugin][command]
args = translate(_cmd['args'])
args_formats = args.split(' || ')
desc = translate(_cmd['description'])
args_desc = translate(_cmd['args_description'])
_file.write('[[command_{0}_{1}]]\n'.format(plugin, command))
_file.write('[command]*`{0}`* {1}::\n\n'.format(command, desc))
_file.write('----\n')
prefix = '/' + command + ' '
if args_formats != ['']:
for fmt in args_formats:
_file.write(prefix + fmt + '\n')
prefix = ' ' * len(prefix)
if args_desc:
_file.write('\n')
for line in args_desc.split('\n'):
_file.write(line + '\n')
_file.write('----\n\n')
_file.close()
update_file(filename, tmpfilename, num_files, num_files_updated,
'commands')
# write config options
for config in options:
filename = directory + '/user/' + config + '_options.asciidoc'
tmpfilename = filename + '.tmp'
_file = open(tmpfilename, 'w')
for section in sorted(options[config]):
for option in sorted(options[config][section]):
_opt = options[config][section][option]
opt_type = _opt['type']
string_values = _opt['string_values']
default_value = _opt['default_value']
opt_min = _opt['min']
opt_max = _opt['max']
null_value_allowed = _opt['null_value_allowed']
desc = translate(_opt['description'])
type_nls = translate(opt_type)
values = ''
if opt_type == 'boolean':
values = 'on, off'
elif opt_type == 'integer':
if string_values:
values = string_values.replace('|', ', ')
else:
values = '{0} .. {1}'.format(opt_min, opt_max)
elif opt_type == 'string':
if opt_max <= 0:
values = _('any string')
elif opt_max == 1:
values = _('any char')
elif opt_max > 1:
values = '{0} ({1}: {2})'.format(_('any string'),
_('max chars'),
| opt_max)
else:
| values = _('any string')
default_value = '"{0}"'.format(
default_value.replace('"', '\\"'))
elif opt_type == 'color':
values = _('a WeeChat color name (default, black, '
'(dark)gray, white, (light)red, '
'(light)green, brown, yellow, (light)blue, '
'(light)magenta, (light)cyan), a terminal '
'color number or an alias; attributes are '
'allowed before color (for text color '
'only, not background): \"*\" for bold, '
'\"!\" for reverse, \"/\" for italic, '
'\"_\" for underline')
_file.write('* [[option_{0}.{1}.{2}]] *{3}.{4}.{5}*\n'
''.format(config, section, option, config,
section, option))
_file.write('** {0}: `{1}`\n'.format(_('description'),
desc))
_file.write('** {0}: {1}\n'.format(_('type'), type_nls))
_file.write('** {0}: {1} ({2}: `{3}`)\n'
''.format(_('values'), values,
_('default value'), default_value))
if null_value_allowed:
_file.write('** {0}\n'
''.format(
_('undefined value allowed (null)')))
_file.write('\ |
from six import iteritems
c | lass Playlist:
is_folder = False
playlist_persistent_id = None
parent_persistent_id = None
distinguished_kind = None
playlist_id = None
def __init__(self, playListName=None):
self.name = playListName
self.tracks = []
def __iter__(self):
for attr, value in iteritems(self.__dict__):
yield attr, value
def ToD | ict(self):
return {key: value for (key, value) in self}
|
#!/usr/bin/python
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: k5_novnc_console
short_description: Display the URL to the NoVNC Console
version_added: "1.0"
description:
- returns a URL to the noVNC console.
options:
server_name:
description:
- Name of the server.
required: true
default: None
k5_auth:
description:
- dict of k5_auth module output.
required: true
default: None
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
# Get novnc url
- k5_novnc_console:
server_name: test01
k5_auth: "{{ k5_auth_facts }}"
'''
RETURN = '''
k5_novnc_console_facts
description: Dictionary describing the novnc details.
returned: On success when the server is found
type: dictionary
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
'''
import requests
import os
import json
from ansible.module_utils.basic import *
############## Common debug ###############
k5_debug = False
k5_debug_out = []
def k5_debug_get():
"""Return our debug list"""
return k5_debug_out
def k5_debug_clear():
"""Clear our debug list"""
k5_debug_out = []
def k5_debug_add(s):
"""Add string to debug list if env K5_DEBUG is defined"""
if k5_debug:
k5_debug_out.append(s)
############## functions #############
def k5_get_endpoint(e,name):
"""Pull particular endpoint name from dict"""
return e['endpoints'][name]
def k5_get_server_facts(module, k5_facts):
"""Get server facts"""
endpoint = k5_fac | ts['endpoints']['compute']
auth_token = k5_facts['auth_token']
session = requests.Session()
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token }
url = endpoint + '/servers/detail'
k5_debug_add('endpoint: {0}'.format(endpoint))
k5_debug_add('REQ: {0}'.format(url))
k5_debug_add('headers: {0}'.format(headers))
try:
| response = session.request('GET', url, headers=headers)
except requests.exceptions.RequestException as e:
module.fail_json(msg=e)
# we failed to get data
if response.status_code not in (200,):
module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out)
if 'servers' in response.json():
return response.json()
else:
module.fail_json(msg="Missing servers in response to server details request")
def k5_get_novnc_console(module):
"""Get novnc url"""
global k5_debug
k5_debug_clear()
if 'K5_DEBUG' in os.environ:
k5_debug = True
if 'auth_spec' in module.params['k5_auth']:
k5_facts = module.params['k5_auth']
else:
module.fail_json(msg="k5_auth_facts not found, have you run k5_auth?")
endpoint = k5_facts['endpoints']['compute']
auth_token = k5_facts['auth_token']
server_name = module.params['server_name']
# we need the server_id not server_name, so grab it
server_facts = k5_get_server_facts(module, k5_facts)
server_id = ''
for s in server_facts['servers']:
if s['name'] == server_name:
server_id = s['id']
break
if server_id == '':
if k5_debug:
module.exit_json(changed=False, msg="Server " + server_name + " not found", debug=k5_debug_out)
else:
module.exit_json(changed=False, msg="Server " + server_name + " not found")
k5_debug_add('auth_token: {0}'.format(auth_token))
k5_debug_add('server_name: {0}'.format(server_name))
session = requests.Session()
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token }
url = endpoint + '/servers/' + server_id + '/action'
query_json = { 'os-getVNCConsole': {'type': 'novnc' }}
k5_debug_add('endpoint: {0}'.format(endpoint))
k5_debug_add('REQ: {0}'.format(url))
k5_debug_add('headers: {0}'.format(headers))
k5_debug_add('json: {0}'.format(query_json))
try:
response = session.request('POST', url, headers=headers, json=query_json)
except requests.exceptions.RequestException as e:
module.fail_json(msg=e)
# we failed to make a change
if response.status_code not in (200,):
module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out)
if k5_debug:
module.exit_json(changed=True, msg="Get URL Successful", k5_novnc_console_facts=response.json(), debug=k5_debug_out )
module.exit_json(changed=True, msg="Get URL Successful", k5_novnc_console_facts=response.json() )
######################################################################################
def main():
module = AnsibleModule( argument_spec=dict(
server_name = dict(required=True, default=None, type='str'),
k5_auth = dict(required=True, default=None, type='dict')
) )
k5_get_novnc_console(module)
######################################################################################
if __name__ == '__main__':
main()
|
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return self.objtype == 'function'
def handle_signature(self, sig, signode):
"""Transform a Yacas signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = yacas_sig_re.match(sig)
if m is None:
raise ValueError
syntax, name, arglist = m.groups()
add_module = False
fullname = name
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if add_module and self.env.config.add_module_names:
modname = self.options.get(
'module', self.env.temp_data.get('ys:module'))
if modname:
nodetext = modname + '.'
signode += addnodes.desc_addname(nodetext, nodetext)
anno = self.options.get('annotation')
if syntax == 'prefix':
signode += addnodes.desc_name(name, name)
signode += addnodes.desc_type(arglist, arglist)
return fullname, ''
if syntax == 'infix':
left, right = arglist.split(',')
left = left + ' '
right = ' ' + right
signode += addnodes.desc_type(left, left)
signode += addnodes.desc_name(name, name)
signode += addnodes.desc_type(right, right)
return fullname, ''
if syntax == 'postfix':
signode += addnodes.desc_type(arglist, arglist)
signode += addnodes.desc_name(name, name)
return fullname, ''
signode += addnodes.desc_name(name, name)
if not arglist:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, ''
if (syntax == 'bodied'):
body = arglist.split(',')[0]
arglist = str.join(',', arglist.split(',')[1:])
_pseudo_parse_arglist(signode, arglist)
if (syntax == 'bodied'):
signode += addnodes.desc_type(' ' + body, ' ' + body)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, ''
def get_index_text(self, modname, name):
"""Return the text for the index entry of the object."""
if self.objtype == 'function':
return _('%s()') % name[0]
elif self.objtype == 'data':
return _('%s') % name[0]
else:
return ''
def add_target_and_index(self, name_cls, sig, signode):
modname = self.options.get(
'module', self.env.temp_data.get('ys:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['ys']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, '', None))
def before_content(self):
# needed for automatic qualification of members (reset in subclasses)
self.clsname_set = False
def after_content(self):
if self.clsname_set:
self.env.temp_data['yacas:class'] = None
class YacasXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode['ys:module'] = env.temp_data.get('ys:module')
refnode['ys:class'] = env.temp_data.get('ys:class')
if not has_explicit_title:
title = title.lstrip('.') # only has a meaning for the target
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot+1:]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
class YacasDomain(Domain):
"""Yacas language domain."""
name = 'ys'
label = 'Yacas'
object_types = {
'function': ObjType(l_('function'), 'func', 'obj'),
'data': ObjType(l_('data'), 'data', 'obj'),
}
directives = {
'function': YacasObject,#YacasModulelevel,
'data': YacasObject,#YacasModulelevel,
}
roles = {
'data': YacasXRefRole(),
'func': YacasXRefRole(fix_parens=True),
'const': YacasXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
def clear_doc(self, docname):
for fullname, (fn, _) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def find_obj(self, env, modname, classname, name, type, searchmode=0):
"""Find a Yacas object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
objtypes = self.objtypes_for_role(type)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if fullname in objects and objects[fullname][1] in objtypes:
newname = fullname
if not newname:
if modname and modname + '.' + name in objects and \
objects[modname + '.' + name][1] in objtypes:
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
| # "fuzzy" searching mode
| searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname)
and objects[oname][1] in objtypes]
else:
# NOTE: searching for exact match, object type is not considered
if name in objects:
newname = name
elif type == 'mod':
# only exact matches allowed for modules
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname |
import unittest
from datetime import timedelta, datetime
import sys
import json
sys.path.append("../../config")
sys.path.append("../../html")
import ghObjects
import ghObjectRecipe
class testObjects(unittest.TestCase):
def setUp(self):
# nothin yet
self.test = "rad"
def test_spawnHTML(self):
# arrange
spawnName = "testspawn"
s = ghObjects.resourceSpawn()
s.spawnID = 42
s.spawnName = spawnName
s.spawnGalaxy = 1
s.resourceType = "wood_deciduous_yavin4"
s.resourceTypeName = "Yavinian Deciduous Wood"
s.containerType = "flora_structural"
s.stats.CR = 0
s.stats.CD = 0
s.stats.DR = 780
s.stats.FL = 0
s.stats.HR = 0
s.stats.MA = 560
s.stats.PE = 0
s.stats.OQ = 656
s.stats.SR = 450
s.stats.UT = 800
s.stats.ER = 0
s.percentStats.CR = None
s.percentStats.CD = None
s.percentStats.DR = 780.0/800
s.percentStats.FL = None
s.percentStats.HR = None
s.percentStats.MA = 160.0/400
s.percentStats.PE = None
s.percentStats.OQ = 656.0/1000
s.percentStats.SR = 150.0/400
s.percentStats.UT = 800.0/800
s.percentStats.ER = None
s.entered = daysago = datetime.now() - timedelta(4)
s.ente | redBy = "ioscode"
s.veri | fied = daysago = datetime.now() - timedelta(3)
s.verifiedBy = "tester"
s.unavailable = None
s.unavailableBy = None
s.maxWaypointConc = None
# act
mobileHTML = s.getMobileHTML("", 0, 0)
normalHTML = s.getHTML(0, "", "", 0, 0)
rowHTML = s.getRow(False)
invHTML = s.getInventoryObject()
spawnJSON = s.getJSON()
spawnJSON = "{ " + spawnJSON[:-2] + " }"
#assert
self.assertIn("ioscode", mobileHTML, "Username not in mobile HTML.")
self.assertIn("ioscode", normalHTML, "Username not in normal HTML.")
self.assertIn(spawnName, rowHTML, "No spawn name in row HTML.")
self.assertIn(spawnName, invHTML, "No spawn name in inventory HTML.")
try:
jsonObject = json.loads(spawnJSON)
jsonValid = True
except ValueError:
jsonValid = False
self.assertTrue(jsonValid, "Generated Spawn JSON output not valid.")
def test_recipeRender(self):
# arrage
r = ghObjectRecipe.schematicRecipe()
r.recipeID = 1
r.schematicID = "armor_segment_composite_advanced"
r.recipeName = "Test Recipe"
i1 = ghObjectRecipe.recipeIngredient("steel_kiirium", "17895", "armor_layer_weld_tabs", 8, "0", "Kiirium Steel", 455, "stuff steel")
i2 = ghObjectRecipe.recipeIngredient("copper_polysteel", "13455", "segment_mounting_tabs", 5, "0", "Polysteel Copper", 877, "This is great")
r.recipeIngredients.append(i1)
r.recipeIngredients.append(i2)
# act
slotHTML = r.getIngredientSlots()
rowHTML = r.getRow()
# assert
self.assertIn("steel_kiirium", slotHTML, "Resource id not in slot html.")
self.assertIn("Test Recipe", rowHTML, "Title not in row html.")
self.assertIn("yellow", slotHTML, "Expected quality color not present in slot HTML.")
if __name__ == '__main__':
unittest.main()
|
import json
import dnot
from mock import patch
import unittest2
class NotifierTes | t(unittest2.TestCase):
@patch("dnot.sns.connect_to_region")
def test_parameters_are_submitted(self, connect_to_region_mock):
topic = "abc"
region = "eu-west-2"
result_topic = "result"
stack_name = "stack1"
params = '{"key": "value"}'
| notifier = dnot.Notifier(sns_region=region)
notifier.publish(sns_topic_arn=topic, stack_name=stack_name, result_topic=result_topic, params=params)
connect_to_region_mock.assert_called_with(region)
message = json.loads('{{"stackName": "{0}", "notificationARN": "{1}", "region": "eu-west-1", "params": {2}}}'
.format(stack_name, result_topic, params))
connect_to_region_mock.return_value.publish.assert_called_with(
topic=topic,
message=json.dumps(message))
|
from __future__ import unicode_literals
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('user', '0018_auto_20160922_1258'),
]
operation | s = [
migrations.AddField(
model_name='userprofile',
name='activity_cantons',
field=multiselectfield.db.fields.MultiSelectField(default='', verbose_name='Défi Vélo mobile', choices=[('BS', 'Basel-Stadt'), ('BE', 'Berne'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VS', 'Valais'), ('VD', 'Vaud'), ('ZH', 'Zurich')], max_length=29),
| preserve_default=False,
),
migrations.AlterField(
model_name='userprofile',
name='affiliation_canton',
field=models.CharField(verbose_name="Canton d'affiliation", choices=[('', '---------'), ('BS', 'Basel-Stadt'), ('BE', 'Berne'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VS', 'Valais'), ('VD', 'Vaud'), ('ZH', 'Zurich')], max_length=2),
),
]
|
from __future__ import print_function
import inspect
import numpy as np
import theano
from ..layers.advanced_activations import LeakyReLU, PReLU
from ..layers.core import Dense, Merge, Dropout, Activation, Reshape, Flatten, RepeatVector, Layer
from ..layers.core import ActivityRegularization, TimeDistributedDense, AutoEncoder, MaxoutDense
from ..layers.embeddings import Embedding, WordContextProduct
from ..layers.noise import GaussianNoise, GaussianDropout
from ..layers.normalization import BatchNormalization
from ..layers.recurrent import SimpleRNN, SimpleDeepRNN, GRU, LSTM, JZS1, JZS2, JZS3
from ..layers import containers
from .. import regularizers
from .. import constraints
def container_from_config(layer_dict):
name = layer_dict.get('name')
hasParams = False
if name == 'Merge':
mode = layer_dict.get('mode')
layers = layer_dict.get('layers')
layer_list = []
for layer in layers:
init_layer = container_from_config(layer)
layer_list.append(init_layer)
merge_layer = Merge(layer_list, mode)
return merge_layer
elif name == 'Sequential':
layers = layer_dict.get('layers')
layer_list = []
for layer in layers:
init_layer = container_from_config(layer)
layer_list.append(init_layer)
seq_layer = containers.Sequential(layer_list)
return seq_layer
elif name == 'Graph':
graph_layer = containers.Graph()
inputs = layer_dict.get('input_config')
for input in inputs:
graph_layer.add_input(**input)
nodes = layer_dict.get('node_config')
for node in nodes:
layer = container_from_config(layer_dict['nodes'].get(node['name']))
node['layer'] = layer
graph_layer.add_node(**node)
outputs = layer_dict.get('output_config')
for output in outputs:
graph_layer.add_output(**output)
return graph_layer
else:
# The case in which layer_dict represents an "atomic" layer
layer_dict.pop('name')
if 'parameters' in layer_dict:
params = layer_dict.get('parameters')
layer_dict.pop('parameters')
hasParams = True
for k, v in layer_dict.items():
# For now, this can only happen for regularizers and constraints
if isinstance(v, dict):
vname = v.get('name')
v.pop('name')
if vname in [x for x, y in inspect.getmembers(constraints, predicate=inspect.isclass)]:
layer_dict[k] = constraints.get(vname, v)
if vname in [x for x, y in inspect.getmembers(regularizers, predicate=inspect.isclass)]:
layer_dict[k] = regularizers.get(vname, v)
base_layer = get_layer(name, layer_dict)
if hasParams:
shaped_params = []
for param in params:
data = np.asarray(param.get('data'))
shape = tuple(param.get('shape'))
shaped_params.append(data.reshape(shape))
base_layer.set_weights(shaped_params)
return base_layer
def print_layer_shapes(model, | input_shapes):
"""
Utility function to print the shape of the output at each layer of a Model
Arguments:
model: instance of Model / Merge
input_shapes: dict (Graph) | , list of tuples (Merge) or tuple (Sequential)
"""
if model.__class__.__name__ in ['Sequential', 'Merge']:
# in this case input_shapes is a tuple, or a list [shape1, shape2]
if not isinstance(input_shapes[0], tuple):
input_shapes = [input_shapes]
inputs = model.get_input(train=False)
if not isinstance(inputs, list):
inputs = [inputs]
input_dummy = [np.zeros(shape, dtype=np.float32)
for shape in input_shapes]
layers = model.layers
elif model.__class__.__name__ == 'Graph':
# in this case input_shapes is a dictionary
inputs = [model.inputs[name].input
for name in model.input_order]
input_dummy = [np.zeros(input_shapes[name], dtype=np.float32)
for name in model.input_order]
layers = [model.nodes[c['name']] for c in model.node_config]
print("input shapes : ", input_shapes)
for l in layers:
shape_f = theano.function(inputs, l.get_output(train=False).shape,
on_unused_input='ignore')
out_shape = tuple(shape_f(*input_dummy))
config = l.get_config()
print('shape after %s: %s' % (config['name'], out_shape))
from .generic_utils import get_from_module
def get_layer(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'layer', instantiate=True, kwargs=kwargs)
|
to to the txmanager"""
bl_idname = "rman_txmgr_list.parse_scene"
bl_label = "Parse Scene"
bl_description = "Parse the scene and look for textures that need converting."
def execute(self, context):
rman_txmgr_list = context.scene.rman_txmgr_list
texture_utils.parse_for_textures(context.scene)
texture_utils.get_txmanager().txmake_all(blocking=False)
bpy.ops.rman_txmgr_list.refresh('EXEC_DEFAULT')
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_reset_state(Operator):
"""Reset State"""
bl_idname = "rman_txmgr_list.reset_state"
bl_label = "Reset State"
bl_description = "All texture settings will be erased and the scene will be re-parsed. All manual edits will be lost."
def execute(self, context):
rman_txmgr_list = context.scene.rman_txmgr_list
rman_txmgr_list.clear()
texture_utils.get_txmanager().txmanager.reset()
texture_utils.parse_for_textures(context.scene)
texture_utils.get_txmanager().txmake_all(blocking=False)
texture_utils.get_txmanager().txmanager.reset_state()
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_clear_unused(Operator):
    """Clear Unused"""
    bl_idname = "rman_txmgr_list.clear_unused"
    bl_label = "Clear Unused"
    bl_description = "Clear unused textures"
    def execute(self, context):
        """Remove list entries whose owning node no longer references them."""
        stale_ids = []
        for entry in context.scene.rman_txmgr_list:
            entry_id = entry.nodeID
            if entry_id == "":
                continue
            # No txfile behind this ID at all: definitely stale.
            txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(entry_id)
            if not txfile:
                stale_ids.append(entry_id)
                continue
            pieces = entry_id.split('|')
            if len(pieces) < 3:
                continue
            node_name, param, ob_name = pieces
            node, ob = scene_utils.find_node_by_name(node_name, ob_name)
            if not node:
                continue
            # The node's parameter now points at a different image.
            if getattr(node, param) != entry.name:
                stale_ids.append(entry_id)
        for entry_id in stale_ids:
            bpy.ops.rman_txmgr_list.remove_texture('EXEC_DEFAULT', nodeID=entry_id)
        return {'FINISHED'}
class PRMAN_OT_Renderman_txmanager_pick_images(Operator, ImportHelper):
    """Pick images from a directory."""
    bl_idname = "rman_txmgr_list.pick_images"
    bl_label = "Pick Images"
    bl_description = "Manually choose images on disk to convert."
    # File-browser fields populated by the ImportHelper mixin.
    filename: StringProperty(maxlen=1024)
    directory: StringProperty(maxlen=1024)
    files: CollectionProperty(type=bpy.types.PropertyGroup)
    def execute(self, context):
        """Register each picked image under a fresh UUID and queue conversion."""
        # NOTE(review): rman_txmgr_list appears unused in this method.
        rman_txmgr_list = context.scene.rman_txmgr_list
        if len(self.files) > 0:
            for f in self.files:
                img = os.path.join(self.directory, f.name)
                # Hand-picked images have no owning node, so invent an ID.
                nodeID = str(uuid.uuid1())
                texture_utils.get_txmanager().txmanager.add_texture(nodeID, img)
                bpy.ops.rman_txmgr_list.add_texture('EXEC_DEFAULT', filepath=img, nodeID=nodeID)
            texture_utils.get_txmanager().txmake_all(blocking=False)
            texture_utils.get_txmanager().txmanager.save_state()
            PRMAN_PT_Renderman_txmanager_list.refresh_panel(context)
        return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_clear_all_cache(Operator):
    """Clear RenderMan Texture cache"""
    bl_idname = "rman_txmgr_list.clear_all_cache"
    bl_label = "Flush Texture Cache"
    bl_description = "Tell the core RenderMan to flush its texture cache."
    def execute(self, context):
        """Flush all converted textures from a live interactive render."""
        rr = rman_render.RmanRender.get_rman_render()
        # Only meaningful while an interactive render owns a scene graph.
        if rr.rman_interactive_running and rr.sg_scene:
            texture_list = list()
            for item in context.scene.rman_txmgr_list:
                if item.nodeID != "":
                    output_texture = texture_utils.get_txmanager().get_output_tex_from_id(item.nodeID)
                    texture_list.append(output_texture)
            if texture_list:
                rr.rman_scene_sync.flush_texture_cache(texture_list)
        return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_reconvert_all(Operator):
    """Clear all .tex files and re-convert."""
    bl_idname = "rman_txmgr_list.reconvert_all"
    bl_label = "RE-Convert All"
    bl_description = "Clear all .tex files for all input images and re-convert."
    def execute(self, context):
        """Delete every generated .tex file, then queue a full re-convert."""
        manager = texture_utils.get_txmanager()
        manager.txmanager.delete_texture_files()
        manager.txmake_all(blocking=False)
        return {'FINISHED'}
class PRMAN_OT_Renderman_txmanager_reconvert_selected(Operator):
    """Clear all .tex files and re-convert selected."""
    bl_idname = "rman_txmgr_list.reconvert_selected"
    bl_label = "RE-Convert Selected"
    bl_description = "Clear all .tex files for selected image and re-convert"
    def execute(self, context):
        """Re-generate the .tex files for the currently selected list entry."""
        idx = context.scene.rman_txmgr_list_index
        item = context.scene.rman_txmgr_list[idx]
        txfile = None
        txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
        if txfile:
            rr = rman_render.RmanRender.get_rman_render()
            # Drop stale outputs before rebuilding the texture dictionary.
            txfile.delete_texture_files()
            txfile.build_texture_dict()
            if item.nodeID:
                # Nudge a running interactive render to reload this texture.
                rr.rman_scene_sync.texture_updated(item.nodeID)
            texture_utils.get_txmanager().txmake_all(blocking=False)
        return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_apply_preset(Operator):
    """Apply current settings to the selected texture."""
    bl_idname = "rman_txmgr_list.apply_preset"
    bl_label = "Apply preset"
    bl_description = "Apply the current settings for this input image and re-convert."
    def execute(self, context):
        """Push the UI's txmake settings onto the selected texture and rebuild it."""
        idx = context.scene.rman_txmgr_list_index
        item = context.scene.rman_txmgr_list[idx]
        # Collect the plain txmake settings from the list item.
        txsettings = dict()
        for attr in item.txsettings:
            val = getattr(item, attr)
            if attr == 'data_type' and val == 'default':
                val = None
            txsettings[attr] = val
        # b2r: bump-to-roughness settings; "-1" in the enum means disabled.
        bumprough = dict()
        if item.bumpRough != "-1":
            bumprough['normalmap'] = int(item.bumpRough)
            bumprough['factor'] = item.bumpRough_factor
            bumprough['invert'] = int(item.bumpRough_invert)
            bumprough['invertU'] = int(item.bumpRough_invertU)
            bumprough['invertV'] = int(item.bumpRough_invertV)
            bumprough['refit'] = int(item.bumpRough_refit)
        else:
            # An empty list signals "no bump-rough" to the manager.
            bumprough = list()
        txsettings['bumprough'] = bumprough
        if txsettings:
            txfile = None
            txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
            if txfile:
                # Apply settings, wipe stale outputs and queue a re-convert.
                txfile.params.from_dict(txsettings)
                txfile.delete_texture_files()
                txfile.build_texture_dict()
                texture_utils.get_txmanager().txmake_all(blocking=False)
            texture_utils.get_txmanager().txmanager.save_state()
            # update any nodes with colorspace in it
            tokens = item.nodeID.split('|')
            if len(tokens) < 3:
                return {'FINISHED'}
            node_name,param,ob_name = tokens
            prop_colorspace_name = '%s_colorspace' % param
            try:
                # Map the chosen OCIO colorspace name back to its enum index
                # (0 is reserved for "default", hence i+1).
                mdict = texture_utils.get_txmanager().txmanager.color_manager.colorspace_names()
                val = 0
                for i, nm in enumerate(mdict):
                    if nm == item.ocioconvert:
                        val = i+1
                        break
                node, ob = scene_utils.find_node_by_name(node_name, ob_name)
                if node:
                    node[prop_colorspace_name] = val
            except AttributeError:
                # No color manager configured; leave the node untouched.
                pass
        return {'FINISHED'}
class PRMAN_OT_Renderman_txmanager_add_texture(Operator):
"""Add texture."""
bl_idname = "rman_txmgr_list.add_texture"
bl_label = "add_texture"
filepath: StringProperty()
nodeID: StringPropert |
'''
Created on Feb 23, 2015
@author: rgroten
'''
import ConfigParser
import ssl
from datetime import datetime
from flask.globals import g
# Import NetApp API libraries
from NaElement import NaElement
from NaServer import NaServer
# from flask.globals import g
def connect():
    """Build and return an authenticated NaServer handle for the filer.

    Host and credentials are read from config.ini via getConfigOption().
    """
    # Opt out of HTTPS certificate verification on Pythons that enforce it;
    # older interpreters simply lack the hook and need nothing.
    unverified_factory = getattr(ssl, "_create_unverified_context", None)
    if unverified_factory is not None:
        ssl._create_default_https_context = unverified_factory
    server = NaServer(getConfigOption("NAHost"), 1, 21)
    server.set_server_type("FILER")
    server.set_transport_type("HTTPS")
    server.set_port(443)
    server.set_style("LOGIN")
    server.set_admin_user(getConfigOption("User"), getConfigOption("Password"))
    return server
def getConfigOption(option, section=None):
    """Read *option* from config.ini.

    When *section* is omitted, use the per-request environment g.env if it
    is set; otherwise fall back to the GENERAL section.
    """
    config = ConfigParser.ConfigParser()
    config.read("config.ini")
    if not section:
        try:
            # g.env only exists inside a Flask request context.
            # BUG FIX: previously a falsy g.env left section as None, which
            # made config.get() blow up; default to GENERAL instead.
            section = g.env or "GENERAL"
        except Exception:
            # Outside a request context (or g.env missing) — use GENERAL.
            section = "GENERAL"
    return config.get(section, option)
def executeCmd(cmd):
    """Send *cmd* to the filer and return the response NaElement.

    Failed responses are printed; request/response dumps are printed when
    Debug is 'True' in config.ini.
    """
    debug = getConfigOption("Debug") == 'True'
    server = connect()
    if debug:
        print("Request Object: " + cmd.sprintf())
    ret = server.invoke_elem(cmd)
    if ret.results_status() == "failed":
        print("Error: ")
        print(ret.sprintf())
    if debug:
        print("Response Object: " + ret.sprintf())
    return ret
def listVolumes():
    """Return a 'results' NaElement whose attributes-list holds all volumes
    except those whose names contain any VolFilters entry from config.ini.
    """
    isDebug = getConfigOption("Debug")
    # Request a trimmed attribute set to keep the response small.
    cmd = NaElement("volume-get-iter")
    desired = NaElement("desired-attributes")
    vol_attrs = NaElement("volume-attributes")
    vol_attrs.child_add(NaElement("volume-id-attributes"))
    vol_attrs.child_add(NaElement("volume-snapshot-attributes"))
    vol_attrs.child_add(NaElement("volume-space-attributes"))
    clone_attrs = NaElement("volume-clone-attributes")
    clone_attrs.child_add(NaElement("volume-clone-parent-attributes"))
    vol_attrs.child_add(clone_attrs)
    desired.child_add(vol_attrs)
    cmd.child_add(desired)
    cmd.child_add_string("max-records", "500")
    ret = executeCmd(cmd)
    # Remove volumes whose names contain any of the configured filter strings.
    filterString = getConfigOption("VolFilters")
    filterList = filterString.replace(" ", "").split(",")
    filteredVolumes = NaElement("attributes-list")
    for vol in ret.child_get("attributes-list").children_get():
        volattrs = vol.child_get('volume-id-attributes')
        name = volattrs.child_get_string('name')
        if any(x in name for x in filterList):
            if isDebug == 'True':
                print("Skipping filtered vol : %s" % name)
            continue
        if isDebug == 'True':
            print('Volume Name : %s' % name)
        filteredVolumes.child_add(vol)
    filteredRet = NaElement("results")
    filteredRet.attr_set("status", "passed")
    filteredRet.child_add(filteredVolumes)
    if isDebug == 'True':
        # BUG FIX: count the filtered list, not the raw response — the old
        # code printed the pre-filter count while claiming "after filtering".
        print("Number of volumes (after filtering): " + str(len(filteredVolumes.children_get())))
    return filteredRet
def listSnapshots(volume):
    """Return the snapshot-list-info response for *volume*."""
    request = NaElement('snapshot-list-info')
    request.child_add_string('volume', volume)
    return executeCmd(request)
def createSnapshot(volume, customname=None):
    """Create a snapshot of *volume*.

    Uses *customname* when given; otherwise snap_<volume>_<YYYYmmddHHMMSS>.
    """
    if customname:
        name = customname
    else:
        stamp = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")
        name = "snap_" + volume + "_" + stamp
    request = NaElement('snapshot-create')
    request.child_add_string("volume", volume)
    request.child_add_string("snapshot", name)
    return executeCmd(request)
def deleteSnapshot(volume, snapshot):
    """Delete *snapshot* from *volume*."""
    request = NaElement('snapshot-delete')
    request.child_add_string("snapshot", snapshot)
    request.child_add_string("volume", volume)
    return executeCmd(request)
def restoreSnapshot(volume, snapshot):
    """Revert *volume* to the state captured in *snapshot*."""
    request = NaElement('snapshot-restore-volume')
    request.child_add_string("snapshot", snapshot)
    request.child_add_string("volume", volume)
    return executeCmd(request)
def renameSnapshot(volume, snapshot, newName):
    """Rename *snapshot* on *volume* to *newName*."""
    request = NaElement('snapshot-rename')
    request.child_add_string("current-name", snapshot)
    request.child_add_string("volume", volume)
    request.child_add_string("new-name", newName)
    return executeCmd(request)
def createClone(parentVolume, volume):
    """(Disabled) Create a FlexClone of *parentVolume* named *volume*.

    The request element is built but never sent; a synthetic 'failed'
    result is returned until the feature is enabled.
    """
    cmd = NaElement('volume-clone-create')
    cmd.child_add_string("parent-volume", parentVolume)
    cmd.child_add_string("volume", volume)
    # Feature disabled for now: return a failure without hitting the filer.
    debugret = NaElement("results")
    debugret.attr_set("status", "failed")
    debugret.attr_set("reason", "Creating clones not supported...yet!")
    return debugret
def getEnvs():
    """Build an EnvObj for every environment listed in the GENERAL section.

    Environments whose options fail to load are reported and skipped.
    """
    envObjs = []
    for env in getConfigOption("Environments", "GENERAL").split(","):
        try:
            envObjs.append(EnvObj(env))
        except Exception as e:
            print(str(e))
            print("Error: couldn't load options for environment: " + env)
    return envObjs
class EnvObj:
    """Configuration wrapper for one named environment section.

    NOTE(review): under Python 2 this is an old-style class, so the
    property() objects at the bottom do NOT intercept attribute
    assignment; ``self.name = envName`` then sets a plain instance
    attribute and get_name()'s ``self.__name`` may never exist. Confirm
    which interpreter this runs on before relying on the accessors.
    """
    # Class-level defaults (shadowed by the property definitions below).
    name = ""
    rfcRequired = False
    def __init__(self, envName):
        self.get_env_properties(envName)
    def get_env_properties(self, envName):
        """Load this environment's options; raises if the section is missing."""
        self.name = envName
        self.rfcRequired = getConfigOption("RFCRequired", envName)
        return self
    def get_name(self):
        return self.__name
    def get_rfc_required(self):
        # NOTE(review): reads the public attribute, not self.__rfcRequired.
        return self.rfcRequired
    def set_name(self, value):
        self.__name = value
    def set_rfc_required(self, value):
        self.__rfcRequired = value
    def del_name(self):
        del self.__name
    def del_rfc_required(self):
        del self.__rfcRequired
    name = property(get_name, set_name, del_name, "name's docstring")
    rfcRequired = property(get_rfc_required, set_rfc_required, del_rfc_required, "rfcRequired's docstring")
|
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from selvbetjening.sadmin2 import menu
from selvbetjening.sadmin2.decorators import sadmin_pr | erequisites
from selvbetjening.sadmin2.forms import UserForm, PasswordForm
from selvbetjening.sadmin2.views.generic import generic_create_view
@sadmin_prerequisites
def user_change(request, user_pk):
    """Display and process the edit form for a single user.

    generic_create_view also performs updates when an *instance* is given.
    """
    user = get_object_or_404(get_user_model(), pk=user_pk)
    context = {
        'sadmin2_menu_main_active': 'userportal',
        'sadmin2_breadcrumbs_active': 'user',
        'sadmin2_menu_tab': menu.sadmin2_menu_tab_user,
        'sadmin2_menu_tab_active': 'user',
        'user': user
    }
    # Third positional argument is the success-redirect URL (back to this
    # page), passed by keyword as redirect_success_url in user_password.
    return generic_create_view(request,
                               UserForm,
                               reverse('sadmin2:user', kwargs={'user_pk': user.pk}),
                               message_success=_('User updated'),
                               context=context,
                               instance=user)
@sadmin_prerequisites
def user_password(request, user_pk):
    """Display and process the password-change form for a single user."""
    user = get_object_or_404(get_user_model(), pk=user_pk)
    success_url = reverse('sadmin2:user_password', kwargs={'user_pk': user.pk})
    return generic_create_view(
        request,
        PasswordForm,
        redirect_success_url=success_url,
        message_success=_('Password updated'),
        context={
            'sadmin2_menu_main_active': 'userportal',
            'sadmin2_breadcrumbs_active': 'user_password',
            'sadmin2_menu_tab': menu.sadmin2_menu_tab_user,
            'sadmin2_menu_tab_active': 'password',
            'user': user
        },
        instance=user)
# -*- coding: utf-8 -*-
from django.shortcuts import ren | der, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from inviMarket.models import User
@login_required
def del_partner(request, partner_id):
    """
    Delete the :model:`auth.User` passed by argument from the partners list.
    **Context**
    ``message``
        A string variable used to inform the user.
    **Template:**
    :template:`inviMarket/message.html`
    """
    user = request.user
    partner = get_object_or_404(User.objects.select_related('profile'),
        pk=partner_id)
    # Default message when no partnership (in either direction) exists.
    # BUG FIX: corrected the "Ther user" typo in the user-facing string.
    message = _("The user is not your partner.")
    if partner.profile.partners.filter(pk=user.id).exists():
        # The other user proposed/holds the link on their side: reject it
        # and drop the matching pending notification (code 20).
        partner.profile.partners.remove(user)
        message = _("The partnership proposal has been rejected.")
        user.notification_set.filter(code=20, sender=partner).delete()
    if user.profile.partners.filter(pk=partner_id).exists():
        user.profile.partners.remove(partner)
        message = _("The user is no longer your partner.")
    return render(request, 'message.html', {'message': message})
from typing import Iterable, Mapping, Optional
from lib import data
from ..channel import pyramid
from ..channel import wall
def filterMessage() -> Iterable[data.ChatCommand]:
    """This feature contributes no message filters."""
    filters = []
    return filters
def commands() -> Mapping[str, Optional[data.ChatCommand]]:
    """Return the exact-match command table, built lazily on first call."""
    try:
        return commands.commands
    except AttributeError:
        # Cache the table as a function attribute so it is built only once.
        commands.commands = {
            '!pyramid': pyramid.commandPyramid,
            '!rpyramid': pyramid.commandRandomPyramid,
            '!wall': wall.commandWall,
            }
        return commands.commands
def commandsStartWith() -> Mapping[str, Optional[data.ChatCommand]]:
    """Return the prefix-match command table, built lazily on first call."""
    try:
        return commandsStartWith.commands
    except AttributeError:
        # Cache the table as a function attribute so it is built only once.
        commandsStartWith.commands = {
            '!pyramid-': pyramid.commandPyramidLong,
            '!wall-': wall.commandWallLong,
            }
        return commandsStartWith.commands
def processNoCommand() -> Iterable[data.ChatCommand]:
    """This feature does not handle non-command messages."""
    handlers = []
    return handlers
|
""" Class defining a production step """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import json
from DIRAC import S_OK, S_ERROR
class ProductionStep(object):
    """Define the Production Step object"""
    def __init__(self, **kwargs):
        """Simple constructor.

        NOTE(review): **kwargs is accepted but currently ignored.
        """
        # Default values for transformation step parameters
        self.Name = ""
        self.Description = "description"
        self.LongDescription = "longDescription"
        self.Type = "MCSimulation"
        self.Plugin = "Standard"
        self.AgentType = "Manual"
        self.FileMask = ""
        # Optional / structural parameters
        self.ParentStep = None
        self.Inputquery = None
        self.Outputquery = None
        self.GroupSize = 1
        self.Body = "body"
    def getAsDict(self):
        """Return the step description as a dict wrapped in S_OK, or S_ERROR
        when the ParentStep reference is invalid."""
        prodStepDict = {}
        prodStepDict["name"] = self.Name
        prodStepDict["parentStep"] = []
        # check the ParentStep format: a list of steps, a single step, or None
        if self.ParentStep:
            if isinstance(self.ParentStep, list):
                prodStepDict["parentStep"] = []
                for parentStep in self.ParentStep:  # pylint: disable=not-an-iterable
                    if not parentStep.Name:
                        return S_ERROR("Parent Step does not exist")
                    prodStepDict["parentStep"].append(parentStep.Name)
            elif isinstance(self.ParentStep, ProductionStep):
                if not self.ParentStep.Name:
                    return S_ERROR("Parent Step does not exist")
                prodStepDict["parentStep"] = [self.ParentStep.Name]
            else:
                return S_ERROR("Invalid Parent Step")
        prodStepDict["description"] = self.Description
        prodStepDict["longDescription"] = self.LongDescription
        prodStepDict["stepType"] = self.Type
        prodStepDict["plugin"] = self.Plugin
        prodStepDict["agentType"] = self.AgentType
        prodStepDict["fileMask"] = self.FileMask
        # Optional fields are serialized to JSON strings for transport.
        prodStepDict["inputquery"] = json.dumps(self.Inputquery)
        prodStepDict["outputquery"] = json.dumps(self.Outputquery)
        prodStepDict["groupsize"] = self.GroupSize
        prodStepDict["body"] = json.dumps(self.Body)
        return S_OK(prodStepDict)
|
# coding=UTF8
from __fu | ture__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# Make Django settings importable before Celery loads its configuration.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mydjangoapp.settings")
app = Celery('mydjangoapp')
# NOTE(review): this module-level constant is never passed to the app; the
# effective timezone comes from the Django settings loaded just below.
CELERY_TIMEZONE = 'UTC'
# Pull CELERY_* options from Django settings and register tasks from all apps.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# - | *- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigratio | n
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make billdetails.start_date/end_date nullable."""
    def forwards(self, orm):
        # Changing field 'billdetails.end_date'
        db.alter_column(u'employee_billdetails', 'end_date', self.gf('django.db.models.fields.DateField')(null=True))
        # Changing field 'billdetails.start_date'
        db.alter_column(u'employee_billdetails', 'start_date', self.gf('django.db.models.fields.DateField')(null=True))
    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'billdetails.end_date'
        raise RuntimeError("Cannot reverse this migration. 'billdetails.end_date' and its values cannot be restored.")
        # NOTE(review): everything below the raise is intentionally dead code,
        # kept by South as a template for a hand-written reverse migration.
        # Changing field 'billdetails.end_date'
        db.alter_column(u'employee_billdetails', 'end_date', self.gf('django.db.models.fields.DateField')())
        # User chose to not deal with backwards NULL issues for 'billdetails.start_date'
        raise RuntimeError("Cannot reverse this migration. 'billdetails.start_date' and its values cannot be restored.")
        # Changing field 'billdetails.start_date'
        db.alter_column(u'employee_billdetails', 'start_date', self.gf('django.db.models.fields.DateField')())
    # Frozen ORM snapshot used by South; auto-generated, do not edit by hand.
    models = {
        u'employee.billdetails': {
            'Meta': {'object_name': 'billdetails'},
            'bill_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'emp_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Employee']"}),
            'emp_proj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Project']"}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'employee.employee': {
            'Add1': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
            'Add2': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
            'City': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'Designation': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'Major_Subject': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'Meta': {'object_name': 'Employee'},
            'Qualification': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'Skill_sets': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'Visa_Status': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'Zip_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'bill': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'dob': ('django.db.models.fields.DateField', [], {}),
            'doj': ('django.db.models.fields.DateField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '50'}),
            'exp': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
            'id': ('django.db.models.fields.IntegerField', [], {'max_length': '6', 'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'mobile': ('django.db.models.fields.IntegerField', [], {'max_length': '12'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'personal_email': ('django.db.models.fields.EmailField', [], {'max_length': '50', 'blank': 'True'}),
            'proj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Project']"}),
            'start_date': ('django.db.models.fields.DateField', [], {'blank': 'True'})
        },
        u'employee.project': {
            'Meta': {'object_name': 'Project'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['employee']
# -*- test-case-name: twisted.test.test_newcred -*-
from twisted.internet import defer
from twisted.python import components, failure
from twisted.cred import error, credentials
class ICredentialsChecker(components.Interface):
    """I check sub-interfaces of ICredentials.

    @cvar credentialInterfaces: A list of sub-interfaces of ICredentials which
    specifies which I may check.
    """
    def requestAvatarId(self, credentials):
        """
        @param credentials: something which implements one of the interfaces in
        self.credentialInterfaces.
        @return: a Deferred which will fire a string which identifies an
        avatar, an empty tuple to specify an authenticated anonymous user
        (provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).
        A note on anonymity - We do not want None as the value for anonymous
        because it is too easy to accidentally return it. We do not want the
        empty string, because it is too easy to mistype a password file. For
        example, an .htpasswd file may contain the lines: ['hello:asdf',
        'world:asdf', 'goodbye', ':world']. This misconfiguration will have an
        ill effect in any case, but accidentally granting anonymous access is a
        worse failure mode than simply granting access to an untypeable
        username. We do not want an instance of 'object', because that would
        create potential problems with persistence.
        """
# Sentinel avatar id for an authenticated anonymous user; an empty tuple is
# deliberately neither None nor '' (both are too easy to produce by mistake).
ANONYMOUS = ()
class AllowAnonymousAccess:
    """Checker that grants every IAnonymous credential the ANONYMOUS avatar."""
    __implements__ = ICredentialsChecker
    credentialInterfaces = credentials.IAnonymous,
    def requestAvatarId(self, credentials):
        """Always succeed, firing with the ANONYMOUS sentinel."""
        return defer.succeed(ANONYMOUS)
class InMemoryUsernamePasswordDatabaseDontUse:
    """Checker backed by a plain in-memory {username: password} mapping.

    As the name warns: not for production — passwords are held in clear
    text and the store is per-process only.
    """
    credentialInterfaces = credentials.IUsernamePassword,
    __implements__ = ICredentialsChecker
    def __init__(self):
        self.users = {}
    def addUser(self, username, password):
        """Register (or overwrite) *username* with *password*."""
        self.users[username] = password
    def _cbPasswordMatch(self, matched, username):
        # Callback for credentials.checkPassword: turn the boolean result
        # into either the avatar id or an UnauthorizedLogin failure.
        if matched:
            return username
        else:
            return failure.Failure(error.UnauthorizedLogin())
    def requestAvatarId(self, credentials):
        """Fire with the username on a password match, else UnauthorizedLogin."""
        # BUG FIX (portability): dict.has_key() was removed in Python 3;
        # the `in` operator is equivalent and works on Python 2 as well.
        if credentials.username in self.users:
            return defer.maybeDeferred(
                credentials.checkPassword,
                self.users[credentials.username]).addCallback(
                self._cbPasswordMatch, credentials.username)
        else:
            return defer.fail(error.UnauthorizedLogin())
|
b.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'job_runner.job': {
'Meta': {'ordering': "('title',)", 'unique_together': "(('title', 'job_template'),)", 'object_name': 'Job'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'disable_enqueue_after_fails': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'fail_times': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.JobTemplate']"}),
'notification_addresses': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['job_runner.Job']"}),
'reschedule_interval': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'reschedule_interval_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'blank': 'True'}),
'reschedule_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '18', 'blank': 'True'}),
'script_content': ('django.db.models.fields.TextField', [], {}),
'script_content_partial': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'job_runner.jobtemplate': {
'Meta': {'ordering': "('title',)", 'object_name': 'JobTemplate'},
'auth_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_addresses': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'worker': ('django.db.models.fields.related.ForeignKey', [], {'to' | : "orm['job_runner.Worker']"})
},
'job_runner.killrequest': {
'Meta': {'object_name': 'KillRequest'},
'enqueue_dts': ('django.db.models | .fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'execute_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'run': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Run']"}),
'schedule_dts': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'job_runner.project': {
'Meta': {'ordering': "('title',)", 'object_name': 'Project'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_addresses': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'job_runner.rescheduleexclude': {
'Meta': {'object_name': 'RescheduleExclude'},
'end_time': ('django.db.models.fields.TimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Job']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {})
},
'job_runner.run': {
'Meta': {'ordering': "('-return_dts', '-start_dts', '-enqueue_dts', 'schedule_dts')", 'object_name': 'Run'},
'enqueue_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manual': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Job']"}),
'pid': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}),
'return_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'return_success': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'schedule_children': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'schedule_dts': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'start_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'})
},
'job_runner.runlog': {
'Meta': {'ordering': "('-run',)", 'object_name': 'RunLog'},
'content': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'run': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'run_log'", 'unique': 'True', 'to': "orm['job_runner.Run']"})
},
'job_runner.worker': {
'Meta': {'ordering': "('title',)", 'object_name': 'Worker'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_addresse |
from django.conf.urls import url
from . import views
urlpatterns = [
    # ex: /album/
    url(r'^$', views.index, name='index'),
    # ex: /album/welcome/
    url(r'^welcome/$', views.welcome, name='welcome'),
    # ex: /album/create/
    url(r'^create/$', views.create, name='create'),
    # ex: /album/vietnam_2016/
    url(r'^(?P<album_permalink>[\w_]+)/$', views.detail, name='detail'),
    # ex: /album/vietnam_2016/settings
    # BUG FIX: anchor the pattern with '$' like its siblings so that
    # /<album>/settings/<anything-else> no longer matches this route.
    url(r'^(?P<album_permalink>[\w_]+)/settings/$', views.settings, name='settings'),
]
|
sent += len(chunk)
self.__write(chunk)
else:
for chunk in file_wrapper:
if the_len >= len(chunk):
the_len -= len(chunk)
count += len(chunk)
self.__bytes_sent += len(chunk)
self.__write(chunk)
else:
count += the_len
self.__bytes_sent += the_len
self.__write(chunk[:the_len])
break
except socket.error as e:
if e.errno == 54:
# Client disconnected, ignore
pass
else:
raise
except IOError as err:
if "failed to write data" in str(err) or "client connection closed" in str(err):
## Let's just log this exception without alerting the admin:
register_exception(req=self)
else:
raise
return self.__bytes_sent
def set_content_length(self, content_length):
if content_length is not None:
self.response.headers['content-length'] = str(content_length)
else:
del self.response.headers['content-length']
    def is_https(self):
        """Return True when the request arrived over HTTPS."""
        return self.__is_https
    def get_method(self):
        """Return the HTTP method (GET, POST, ...) from the WSGI environ."""
        return request.environ['REQUEST_METHOD']
    def get_hostname(self):
        """Return the Host header value, or '' when it is absent."""
        return request.environ.get('HTTP_HOST', '')
    def set_filename(self, filename):
        """Expose *filename* via the Content-Disposition response header.

        Defaults the disposition type to 'inline' when none was chosen yet.
        """
        self.__filename = filename
        if self.__disposition_type is None:
            self.__disposition_type = 'inline'
        # NOTE(review): filename is not quoted/escaped here; names containing
        # spaces or ';' may yield an invalid header — confirm callers sanitize.
        self.response.headers['content-disposition'] = '%s; filename=%s' % (self.__disposition_type, self.__filename)
def set_encoding(self, encoding):
if encoding:
self.response.headers['content-encoding'] = str(encoding)
else:
del self.response.headers['content-encoding']
    def get_bytes_sent(self):
        """Return the number of response body bytes sent so far."""
        return self.__bytes_sent
def log_error(self, message):
self.__errors.write(message.strip() + '\n')
    def get_content_type_set_p(self):
        """Return True when a non-empty Content-Type was explicitly set."""
        return self.__content_type_set_p and \
               bool(self.response.headers['content-type'])
def allow_methods(self, methods, reset=0):
if reset:
self.__allowed_methods = []
self.__allowed_methods += [method.upper().strip() for method in methods]
    def get_allowed_methods(self):
        """Return the list of HTTP methods registered via allow_methods()."""
        return self.__allowed_methods
    def readline(self, hint=None):
        """Read one line from the request body stream.

        *hint* is passed through when the underlying stream supports it.
        """
        try:
            return request.stream.readline(hint)
        except TypeError:
            ## The hint parameter is not part of the WSGI PEP, although it is
            ## useful when reading FORM data with large files to avoid filling
            ## up memory. Fall back to the argument-less call when the stream
            ## does not accept it.
            return request.stream.readline()
    def readlines(self, hint=None):
        """Read all remaining lines from the request body stream."""
        return request.stream.readlines(hint)
    def read(self, hint=None):
        """Read up to *hint* bytes (or everything, when None) from the body."""
        return request.stream.read(hint)
def register_cleanup(self, callback, data=None):
@after_this_request
def f(response):
callback(data)
    def get_cleanups(self):
        """Return the stored cleanup list.

        NOTE(review): register_cleanup (above) delegates to Flask's
        after_this_request and never appends to __cleanups -- confirm whether
        this list is still populated anywhere.
        """
        return self.__cleanups
    def get_referer(self):
        """Return the Referer header of the active request (may be None)."""
        return request.referrer
    def get_what_was_written(self):
        """Return the buffered copy of everything written to the response."""
        return self.__what_was_written
def __str__(self):
from pprint import pformat
out = ""
for key in dir(self):
try:
if not callable(getattr(self, key)) and not key.startswith("_SimulatedModPythonRequest") and not key.startswith('__'):
out += 'req.%s: %s\n' % (key, pformat(getattr(self, key)))
except:
pass
return out
    def get_original_wsgi_environment(self):
        """
        Return the original WSGI environment used to initialize this request
        object.
        @return: environ, start_response
        @raise AssertionError: in case the environment has been altered, i.e.
            either the input has been consumed or something has already been
            written to the output.
        """
        # __tainted is flipped as soon as the body stream or output is touched;
        # after that the raw environ can no longer be replayed safely.
        assert not self.__tainted, "The original WSGI environment is tainted since at least req.write or req.form has been used."
        return self.__environ, self.__start_response
    def get_environ(self):
        """Return the WSGI environ dict this request was created with."""
        return self.__environ
environ = property(get_environ)
content_type = property(get_content_type, set_content_type)
unparsed_uri = property(get_unparsed_uri)
uri = property(get_uri)
full_uri = property(get_full_uri)
headers_in = property(get_headers_in)
subprocess_env = property(get_subprocess_env)
args = property(get_args)
header_only = property(get_header_only)
status = property(get_status, set_status)
method = property(get_method)
hostname = property(get_hostname)
filename = property(fset=set_filename)
encoding = property(fset=set_encoding)
bytes_sent = property(get_bytes_sent)
content_type_set_p = property(get_content_type_set_p)
allowed_methods = property(get_allowed_methods)
response_sent_p = property(get_response_sent_p)
form = property(get_post_form)
remote_ip = property(get_remote_ip)
remote_host = property(get_remote_host)
referer = property(get_referer)
what_was_written = property(get_what_was_written)
def alert_admin_for_server_status_p(status, referer):
    """
    Check the configuration variable
    CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST to see if the exception should
    be registered and the admin should be alerted.
    """
    status = str(status)
    for raw_pattern in CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST:
        pattern = raw_pattern.lower()
        # A trailing "r" (e.g. "404 r") means: alert only when a referer is set.
        referer_required = pattern.endswith('r')
        if referer_required:
            pattern = pattern[:-1].strip()
        if fnmatch(status, pattern) and (referer or not referer_required):
            return True
    return False
def application(environ, start_response, handler=None):
"""
Entry point for wsgi.
"""
## Needed for mod_wsgi, see: <http://code.google.com/p/modwsgi/wiki/ApplicationIssues>
req = SimulatedModPythonRequest(environ, start_response)
#print 'Starting mod_python simulation'
try:
if handler is None:
from invenio.ext.legacy.layout import invenio_handler
invenio_handler(req)
else:
handler(req)
req.flush()
## TODO for future reimplementation of stream_file
#except StreamFileException as e:
# return e.value
except SERVER_RETURN as status:
redirection, = status.args
from werkzeug.wrappers import BaseResponse
if isinstance(redirection, BaseResponse):
return redirection
status = int(str(status))
if status == 404:
from werkzeug.exceptions import NotFound
raise NotFound()
if status not in (OK, DONE):
req.status = status
req.headers_out['content-type'] = 'text/html'
admin_to_be_alerted = alert_admin_for_server_status_p(status,
req.headers_in.get('referer'))
if admin_to_be_alerted:
register_exception(req=req, alert_admin=True)
if not req.response_sent_p:
start_response(req.get_wsgi_status(), req.get_low_level_headers(), sys.exc_info())
map(req.write, generate_error_page(req, admin_to_be_alerted))
req.flush()
finally:
##for (callback, data) in req.get_cleanups():
## callback(data)
#if hasattr(req, '_session'):
# ## The session handler saves for caching a request_wrapper
# ## in req.
# ## This saves req as an attribute, creating a circular
# ## reference.
# ## Since we have have reached the end of the request handler
# ## we can safely drop the request_wrapper so to avoid
# ## memory leaks.
# |
import interact
class | EvtInteract(interact.Interact):
def __init__(self):
self.events = []
def checkEventInteraction(self, events):
self.events = events
self.checkInteraction()
| |
from . import (
Application,
Category,
Course,
Designation,
Major,
Project,
Requirement,
User,
Year,
)
Application = Applicatio | n.Application
Category = Category.Category
Course = Cour | se.Course
Designation = Designation.Designation
Major = Major.Major
Project = Project.Project
Requirement = Requirement.Requirement
User = User.User
Year = Year.Year
|
"""empty message
Revision ID: 0047 add smtp
Revises: 0046 remove long description
Create Date: 2020-11-08 01:28:28.386704
"""
# revision identifiers, used by Alembic.
revision = '0047 add smtp'
down_revision = '0046 remove long description'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add SMTP credentials and bookkeeping columns to email_providers."""
    # ### commands auto generated by Alembic - please adjust! ###
    new_columns = (
        sa.Column('smtp_password', sa.String(), nullable=True),
        sa.Column('smtp_server', sa.String(), nullable=True),
        sa.Column('smtp_user', sa.String(), nullable=True),
        sa.Column('available', sa.Boolean(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
    )
    for column in new_columns:
        op.add_column('email_providers', column)
    # ### end Alembic commands ###
def downgrade():
    """Remove the columns introduced by the 0047 upgrade."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('smtp_user', 'smtp_server', 'smtp_password',
                        'available', 'created_at'):
        op.drop_column('email_providers', column_name)
    # ### end Alembic commands ###
|
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the |
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMP | LIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
import numpy as np
import os
import pandas as pd
import statsmodels.formula.api as smf
import sys
# @params: takes mobaid codes string
# @returns: list of mobaid strings
# @params: takes mobaid codes string
# @returns: list of mobaid strings
def splitCode(x):
    """Split a comma-separated MobAids code string into a list of codes.

    Non-string input (e.g. NaN floats produced by pandas for missing cells)
    yields an empty list.
    """
    # isinstance is the idiomatic type check (was `type(x) is str`).
    if isinstance(x, str):
        return x.split(',')
    return []
# @returns binary T/F if string code is in string/list x
# @returns binary T/F if string code is in string/list x
def containsCode(code, x):
    """Return 1 when *code* occurs in *x* (list or string), else 0."""
    return int(code in x)
# @param: takes char to be repeated c and number of repeats n
# @returns: a string with c repeated n times
# @param: takes char to be repeated c and number of repeats n
# @returns: a string with c repeated n times
def characterString(c, n):
    """Return *c* repeated *n* times.

    Uses string multiplication instead of the original quadratic
    append-in-a-loop construction.
    """
    return c * n
# to debug lambda functions
# to debug lambda functions
def test(x):
    """Debug helper: echo *x* to stdout (usable inside a lambda pipeline)."""
    print(x)
# combines boardings at the same stop
def combineRows(data):
# temp = data # debug
# print(temp.columns.values) # debug
# temp.drop('MobAids', 1 ,inplace=True) # debug
data = data.groupby(['ServiceDate','Run','ETA','DwellTime','Activity']).sum()
# 55-60 removes colums that have all 0 data
bool_column_df = data.apply(lambda x: (min(x) == 0) and (max(x) == 0))
bool_column_df.columns = ['values']
print(bool_column_df.values) # debug
columns = bool_column_df[bool_column_df.values].index.values
print(columns) # debug
data.drop(columns,1,inplace=True)
data.reset_index(inplace=True)
# print(data.columns.values) # debug
# print(data.equals(temp)) # debug
return(data)
# get data file from 1st argument
# Load the CSV named by the first CLI argument from ../data/.
data = None
try:
    data_path = os.path.join(os.pardir,'data',sys.argv[1])
    data = pd.read_csv(data_path)
except IOError:
    # File missing or unreadable at the expected relative location.
    print('\n\tError: No file at ../data/' + sys.argv[1] + ' from ' + os.getcwd() + '\n')
    quit()
except IndexError:
    # No CLI argument was supplied at all.
    print('\n\tdwellTimeAnalysis.py takes a csv file from\n\n\t\tmain_repo\data\n\n\tassuming that the file is run in the Python_Scripts folder\n')
    quit()
# gathers needed data
data.Activity = data.Activity.apply(lambda x: int(x))
# data = data.iloc(np.where((data.Activity == 0) | (data.Activity == 1)))
# Keep only boardings (0) and deboardings (1), with the columns used below.
data = data[['ServiceDate','Run','ETA','DwellTime','Activity',
             'MobAids']].loc[(data.Activity == 0) | (data.Activity == 1)]
# Full catalogue of mobility-aid codes that may appear in the MobAids column.
allCodes = ['A','AM','AP','AR','BB','CA','CB','CI','CS','CT','H','H1','H2','HD','LI',
            'MO','N','NR','OR','OX','PEL','PK','SA','SC','ST','SVC','U','V','V1','V2',
            'WA','WG','WH','WK','WT','WX','0T']
data.MobAids = data.MobAids.apply(lambda x: splitCode(x))
# creates a column with binary values for each code
for code in allCodes:
    data[code] = data.MobAids.apply(lambda x: containsCode(code, x))
# print(data) # debug
# Attempt to fix an error caused in the regression by this 0T
# (a column name starting with a digit breaks the patsy formula parser).
data.rename(columns={'0T' : 'OT'}, inplace=True)
# splits data into boarding and deboarding
boardings = combineRows(data[data.Activity == 0])
# print(boardings) # debug
deboardings = combineRows(data[data.Activity == 1])
# for debugging
boardings.to_csv('../data/single_day_boardings.csv')
deboardings.to_csv('../data/single_day_deboardings.csv')
###################################################################
# Need to check with Matthew                                      #
# -----------------------------                                   #
# is total dwell time for a stop is included for each client row? #
# or is total dwell time sum is divided among client rows?        #
###################################################################
# regression for boarding dwell times: DwellTime ~ code1 + code2 + ...
x = ' + '.join(boardings.columns.values[6:])
y = 'DwellTime'
reg_formula = y + ' ~ ' + x
# print reg_formula # debug
# boarding regression
lmb = smf.ols(formula=reg_formula, data=boardings).fit()
# deboarding regression
lmd = smf.ols(formula=reg_formula, data=deboardings).fit()
# Redirect stdout to a file so the statsmodels summaries are captured there.
orig_stdout = sys.stdout
output = open("../data/dwell_time_mobaid_regression.txt", 'w')
sys.stdout = output
top = characterString('#', 78) + '\n'
bottom = characterString('-', 78)
print top + characterString(' ', 34) + 'Boardings\n' + bottom
print lmb.summary()
print '\n\n' + top + characterString(' ', 33) + 'Deboardings\n' + bottom
print lmd.summary()
sys.stdout = orig_stdout
output.close()
#prints (debug purposes)
print top + characterString(' ', 34) + 'Boardings\n' + bottom
print lmb.summary()
print '\n\n' + top + characterString(' ', 33) + 'Deboardings\n' + bottom
print lmd.summary()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'template.ui'
#
# Created: Sun Sep 18 19:19:10 2016
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated (pyside2-uic) UI scaffold for the console widget.

    Do not hand-edit the widget construction: regenerate from template.ui.
    """
    def setupUi(self, Form):
        """Build the widget tree and layouts on *Form*."""
        Form.setObjectName("Form")
        Form.resize(694, 497)
        self.gridLayout = QtWidgets.QGridLayout(Form)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.splitter = QtWidgets.QSplitter(Form)
        self.splitter.setOrientation(QtCore.Qt.Vertical)
        self.splitter.setObjectName("splitter")
        self.layoutWidget = QtWidgets.QWidget(self.splitter)
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.output = QtWidgets.QPlainTextEdit(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Monospace")
        self.output.setFont(font)
        self.output.setReadOnly(True)
        self.output.setObjectName("output")
        self.verticalLayout.addWidget(self.output)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.input = CmdInput(self.layoutWidget)
        self.input.setObjectName("input")
        self.horizontalLayout.addWidget(self.input)
        self.historyBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.historyBtn.setCheckable(True)
        self.historyBtn.setObjectName("historyBtn")
        self.horizontalLayout.addWidget(self.historyBtn)
        self.exceptionBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.exceptionBtn.setCheckable(True)
        self.exceptionBtn.setObjectName("exceptionBtn")
        self.horizontalLayout.addWidget(self.exceptionBtn)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.historyList = QtWidgets.QListWidget(self.splitter)
        font = QtGui.QFont()
        font.setFamily("Monospace")
        self.historyList.setFont(font)
        self.historyList.setObjectName("historyList")
        self.exceptionGroup = QtWidgets.QGroupBox(self.splitter)
        self.exceptionGroup.setObjectName("exceptionGroup")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.exceptionGroup)
        self.gridLayout_2.setSpacing(0)
        self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.clearExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
        self.clearExceptionBtn.setEnabled(False)
        self.clearExceptionBtn.setObjectName("clearExceptionBtn")
        self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
        self.catchAllExceptionsBtn = QtWidgets.QPushButton(self.exceptionGroup)
        self.catchAllExceptionsBtn.setCheckable(True)
        self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn")
        self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
        self.catchNextExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
        self.catchNextExceptionBtn.setCheckable(True)
        self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn")
        self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
        self.onlyUncaughtCheck = QtWidgets.QCheckBox(self.exceptionGroup)
        self.onlyUncaughtCheck.setChecked(True)
        self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck")
        self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
        self.exceptionStackList = QtWidgets.QListWidget(self.exceptionGroup)
        self.exceptionStackList.setAlternatingRowColors(True)
        self.exceptionStackList.setObjectName("exceptionStackList")
        self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7)
        self.runSelectedFrameCheck = QtWidgets.QCheckBox(self.exceptionGroup)
        self.runSelectedFrameCheck.setChecked(True)
        self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck")
        self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
        self.exceptionInfoLabel = QtWidgets.QLabel(self.exceptionGroup)
        self.exceptionInfoLabel.setObjectName("exceptionInfoLabel")
        self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1)
        self.label = QtWidgets.QLabel(self.exceptionGroup)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1)
        self.filterText = QtWidgets.QLineEdit(self.exceptionGroup)
        self.filterText.setObjectName("filterText")
        self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1)
        self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply the translated user-visible strings to the widgets."""
        Form.setWindowTitle(QtWidgets.QApplication.translate("Form", "Console", None, -1))
        self.historyBtn.setText(QtWidgets.QApplication.translate("Form", "History..", None, -1))
        self.exceptionBtn.setText(QtWidgets.QApplication.translate("Form", "Exceptions..", None, -1))
        self.exceptionGroup.setTitle(QtWidgets.QApplication.translate("Form", "Exception Handling", None, -1))
        self.clearExceptionBtn.setText(QtWidgets.QApplication.translate("Form", "Clear Exception", None, -1))
        self.catchAllExceptionsBtn.setText(QtWidgets.QApplication.translate("Form", "Show All Exceptions", None, -1))
        self.catchNextExceptionBtn.setText(QtWidgets.QApplication.translate("Form", "Show Next Exception", None, -1))
        self.onlyUncaughtCheck.setText(QtWidgets.QApplication.translate("Form", "Only Uncaught Exceptions", None, -1))
        self.runSelectedFrameCheck.setText(QtWidgets.QApplication.translate("Form", "Run commands in selected stack frame", None, -1))
        self.exceptionInfoLabel.setText(QtWidgets.QApplication.translate("Form", "Exception Info", None, -1))
        self.label.setText(QtWidgets.QApplication.translate("Form", "Filter (regex):", None, -1))
from .CmdInput import CmdInput
|
#
#
#
import requests
from bs4 import BeautifulSoup
import re
import os
def all_links(URL, abs=False, session=None):
    '''Generator function for all links in a page.
    ARGS:
        URL -> url of the page
        abs -> (True) yields the raw 'href' of each <a> tag; (False) resolves
               each 'href' into a full link (skips javascript links)
        session -> optional requests session used to fetch the page
    RETS
        yields every link'''
    if session:
        response = session.get(URL)
    else:
        response = requests.get(URL)
    mysoup = BeautifulSoup(response.text)
    for link in mysoup.find_all('a'):
        ret = link.get('href')
        if abs:
            # Raw mode yields whatever the tag carries, unresolved.
            yield ret
            continue
        # BUG FIX: <a> tags without an href return None, and empty hrefs
        # cannot be resolved -- both used to crash on ret[0].
        if not ret:
            continue
        if ret[0:10] == "javascript":
            continue
        if ret[0] == '/':
            # Root-relative link: prepend the scheme+host portion of URL.
            mat = re.match("(.+?\..+?\..{2,5})/", URL)
            ret = mat.group(1) + ret
        elif ret[0] == '#':
            # Fragment: anchor on the current page.
            ret = URL + ret
        elif not re.match(".+?:.+", ret):
            # Document-relative link: swap out the last path segment.
            ret = re.sub("/[^/]+$", "/" + ret, URL)
        yield ret
def save_file(URL,session=None,dir="",replace=False,max_size=None,altname=None,chunksize=2048):
    '''Save a file from the web to disk.
    ARGS:
    URL -> URL of the file to be downloaded
    session -> requests session if the file is only available in a session (typically login/auth/etc)
    dir -> directory of the saved file; either relative to the script or an absolute path. example: "archive/" saves files in a folder named archive
    replace -> if the file exists (True) replace it / (False) skip
    max_size -> max size of the file in Bytes; if the size exceeds this, the download is aborted
    altname -> name for the saved file (if None: try the server's Content-Disposition header, then the last URL segment, then 'undefined')
    chunksize -> size of each chunk written to disk in Bytes (A.K.A. buffer size), default is 2KB
    RETS:
    True -> File already Exists
    Number -> Bytes Written to disk
    False -> Download Failed (max_size exceeded)
    '''
    if(altname==None):
        # Derive the filename from a HEAD request's Content-Disposition.
        if(session):
            dlh = session.head(URL)
        else:
            dlh= requests.head(URL)
        if (dlh.status_code != 200):
            raise Exception(dlh.status_code)
        try:
            fileheader=dlh.headers['Content-Disposition']
            mat=re.search('filename="(.*)"',fileheader)
            filename=mat.group(1)
        except:
            # NOTE(review): bare except is relied on for both a missing header
            # (KeyError) and a failed regex match (AttributeError), but it also
            # hides any other error -- consider narrowing.
            mat2=re.search("/([^/]+?)$",URL)
            if(mat2):
                filename=mat2.group(1)
            else:
                filename='undefined'
    else:
        filename=altname
    if (dir!="" and not os.path.exists(dir)):
        os.makedirs(dir)
    # NOTE(review): plain concatenation -- 'dir' must end with a path
    # separator (e.g. "archive/") or the name fuses onto the directory name.
    path=dir+filename
    if(replace==False and os.path.exists(path)) :
        return True
    else:
        if(session):
            dl = session.get(URL, stream=True)
        else:
            dl = requests.get(URL, stream=True)
        if (dl.status_code != 200):
            raise Exception(dl.status_code)
        with open(path, 'wb') as f:
            for i,chunk in enumerate(dl.iter_content(chunksize)):
                f.write(chunk)
                if(max_size and f.tell()>max_size):
                    dl.close()
                    break;
            # for/else: runs only when the loop was NOT broken, i.e. the whole
            # file fit under max_size.
            else:
                return f.tell()
        # Reached only via break: size limit exceeded, partial file remains.
        return False
|
tamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s)
"""
UPDATE_HEADER = "INSERT INTO %s.performance_utilizationstatus"
UPDATE_TAIL = """
(machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer )
values
(%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s)
ON DUPLICATE KEY UPDATE machine_name = VALUES(machine_name),current_value = VALUES(current_value),age=VALUES(age),site_name=VALUES(site_name),critical_threshold=VALUES(critical_threshold),severity=VALUES(severity),sys_timestamp=VALUES(sys_timestamp),ip_address=VALUES(ip_address),warning_threshold=VALUES(warning_threshold),check_timestamp=VALUES(check_timestamp),refer=VALUES(refer)
"""
ERROR_DICT ={404:'Device not found yet',405:'No SS Connected to BS-BS is not skipped'}
ERROR_FOR_DEVICE_OMITTED = [404]
kpi_rules = eval(Variable.get("kpi_rules"))
DEBUG = False
sv_to_ds_mapping = {}
#O7_CALC_Q = "calculation_q"
O7_CALC_Q = "poller_queue"
down_and_unresponsive_devices = eval(redis_hook_static_5.get("current_down_devices_all"))
def process_utilization_kpi(
parent_dag_name,
child_dag_name,
start_date,
schedule_interval,
celery_queue,
ss_tech_sites,
hostnames_ss_per_site,
ss_name,
utilization_attributes,
config_sites): #here config site is list of all sites in system_config var
utilization_kpi_subdag_dag = DAG(
dag_id="%s.%s"%(parent_dag_name, child_dag_name),
schedule_interval=schedule_interval,
start_date=start_date,
)
for service in utilization_attributes:
sv_to_ds_mapping [service.get("service_name")] ={"data_source":service.get("data_source"),"sector_type":service.get("sector_type")}
    def get_calculated_ss_data():
        """Merge the per-site SS utilization dicts stored in redis into one dict."""
        ss_data = redis_hook_util_10.rget("calculated_ss_utilization_kpi")
        combined_site_data = {}
        for site_data in ss_data:
            # NOTE(review): eval() on redis payloads executes arbitrary code if
            # the store is compromised -- consider json/ast.literal_eval.
            site_data = eval(site_data)
            combined_site_data.update(site_data)
        return combined_site_data
#To create SS dict
def format_data(**kwargs):
device_type = kwargs.get("params").get("technology")
utilization_attributes = kwargs.get("params").get("attributes")
machine_name = kwargs.get("params").get("machine_name")
ss_kpi_dict = {
'site_name': 'unknown' ,
'device_name': 'unknown',
'service_name': 'unknown',
'ip_address': 'unknown',
'severity': 'unknown',
'age': 'unknown',
'data_source': 'unknown',
'current_value': 'unknown',
'warning_threshold': 'unknown',
'critical_threshold': 'unknown',
'check_timestamp': 'unknown',
'sys_timestamp': 'unknown' ,
'refer':'unknown',
'min_value':'unknown',
'max_value':'unknown',
'avg_value':'unknown',
'machine_name':'unknown'
}
ss_data =redis_hook_util_10.rget("calculated_utilization_%s_%s"%(device_type,machine_name))
cur_processing_time = backtrack_x_min(time.time(),300) + 120 # this is used to rewind the time to previous multiple of 5 value so that kpi can be shown accordingly
ss_devices_list = []
for ss_device in ss_data:
ss_device = eval(ss_device)
hostname = ss_device.get('hostname')
for service in ss_device.get('services'):
data_source = sv_to_ds_mapping.get(service).get("data_source")
pmp_type = sv_to_ds_mapping.get(service).get("sector_type")
thresholds = get_severity_values(service)
ss_kpi_dict['critical_threshold']=thresholds[0]
ss_kpi_dict['data_source']=data_source
ss_kpi_dict['site_name']=ss_device.get('site')
#TODO: ok and unknown are only 2 sev for ss we can incluudethis in rules later
ss_kpi_dict['service_name']= service
ss_kpi_dict['machine_name']= machine_name
ss_kpi_dict['check_timestamp']=cur_processing_time
ss_kpi_dict['device_name']=ss_device.get('hostname')
ss_kpi_dict['sys_timestamp']=cur_processing_time
ss_kpi_dict['refer']=ss_device.get("%s_sector"%(pmp_type))
ss_kpi_dict['ip_address']=ss_device.get('ipaddress')
ss_kpi_dict['warning_threshold']= thresholds[1]
if not isinstance(ss_device.get(service),dict):
#handling cur_value if it is greater than 100
cur_value=ss_device.get(service)
if ss_device.get(service) and ss_device.get(service) != None:
cur_value=ss_device.get(service)
try:
if isinstance(curr_value,float) and cur_value and cur_value > 100.00:
cur_value = 100
except Exception:
logging.error("Exception while handling above 100 entries")
ss_kpi_dict['severity']= calculate_severity(service,ss_device.get(service))
ss_kpi_dict['age']= calculate_age(hostname,ss_kpi_dict['severity'],ss_device.get('device_type'),cur_processing_time,service)
ss_kpi_dict['current_value']=cur_value
ss_kpi_dict['avg_value']=cur_value
ss_kpi_dict['min_value']=cur_value
ss_kpi_dict['max_value']=cur_value
if ss_kpi_dict['current_value'] != None:
ss_devices_list.append(ss_kpi_dict.copy())
else:
for data_source in ss_device.get(service):
ds_values = ss_device.get(service)
curr_value= ss_device.get(service).get(data_source)
if isinstance(curr_value,str):
try:
curr_value=float(curr_value)
if isinstance(curr_value,float):
if curr_value > 100.00:
curr_value=100
except Exception:
logging.error("Unable to convert to float")
else:
if curr_value > 100.00:
curr_value=100
ss_kpi_dict['data_source']=data_source
ss_kpi_dict['severity']= calculate_severity(service,ds_values.get(data_source))
ss_kpi_dict['age']= calculate_age(hostname,ss_kpi_dict['severity'],ss_device.get('device_type'),cur_processing_time,service)
ss_kpi_dict['current_value'] = curr_value
ss_kpi_dict['avg_value']=curr_value
ss_kpi_dict['min_value']=curr_value
ss_kpi_dict['max_value']=curr_value
if ss_kpi_dict['current_value'] != None:
ss_devices_list.append(ss_kpi_dict.copy())
try:
if len(ss_devices_list) > 0:
redis_hook_util_10.rpush("formatted_util_%s_%s"%(device_type,machine_name),ss_devices_list)
else:
logging.info("No %s device found in %s after formatting "%(device_type,machine_name))
except Exception:
logging.error("Unable to push formatted SS data to redis")
def get_required_dat | a_ss(**kwargs):
site_name = kwargs.get("params").get("site_name")
device_type = kwargs.get("params").get("technology")
utilization_attributes = kwargs.get("params").get("attributes")
if "vrfprv" in site_name:
memc_con = vrfprv_me | mc_con
elif "pub" in site_name:
memc_con = pub_memc_con
else:
memc_con = memc_con_cluster
ss_data_dict = {}
all_ss_data = []
if site_name not in hostnames_ss_per_site.keys():
logging.warning("No SS devices found for %s"%(site_name))
return 1
for hostnames_dict in hostnames_ss_per_site.get(site_name):
host_name = hostnames_dict.get("hostname")
ip_address = hostnames_dict.get("ip_address")
ss_data_dict['hostname'] = host_name
ss_data_dict['ipaddress'] = ip_address
ss_data_dict['site_name'] = site_name
if host_name not in down_and_unresponsive_devices:
for service in utilization_attributes:
ss_data_dict[service.get('service_name')] = memc_con.get(service.get('utilization_key')%(host_name))
all_ss_data.append(ss_data_dict.copy())
if len(all_ss_data) == 0:
logging.info("No data Fetched ! Aborting Successfully")
return 0
try:
#redis_hook_util_10.rpush("%s_%s"%(device_type,site_name),all_ss_data)
print "++++++++++++"
print site_name.split("_")[0]
redis_hook_util_10.rpush("%s_%s"%(device_type,site_name.split("_")[0]),all_ss_data)
except Exception:
logging.warning("Unable to insert ss data into redis")
#pprint(all_ss_data)
def calculate_utilization_data_ss(**kwargs):
machine_name = kwargs.get("params").get("machine_name")
|
'''
Various tools to interface with pyGSTi for running GST experiments.
Created on May 16, 2018
Original Author: Guilhem Ribeill
Copyright 2018 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from .PulsePrimitives import *
from .Cliffords import *
from .BasicSequences.helpers import create_cal_seqs
from .Compiler import compile_to_hardware
from itertools import chain
from random import choices
# pyGSTi is an optional dependency; the GST helpers degrade gracefully
# when it is missing.
PYGSTI_PRESENT = False
try:
    from pygsti.objects.circuit import Circuit
    PYGSTI_PRESENT = True
except ImportError:
    # Was a bare `except:`, which also masked SystemExit/KeyboardInterrupt
    # and genuine errors inside pyGSTi itself.
    pass
#Default mapping from pyGSTi naming convention to QGL gates.
gst_gate_map = {"Gx": X90,
"Gy": Y90,
"Gi": Id}
def gst_map_1Q(gst_list, qubit, qgl_map=gst_gate_map, append_meas=True):
    """
    Helper function that takes an arbitrarily nested list of pygsti gatestrings
    and converts them into QGL sequences, keeping the same nesting of lists.
    Inputs:
        gst_list: GateString to convert, or possibly nested list of pyGSTi GateStrings.
        qubit: QGL qubit to apply the sequence to
        qgl_map: Dictionary that maps between pyGSTi "Gx" string to QGL pulse
        append_meas: Append a measurement to each sequence.
    Returns:
        QGL sequences, preserving the input list nesting (as a generator)

    NOTE(review): `Circuit` is only defined when the optional pyGSTi import at
    module top succeeded; without pyGSTi this raises NameError on first use.
    """
    if isinstance(gst_list, Circuit):
        gst_list = [gst_list]
    for item in gst_list:
        if isinstance(item, Circuit):
            # Map each gate label (e.g. "Gx") to its QGL pulse on this qubit.
            mapped = map(lambda x: qgl_map[str(x)](qubit), item.tup)
            if append_meas:
                yield list(chain(mapped, [MEAS(qubit)]))
            else:
                yield list(mapped)
        elif isinstance(item, list):
            # Recurse, preserving the nesting structure.
            yield list(gst_map_1Q(item, qubit, qgl_map=qgl_map, append_meas=append_meas))
def gst_map_2Q(gst_list, qubits, qgl_map=None, append_meas=False):
    """
    Helper function that takes an arbitrarily nested list of pygsti gatestrings
    and converts them into QGL sequences, keeping the same nesting of lists.
    Inputs:
        gst_list: Circuit to convert, or possibly nested list of pyGSTi Circuits.
        qubits: QGL qubits to apply the sequence to
        qgl_map: Dictionary that maps between pyGSTi "Gx" string to QGL pulse
        append_meas: Append a measurement to each sequence.
    Returns:
        QGL sequences, preserving the input list nesting (as a generator)
    """
    from functools import reduce  # BUG FIX: reduce is not a builtin on Python 3
    # BUG FIX: this used to test against `GateString`, a name never imported
    # in this module; use `Circuit` for consistency with gst_map_1Q above.
    if isinstance(gst_list, Circuit):
        gst_list = [gst_list]
    for item in gst_list:
        if isinstance(item, Circuit):
            mapped = map(lambda x: qgl_map[x], item.tup)
            if append_meas:
                # Simultaneous measurement on all qubits, combined via *.
                yield list(chain(mapped, [reduce(lambda x, y: x * y, map(MEAS, qubits))]))
            else:
                yield list(mapped)
        elif isinstance(item, list):
            # BUG FIX: the recursive call passed the undefined name `qubit`.
            yield list(gst_map_2Q(item, qubits, qgl_map=qgl_map, append_meas=append_meas))
def create_gst_sequence_from_pygsti(gst_list, qubit, gate_map=gst_gate_map):
    """ Returns list of QGL sequences from a pyGSTi GateString list. See gst_map_1Q.
    The return value is a list of sequences that can be complied by QGL.
    """
    seq_gen = gst_map_1Q(gst_list, qubit, qgl_map=gate_map, append_meas=True)
    return list(seq_gen)
def pygsti_to_cliffords(gst_seq):
    """Translate a pyGSTi sequence into single-qubit Clifford indices
    (no measurement appended)."""
    # GST label -> Clifford index.
    gate_to_cliff = {"{}": 0,
                     "Gi": 1,
                     "Gx": 2,
                     "Gy": 5}
    # gst_map_1Q expects callables, so wrap each index in a constant lambda
    # (default-arg binding avoids the late-binding closure pitfall).
    lambda_map = {label: (lambda x, idx=idx: idx)
                  for label, idx in gate_to_cliff.items()}
    return list(gst_map_1Q(gst_seq, None, qgl_map=lambda_map,
                           append_meas=False))
def pauli_rand_clifford_circuit(gst_seq):
    """Pauli-frame-randomize a GST Clifford circuit.

    Each Clifford in every sequence is compounded with a random Pauli, and a
    recovery Clifford is folded into the last element so the randomized
    sequence implements the same overall operation as the original.
    """
    from functools import reduce  # BUG FIX: reduce is not a builtin on Python 3

    def seqreduce(s):
        # Compose a whole sequence into one Clifford index (0 = identity).
        if not s:
            return 0
        else:
            return reduce(lambda x, y: clifford_multiply(x, y), s)

    def inv_cliff(c):
        return inverse_clifford(clifford_mat(c, 1))

    c_ps = [0, 2, 5, 8]  # Clifford indices of the single-qubit Paulis
    c_seqs = pygsti_to_cliffords(gst_seq)
    r_seqs = []
    for seq in c_seqs:
        if not seq:
            r_seqs.append([])
        else:
            rand_pauli = choices(c_ps, k=len(seq))
            inter = 0  # running composition of the randomized sequence
            bare = 0   # running composition of the original sequence
            rseq = []
            for j in range(len(seq)):
                inter = clifford_multiply(clifford_multiply(inter, rand_pauli[j]), seq[j])
                bare = clifford_multiply(bare, seq[j])
                rseq.append(clifford_multiply(rand_pauli[j], seq[j]))
            # Fold a recovery into the last element so the randomized circuit
            # equals the bare one overall.
            recovery = clifford_multiply(inv_cliff(inter), bare)
            rseq[-1] = clifford_multiply(rseq[-1], recovery)
            r_seqs.append(rseq)
    all_ok = all((r == i for r, i in zip(map(seqreduce, r_seqs), map(seqreduce, c_seqs))))
    assert all_ok, "Something went wrong when Pauli-frame randomizing!"
    return r_seqs
def SingleQubitCliffordGST(qubit, pygsti_seq, pulse_library="Standard", randomized=False, num_cals=100, diac_compiled=True):
    """Compile a single-qubit GST experiment to hardware.

    Args:
        qubit: QGL qubit the sequences act on.
        pygsti_seq: pyGSTi GateString list describing the experiment.
        pulse_library: one of "Standard", "DiAC", "AC" (case-insensitive).
        randomized: if True, Pauli-frame randomize the Clifford circuits.
        num_cals: number of calibration sequences to append (0 disables).
        diac_compiled: forwarded to the DiAC pulse constructor.

    Returns:
        The metafile produced by compile_to_hardware.

    Raises:
        ValueError: if pulse_library is not a supported library name.
    """
    pulse_library = pulse_library.upper()
    # QGL pulse libraries handle the Id pulse differently. In the standard
    # case, the Id is of finite length equal to all the other one-pulse
    # elements of the library. In the Atomic and DiAtomic cases, the ID is
    # of length 0 by default. In GST, we need access to both types of the ID
    # gate with the first experiment in any GST experiment equal to {} =
    # Id(length = 0). All other Id gates in the sequence should be of finite
    # length. So we'll modify the Clifford indexing here to make Id(length=0)
    # the first element in the library and Id(length=length) the second.
    if pulse_library == "STANDARD":
        clifford_pulse = [clifford_seq(i, qubit) for i in range(24)]
        clifford_pulse.insert(0, Id(qubit, length=0.0))
    elif pulse_library == "DIAC":
        # BUG FIX: this branch previously built AC(qubit, i, diac_compiled)
        # pulses; the DiAC library (and the diac_compiled flag) was intended
        # here, as the original commented-out lambda showed.
        clifford_pulse = [DiAC(qubit, i, diac_compiled) for i in range(24)]
        clifford_pulse.insert(1, Id(qubit))
    elif pulse_library == "AC":
        clifford_pulse = [AC(qubit, i) for i in range(24)]
        clifford_pulse.insert(1, Id(qubit))
    else:
        # BUG FIX: this ValueError used to be raised unconditionally after
        # the if/elif chain, so the function failed for every input; it
        # belongs in the fall-through branch only.
        raise ValueError("Pulse library must be one of 'standard', 'diac', or 'ac'. Got {} instead".format(pulse_library))
    if randomized:
        seqs = pauli_rand_clifford_circuit(pygsti_seq)
    else:
        seqs = pygsti_to_cliffords(pygsti_seq)
    qgl_seqs = []
    for seq in seqs:
        qgl_seqs.append([clifford_pulse[c] for c in seq])
        qgl_seqs[-1].append(MEAS(qubit))
    if num_cals != 0:
        qgl_seqs += create_cal_seqs((qubit, ), abs(num_cals))
    metafile = compile_to_hardware(qgl_seqs, 'GST/GST')
    return metafile
|
"""
Based on http://vaig.be/2009/03/getting-client-os-in-django.html
"""
import re
def client_os(user_agent):
    '''
    Context processor for Django that provides operating system
    information based on the HTTP user agent.

    Returns a dict with 'full_platform' (the raw platform token, or an
    approximation) and 'platform' ('Windows', 'Linux', 'Mac', or None).

    A user agent looks like (line break added):
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) \
        Gecko/2009020409 Iceweasel/3.0.6 (Debian-3.0.6-1)"
    '''
    # Build the pattern piecewise; raw strings avoid invalid-escape warnings.
    # Mozilla/5.0
    regex = r'(?P<application_name>\w+)/(?P<application_version>[\d\.]+)'
    regex += r' \('
    # X11
    regex += r'(?P<compatibility_flag>\w+)'
    regex += r'; '
    # U
    if "U;" in user_agent or "MSIE" in user_agent:  # some UA strings leave out the U;
        regex += r'(?P<version_token>[\w .]+)'
        regex += r'; '
    # Linux i686
    regex += r'(?P<platform_token>[\w ._]+)'
    # anything else
    regex += r'; .*'
    result = re.match(regex, user_agent)
    if result:
        result_dict = result.groupdict()
        full_platform = result_dict['platform_token']
        platform_values = full_platform.split(' ')
        if platform_values[0] in ('Windows', 'Linux', 'Mac'):
            platform = platform_values[0]
        elif len(platform_values) > 1 and platform_values[1] in ('Mac',):
            # Mac is given as "PPC Mac" or "Intel Mac".
            # BUG FIX: guard the index — a single-word unknown platform
            # token previously raised IndexError here.
            platform = platform_values[1]
        else:
            platform = None
    else:
        # Total hack to avoid dealing with regex nightmares
        if 'mac' in user_agent.lower():
            full_platform = "Intel Mac 10.6"
            platform = 'Mac'
        elif 'windows' in user_agent.lower():
            full_platform = "Windows"
            platform = 'Windows'
        else:
            full_platform = None
            platform = None
    return {
        'full_platform': full_platform,
        'platform': platform,
    }
|
import os
class Config(object):
    """Spotify playlist-builder settings.

    The SPOTIPY_* credentials are read from the environment when this
    module is imported; a missing variable raises KeyError immediately.
    """
    SPOTIPY_REDIRECT_URI = os.environ['SPOTIPY_REDIRECT_URI']
    SPOTIPY_CLIENT_ID = os.environ['SPOTIPY_CLIENT_ID']
    SPOTIPY_CLIENT_SECRET = os.environ['SPOTIPY_CLIENT_SECRET']
    # OAuth scopes: read the user's library, read and modify playlists.
    SPOTIFY_ACCESS_SCOPE = 'playlist-modify-public playlist-modify-private playlist-read-private user-library-read'

    ###########
    # Options #
    ###########

    # TRACKS_PER_ARTIST #
    # Number of tracks per artist to add to the playlist.
    # I recommend 5 or less. Max is 10.
    TRACKS_PER_ARTIST = 3

    # COLLATE #
    # By default, the playlist will be ordered like:
    #  - ARTIST A TRACK 1
    #  - ARTIST A TRACK 2
    #  - ARTIST A TRACK 3
    #  - ARTIST A TRACK 4
    #  - ARTIST A TRACK 5
    #  - ARTIST B TRACK 1
    #  - ARTIST B TRACK 2
    #  - ARTIST B TRACK 3
    # ...
    # if COLLATE is set to True, it will instead be ordered like so:
    #  - ARTIST A TRACK 1
    #  - ARTIST B TRACK 1
    #  - ARTIST C TRACK 1
    # ...
    #  - ARTIST Z TRACK 1
    #  - ARTIST A TRACK 2
    #  - ARTIST B TRACK 2
    # ...
    COLLATE = False

    # PUBLIC #
    # Default False. Set True to make your generated playlist public.
    PUBLIC = False
|
"""
This migration script adds a user actions table to Galaxy.
"""
from sqlalchemy import *
from migrate import *
import datetime
# Timestamp factory used as the create_time column default (UTC).
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
# Shared metadata object the migration table is registered on.
metadata = MetaData()
def display_migration_details():
print ""
print "This migration script adds a user actions table to Galaxy."
print ""
# New table to store user actions.
# Each row records one action taken by a user/session, with free-form
# "context" and "params" unicode payloads (capped at 512/1024 chars).
UserAction_table = Table( "user_action", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "session_id", Integer, ForeignKey( "galaxy_session.id" ), index=True ),
    Column( "action", Unicode( 255 ) ),
    Column( "context", Unicode( 512 ) ),
    Column( "params", Unicode( 1024 ) ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
display_migration_details()
metadata.reflect()
try:
UserAction_table.create()
except Exception, e:
print str(e)
log.debug( "Creating user_action table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata | .bind = migrate_engine
metadata.reflect()
try:
UserAction_table.drop()
except Exception, e:
print str(e)
log.debug( "Dropping user_action table failed: %s" % str( e ) )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tornado.ioloop

# Prefer the installed WebApp package; fall back to the bundled module.
try:
    import WebApp
except (ImportError, ImportWarning):
    # BUG FIX: "except ImportError, ImportWarning:" is Python 2 binding
    # syntax — it caught only ImportError and bound the exception instance
    # to the name ImportWarning (shadowing the builtin) instead of catching
    # both exception types. A tuple catches both, as intended.
    import entire as WebApp

if __name__ == "__main__":
    # OpenShift DIY cartridges publish the bind address via environment vars.
    ip = os.environ['OPENSHIFT_DIY_IP']
    port = int(os.environ['OPENSHIFT_DIY_PORT'])
    WebApp.application.listen(port, ip)
    tornado.ioloop.IOLoop.instance().start()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import shutil
import time
import subprocess
import numpy as np
from .phonopy_conf_creator import PhonopyConfCreator
from vasp.poscar import Poscar
from autotools import symlink_force
class PhononCalculator(object):
    """Drive phonopy post-processing (band structure, DOS, thermal properties).

    Symlinks the phonopy inputs from a data directory, generates .conf files
    through PhonopyConfCreator, and invokes the phonopy executable once per
    .conf file, each run inside its own working subdirectory.
    """
    def __init__(self,
                 directory_data="./",
                 poscar_filename="POSCAR",
                 poscar_average_filename=None,
                 is_average_mass=False,
                 dim_sqs=None,
                 is_primitive=False,
                 is_band=True,
                 is_partial_dos=False,
                 is_tetrahedron=False,
                 is_tprop=False,
                 mesh=None,
                 nac=None):
        # Mutable defaults are created per call to avoid shared state.
        if dim_sqs is None:
            dim_sqs = np.array([1, 1, 1])
        if mesh is None:
            mesh = np.array([1, 1, 1])
        self._variables = None
        self._home = os.path.expanduser("~")
        # Resolve the phonopy executable once; raises CalledProcessError if
        # phonopy is not on PATH. NOTE(review): check_output returns bytes
        # on Python 3 — presumably fine for the later subprocess.call on
        # POSIX, but confirm.
        self._phonopy = subprocess.check_output(["which", "phonopy"]).strip()
        print("phonopy_path:", self._phonopy)
        self._directory_data = directory_data
        self._poscar_filename = poscar_filename
        self._poscar_average_filename = poscar_average_filename
        self._is_average_mass = is_average_mass
        self.set_dim_sqs(dim_sqs)
        self._is_band = is_band
        self.set_is_tetrahedron(is_tetrahedron)
        self.set_is_partial_dos(is_partial_dos)
        self.set_is_tprop(is_tprop)
        self._is_primitive = is_primitive
        self._mesh = np.array(mesh)
        self._nac = nac

    def set_dim_sqs(self, dim_sqs):
        # Supercell dimensions (3-vector) passed to PhonopyConfCreator.
        self._dim_sqs = dim_sqs

    def set_is_tetrahedron(self, is_tetrahedron):
        # Whether to also run tetrahedron-method DOS calculations.
        self._is_tetrahedron = is_tetrahedron

    def set_is_partial_dos(self, is_partial_dos):
        # Whether to run partial-DOS calculations.
        self._is_partial_dos = is_partial_dos

    def set_is_tprop(self, is_tprop):
        # Whether to run thermal-property calculations.
        self._is_tprop = is_tprop

    def set_mesh(self, mesh):
        self._mesh = mesh

    def set_variables(self, variables):
        # Extra variables forwarded verbatim to PhonopyConfCreator.
        self._variables = variables

    def run(self):
        """Prepare inputs, build the .conf files, and run phonopy on each."""
        self.copy_files()
        self.create_phonopy_conf()
        conf_files = self.gather_conf_files()
        for conf_file in conf_files:
            self.run_phonopy(conf_file)

    def copy_files(self):
        """Symlink the phonopy input files from the data directory into CWD."""
        dir_data = self._directory_data
        symlink_force(os.path.join(dir_data, 'writefc.conf'), 'writefc.conf')
        symlink_force(os.path.join(dir_data, 'POSCAR'), 'POSCAR')
        symlink_force(os.path.join(dir_data, 'POSCAR_ideal'), 'POSCAR_ideal')
        symlink_force(os.path.join(dir_data, 'FORCE_CONSTANTS'), 'FORCE_CONSTANTS')

    def create_phonopy_conf(self):
        """Generate the phonopy .conf files for this structure."""
        directory_data = self._directory_data
        dim_sqs = self._dim_sqs
        variables = self._variables
        # Copy so PhonopyConfCreator cannot mutate our stored mesh.
        mesh = self._mesh.copy()
        print("directory_data:", directory_data)
        print("mesh:", mesh)
        spg_number = self.create_spg_number()
        # Get band path for the specific space group
        phonopy_conf_creator = PhonopyConfCreator(
            spg_number,
            mesh=mesh,
            tmax=3000,
            dim_sqs=dim_sqs,
            is_average_mass=self._is_average_mass,
            is_primitive=self._is_primitive,
            band_points=101,
            poscar_name="POSCAR",  # For getting the chemical symbols
            magmom_line=None,
            variables=variables,
            nac=self._nac,
        )
        phonopy_conf_creator.run()

    def create_spg_number(self):
        """
        spg_number is used to determine the primitive axis and band paths.
        """
        # Prefer the averaged POSCAR when one was provided.
        if self._poscar_average_filename is not None:
            poscar_filename = self._poscar_average_filename
        else:
            poscar_filename = self._poscar_filename
        print('SPG number is searched from {}'.format(poscar_filename))
        spg_number = Poscar(poscar_filename).get_symmetry_dataset()["number"]
        print("spg_number:", spg_number)
        return spg_number

    def gather_conf_files(self):
        """Return the list of .conf files to run, derived from the flags."""
        conf_files = [
            "dos_smearing.conf",
        ]
        if self._is_band:
            conf_files.append("band.conf")
        if self._is_tetrahedron:
            conf_files.append("dos_tetrahedron.conf")
        if self._is_partial_dos:
            conf_files.append("partial_dos_smearing.conf")
        if self._is_tetrahedron and self._is_partial_dos:
            conf_files.append("partial_dos_tetrahedron.conf")
        if self._is_tprop:
            conf_files.append("tprop.conf")
        return conf_files

    def run_phonopy(self, conf_file):
        """Run phonopy for one .conf file inside a fresh *_calc directory."""
        root = os.getcwd()
        home = self._home
        phonopy = self._phonopy
        print("=" * 80)
        print(conf_file)
        print("=" * 80)
        dir_name = conf_file.replace(".conf", "_calc")
        log_file = conf_file.replace(".conf", ".log")
        # Start each run from a clean working directory.
        if os.path.exists(dir_name):
            shutil.rmtree(dir_name)
        os.mkdir(dir_name)
        os.chdir(dir_name)
        # BORN is optional (non-analytical correction); link whatever exists.
        for fn in [conf_file, "POSCAR", "FORCE_CONSTANTS", "BORN"]:
            if os.path.exists(os.path.join("..", fn)):
                os.symlink("../" + fn, fn)
        if os.path.exists(log_file):
            os.remove(log_file)
        time1 = time.time()
        with open(log_file, "w") as f:
            subprocess.call(
                [phonopy, conf_file, "-v"],
                stdout=f,
            )
        time2 = time.time()
        dtime = time2 - time1
        print("Time for calc.: {:12.6f} s".format(dtime))
        # Thermal-property output is post-processed by a helper in $HOME.
        if conf_file == "tprop.conf":
            subprocess.call(
                ["python", home + "/script/python/phonopy_tprop_arranger.py"]
            )
        # Always return to the original directory for the next run.
        os.chdir(root)
def main():
    """Command-line entry point: parse options and run a PhononCalculator."""
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--datadir",
                            default="..",
                            type=str,
                            help="Data directory")
    # The three boolean switches share the same shape; add them in a loop.
    for flag, description in (
            ("--tetrahedron", "Calculate using tetrahedron method."),
            ("--partial_dos", "Calculate partial DOS."),
            ("--tprop", "Calculate thermal properties.")):
        arg_parser.add_argument(flag, action="store_true", help=description)
    options = arg_parser.parse_args()
    calculator = PhononCalculator(
        directory_data=options.datadir,
        is_tetrahedron=options.tetrahedron,
        is_partial_dos=options.partial_dos,
        is_tprop=options.tprop,
    )
    calculator.run()


if __name__ == "__main__":
    main()
|
# coding: utf-8
import sqlalchemy as sa
import pandas as pd
from niamoto.data_providers.base_occurrence_provider import \
BaseOccurrenceProv | ider
from niamoto.exceptions import MalformedDataSourceError
class SQLOccurrenceProvider(BaseOccurrenceProvider):
    """
    SQL occurrence provider. Instantiated with a sql query, that must return
    AT LEAST the following columns:
        id -> The provider's identifier for the occurrence.
        taxon_id -> The provider's taxon id for the occurrence.
        x -> The longitude of the occurrence (WGS84).
        y -> The latitude of the occurrence (WGS84).
    All the remaining columns will be stored as properties.
    """

    REQUIRED_COLUMNS = set(['id', 'taxon_id', 'x', 'y'])

    def __init__(self, data_provider, occurrence_sql):
        super(SQLOccurrenceProvider, self).__init__(data_provider)
        self.occurrence_sql = occurrence_sql

    def get_provider_occurrence_dataframe(self):
        """Fetch the provider's occurrences as a DataFrame indexed by id.

        Extra columns are folded into a JSON 'properties' column and the
        x/y columns are converted into an EWKT 'location' column.

        Raises:
            MalformedDataSourceError: if a required column is missing.
        """
        engine = sa.create_engine(self.data_provider.db_url)
        # BUG FIX: the connection was previously opened and never closed;
        # the context manager releases it deterministically.
        with engine.connect() as connection:
            df = pd.read_sql(self.occurrence_sql, connection, index_col='id')
        # 'id' became the index, so add it back for the column check.
        cols = set(list(df.columns) + ['id', ])
        inter = cols.intersection(self.REQUIRED_COLUMNS)
        if not inter == self.REQUIRED_COLUMNS:
            m = "The queried data does not contains the required columns " \
                "('id', 'taxon_id', 'x', 'y'), " \
                "queried data has: {}".format(cols)
            raise MalformedDataSourceError(m)
        if len(df) == 0:
            return df
        property_cols = cols.difference(self.REQUIRED_COLUMNS)
        if len(property_cols) > 0:
            properties = df[list(property_cols)].apply(
                lambda x: x.to_json(),
                axis=1
            )
        else:
            properties = '{}'
        # Pass a list to drop(): newer pandas versions reject set labels.
        df.drop(list(property_cols), axis=1, inplace=True)
        df['properties'] = properties
        location = df[['x', 'y']].apply(
            lambda x: "SRID=4326;POINT({} {})".format(x['x'], x['y']),
            axis=1
        )
        df['location'] = location
        df.drop(['x', 'y'], axis=1, inplace=True)
        return df
|
elf):
"""Step into the next iteration of the model."""
raise NotImplementedError("Please implement a step instance method")
class SimpleContainer(BaseModelClass):
    """
    A container in the aquaponics loop.

    Models a tank/basin/growbed/etc holding a volume of water. Water flows
    in at whatever rate the previous component in the loop flows out, and
    flows out of this container at its own constant outflow rate.
    """
    _PARAMS = {
        'previous': (_PARAM_TYPES.MODEL, 'previous'),
        'outflow': (_PARAM_TYPES.FLOAT, 'outflow (l/min)'),
        'start_content': (_PARAM_TYPES.INTEGER, 'start content (l)')
    }

    def __init__(self, previous, outflow, start_content=0):
        """
        Args:
            previous (Container): The previous Container in the chain.
            outflow (float): The outflow speed of this container.
            start_content (int): The starting contents of the container.
        """
        self.previous = previous
        self.outflow = outflow
        self.start_content = start_content
        self.state = start_content

    def get_current_outflow_speed(self):
        """Return this container's (constant) outflow speed in l/min."""
        return self.outflow

    def get_current_inflow_speed(self):
        """Return the inflow speed: the previous component's outflow."""
        return self.previous.get_current_outflow_speed()

    def step(self, time=10):
        """Advance the simulation, updating the water content.

        Args:
            time (int): The length of the step in seconds.
        """
        gained = time / 60 * self.get_current_inflow_speed()
        lost = time / 60 * self.get_current_outflow_speed()
        self.state += gained - lost
class Container(SimpleContainer):
    """A container whose outflow only starts once a content threshold is hit."""
    _PARAMS = copy.deepcopy(SimpleContainer._PARAMS)
    _PARAMS['threshold'] = (_PARAM_TYPES.INTEGER, 'dump threshold (l)')

    def __init__(self, previous, outflow, threshold, start_content=0):
        """
        Args:
            previous (Container): The previous Container in the chain.
            outflow (float): The outflow speed of this container.
            threshold (int): The content level at which outflow starts.
            start_content (int): The starting contents of the container.
        """
        self.previous = previous
        self.outflow = outflow
        self.threshold = threshold
        self.start_content = start_content
        self.state = start_content

    def get_current_outflow_speed(self):
        """Return the outflow speed: full speed at/above threshold, else 0."""
        return self.outflow if self.state >= self.threshold else 0
class FloodDrainContainer(Container):
    """
    A :class:`Container` that drains fully once its threshold is reached.

    Mimics a U-siphon or bell siphon: draining starts only when the water
    level hits the threshold, after which suction keeps water flowing out
    at the configured speed until the container is empty.
    """

    def __init__(self, *args, **kwargs):
        super(FloodDrainContainer, self).__init__(*args, **kwargs)
        # True while a flood-drain cycle is in progress.
        self.flooding = False

    def get_current_outflow_speed(self):
        """Return the current outflow speed.

        Outflow starts when the threshold is reached and continues at
        self.outflow until the container is empty.

        Returns:
            float: The outflow speed of this :class:`Container`.
        """
        draining = self.state >= self.threshold \
            or (self.flooding and self.state > 0)
        self.flooding = draining
        return self.outflow if draining else 0
class Pump(BaseModelClass):
    """
    A general Pump object.

    Pumps water into the system from an unlimited source at a constant
    speed. It holds no contents of its own; ``state`` carries the on (1) /
    off (0) flag, which is also what is plotted in the resulting graphs.
    """
    _PARAMS = {
        'outflow': (_PARAM_TYPES.FLOAT, 'outflow (l/min)'),
    }

    def __init__(self, outflow):
        """
        Args:
            outflow (float): The speed at which the pump pumps.
        """
        self.outflow = outflow
        self.state = 1

    def get_current_outflow_speed(self):
        """Return the constant pump speed in L/min."""
        return self.outflow

    def step(self, time=10):
        """Advance one step and report the pump state.

        Args:
            time (int): Step length in seconds (speed is constant).

        Returns:
            int: The state of the pump. 1=on 0=off.
        """
        return self.state
class WaterSource(BaseModelClass):
    """
    A general Water Source object.

    Water flows at a constant speed from a source (a spring or similar).
    It holds no contents of its own; its state is undefined (None).
    """
    _PARAMS = {
        'outflow': (_PARAM_TYPES.FLOAT, 'outflow (l/min)'),
    }

    def __init__(self, outflow):
        """
        Args:
            outflow (float): The speed at which the watersource flows.
        """
        self.outflow = outflow
        self.state = None

    def get_current_outflow_speed(self):
        """Return the constant source flow speed in L/min."""
        return self.outflow

    def step(self, time=10):
        """Advance one step; a source has no state to update.

        Args:
            time (int): Step length in seconds (unused).
        """
        return
class TimedPump(Pump):
"""
A pump like the Pump object.
This pump has timing parameters which periodically switch it on and off.
This way the outflow speed of the pump is controlled. If it is on, it
equals the outflow speed parameter, else it is 0.
"""
_PARAMS = copy.deepcopy(Pump._PARAMS)
_PARAMS['ontime'] = (_PARAM_TYPES.FLOAT, 'on time (min)')
_PARAMS['offtime'] = (_PARAM_TYPES.FLOAT, 'off time (min)')
def __init__(self, ontime, offtime, outflow):
"""
Args:
ontime (float): The time in minutes the pump spends pumping.
offtime (float): The time in minutes the pump is off.
outflow (float): The speed at which the pump pumps in L/min.
"""
self.ontime = ontime * 60
self.offtime = offtime * 60
self.outflow = outflow
self.time_since_switch = 0
self.state = 1
def get_current_outflow_speed(self):
"""
Return the current outflow (pump) speed.
It is determined by a timed switch that toggles the pump on and off.
Returns:
float: The outflow speed in L/min
"""
log.debug("state %i, time since switch %i, ontime %i, offtime %i" %
(self.state, self.time_since_switch, self.ontime,
self.offtime))
|
# Ask three questions, then echo the answers back with repr formatting.
questions = ("How old are you?", "How tall are you?", "How much do you weigh?")
answers = []
for question in questions:
    print(question)
    answers.append(input())
age, height, weight = answers
print("So, you are %r years old, %r tall and %r heavy." % (age, height, weight))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Hacky way to make sure imports work'''
from os.path import abspath, dirname, realpath, join
import sys

# Resolve the package root (the parent of this file's directory) and put it
# first on sys.path, so sim_game imports work even outside the python path.
_this_dir = dirname(realpath(__file__))
package_location = abspath(join(_this_dir, ".."))
sys.path.insert(0, package_location)
|
# -*- Mode: Python; python-indent-offset: 4 -*-
#
# Time-stamp: <2017-06-03 11:36:32 alex>
#
# --------------------------------------------------------------------
# PiProbe
# Copyright ( | C) 2016-2017 Alexandre Chauvin Hameau <ach@meta-x.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General | Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
database package, redis and test modules
"""
from . import dbRedis
from . import dbTest
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import datetime
import mimetypes
import os
import frappe
from frappe import _
import frappe.model.document
import frappe.utils
import frappe.sessions
import werkzeug.utils
from werkzeug.local import LocalProxy
from werkzeug.wsgi import wrap_file
from werkzeug.wrappers import Response
from werkzeug.exceptions import NotFound, Forbidden
def report_error(status_code):
    """Print the current traceback (unless suppressed) and return a JSON
    error response carrying the given HTTP status code."""
    want_traceback = (status_code != 404 or frappe.conf.logging) \
        and not frappe.local.flags.disable_traceback
    if want_traceback:
        frappe.errprint(frappe.utils.get_traceback())
    response = build_response("json")
    response.status_code = status_code
    return response
def build_response(response_type=None):
    """Build the HTTP response using the builder registered for the
    response type (frappe.response.type wins over the argument)."""
    # An empty docs list is noise; drop it before serializing.
    if "docs" in frappe.local.response and not frappe.local.response.docs:
        del frappe.local.response["docs"]
    builders = {
        'csv': as_csv,
        'download': as_raw,
        'json': as_json,
        'page': as_page,
        'redirect': redirect
    }
    chosen_type = frappe.response.get('type') or response_type
    return builders[chosen_type]()
def as_csv():
    """Return the result as a CSV attachment named after the doctype."""
    response = Response()
    attachment_name = frappe.response['doctype'].replace(' ', '_')
    response.headers[b"Content-Type"] = b"text/csv; charset: utf-8"
    response.headers[b"Content-Disposition"] = ("attachment; filename=\"%s.csv\"" % attachment_name).encode("utf-8")
    response.data = frappe.response['result']
    return response
def as_raw():
    """Return raw file content with an explicit or guessed content type."""
    response = Response()
    filename = frappe.response['filename']
    content_type = (frappe.response.get("content_type")
        or mimetypes.guess_type(filename)[0]
        or b"application/unknown")
    response.headers[b"Content-Type"] = content_type
    response.headers[b"Content-Disposition"] = ("filename=\"%s\"" % filename.replace(' ', '_')).encode("utf-8")
    response.data = frappe.response['filecontent']
    return response
def as_json():
    """Serialize frappe.local.response as a JSON response, consuming any
    http_status_code it carries."""
    make_logs()
    response = Response()
    if frappe.local.response.http_status_code:
        response.status_code = frappe.local.response['http_status_code']
        del frappe.local.response['http_status_code']
    response.headers[b"Content-Type"] = b"application/json; charset: utf-8"
    payload = json.dumps(frappe.local.response, default=json_handler, separators=(',',':'))
    response.data = payload
    return response
def make_logs(response = None):
    """make strings for msgprint and errprint"""
    if not response:
        response = frappe.local.response
    if frappe.error_log:
        # NOTE(review): the guard reads frappe.error_log but serializes
        # frappe.local.error_log — presumably proxies of the same object;
        # confirm.
        # frappe.response['exc'] = json.dumps("\n".join([cstr(d) for d in frappe.error_log]))
        response['exc'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.error_log])
    if frappe.local.message_log:
        response['_server_messages'] = json.dumps([frappe.utils.cstr(d) for
            d in frappe.local.message_log])
    # NOTE(review): precedence makes this parse as
    # (frappe.debug_log and frappe.conf.get("logging")) or False,
    # so the trailing "or False" is a no-op.
    if frappe.debug_log and frappe.conf.get("logging") or False:
        response['_debug_messages'] = json.dumps(frappe.local.debug_log)
def json_handler(obj):
    """serialize non-serializable data for json"""
    # Dates, durations and lazy proxies serialize as their text form.
    if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
        return unicode(obj)
    if isinstance(obj, LocalProxy):
        return unicode(obj)
    if isinstance(obj, frappe.model.document.BaseDocument):
        return obj.as_dict(no_nulls=True)
    raise TypeError("""Object of type %s with value of %s is not JSON serializable""" %
        (type(obj), repr(obj)))
def as_page():
    """Render the web page named in the response via the website renderer."""
    from frappe.website.render import render
    page_name = frappe.response['page_name']
    return render(page_name, http_status_code=frappe.response.get("http_status_code"))
def redirect():
    """Redirect the client to the location set on the response."""
    location = frappe.response.location
    return werkzeug.utils.redirect(location)
def download_backup(path):
    """Serve a backup file from the private directory; only users with the
    System Manager or Administrator role may download."""
    try:
        frappe.only_for(("System Manager", "Administrator"))
    except frappe.PermissionError:
        raise Forbidden(_("You need to be logged in and have System Manager Role to be able to access backups."))
    return send_private_file(path)
def send_private_file(path):
    """Stream a file from the site's private directory.

    If the request carries X-Use-X-Accel-Redirect, delegate the actual file
    transfer to the reverse proxy via an X-Accel-Redirect header instead of
    reading the file in Python.

    Raises:
        NotFound: if the file cannot be opened.
    """
    path = os.path.join(frappe.local.conf.get('private_path', 'private'), path.strip("/"))
    if frappe.local.request.headers.get('X-Use-X-Accel-Redirect'):
        # Zero-copy transfer through nginx.
        path = '/' + path
        response = Response()
        response.headers[b'X-Accel-Redirect'] = path
    else:
        filename = os.path.basename(path)
        filepath = frappe.utils.get_site_path(path)
        try:
            f = open(filepath, 'rb')
        except IOError:
            raise NotFound
        # wrap_file streams the open file through the WSGI server.
        response = Response(wrap_file(frappe.local.request.environ, f))
    # NOTE(review): `filename` is only bound in the else branch — on the
    # X-Accel-Redirect path these two lines look like they would raise
    # NameError; confirm against a live request.
    response.headers.add(b'Content-Disposition', 'attachment', filename=filename.encode("utf-8"))
    response.headers[b'Content-Type'] = mimetypes.guess_type(filename)[0] or b'application/octet-stream'
    return response
def handle_session_stopped():
    """Return a static 503 'updating' page shown while the site restarts."""
    response = Response("""<html>
			<body style="background-color: #EEE;">
				<h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto">
					Updating.
					We will be back in a few moments...
				</h3>
			</body>
			</html>""")
    # 503 tells clients (and proxies) the outage is temporary.
    response.status_code = 503
    response.content_type = 'text/html'
    return response
|
f | rom goto_file2 import foo
f | oo
|
"""
Tests for `pyleset` module.
"""
import pytest
f | rom pyleset import pyleset
class TestPyleset(object):
    """Placeholder test suite for the pyleset module."""
    @classmethod
    def setup_class(cls):
        # No suite-level setup required yet.
        pass
    def test_something(self):
        # Placeholder test; intentionally empty.
        pass
    @classmethod
    def teardown_class(cls):
        # No suite-level teardown required yet.
        pass
n ]);\n}",
"function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}");
self.options.keep_array_indentation = True;
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f']");
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']");
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']");
bt('var x = [{}\n]', 'var x = [{}\n]');
bt('var x = [{foo:bar}\n]', 'var x = [{\n foo: bar\n }\n]');
bt("a = ['something',\n 'completely',\n 'different'];\nif (x);");
bt("a = ['a','b','c']", "a = ['a', 'b', 'c']");
bt("a = ['a', 'b','c']", "a = ['a', 'b', 'c']");
bt("x = [{'a':0}]",
"x = [{\n 'a': 0\n}]");
bt('{a([[a1]], {b;});}',
'{\n a([[a1]], {\n b;\n });\n}');
bt("a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();");
bt("function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}",
"function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}");
self.options.keep_array_indentation = False;
bt('a = //comment\n/regex/;');
test_fragment('/*\n * X\n */');
test_fragment('/*\r\n * X\r\n */', '/*\n * X\n */');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a) {\n b;\n} else {\n c;\n}');
bt('var a = new function();');
test_fragment('new function');
self.options.brace_style = 'expand';
bt('//case 1\nif (a == 1)\n{}\n//case 2\nelse if (a == 2)\n{}');
bt('if(1){2}else{3}', "if (1)\n{\n 2\n}\nelse\n{\n 3\n}");
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try\n{\n a();\n}\ncatch (b)\n{\n c();\n}\ncatch (d)\n{}\nfinally\n{\n e();\n}");
bt('if(a){b();}else if(c) foo();',
"if (a)\n{\n b();\n}\nelse if (c) foo();");
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a)\n{\n // comment\n}\nelse\n{\n // comment\n}"); # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x)\n{\n y\n}\nelse\n{\n if (x)\n {\n y\n }\n}');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a)\n{\n b;\n}\nelse\n{\n c;\n}');
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo)\n {\n bar();\n }');
| bt('if (foo)\n{}\nelse /regex/.test();');
bt('if (foo) /regex/.test();');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a)\n{\n b;\n}\nelse\n{\n c;\n}');
test_fragment('if (foo) {', 'if (foo)\n{');
test_fragment('foo {', 'foo\n{');
test_fragment('return {', 'return {'); # return needs the br | ace.
test_fragment('return /* inline */ {', 'return /* inline */ {');
# test_fragment('return\n{', 'return\n{'); # can't support this?, but that's an improbable and extreme case anyway.
test_fragment('return;\n{', 'return;\n{');
bt("throw {}");
bt("throw {\n foo;\n}");
bt('var foo = {}');
bt('if (foo) bar();\nelse break');
bt('function x() {\n foo();\n}zzz', 'function x()\n{\n foo();\n}\nzzz');
bt('a: do {} while (); xxx', 'a: do {} while ();\nxxx');
bt('var a = new function();');
bt('var a = new function() {};');
bt('var a = new function a()\n {};');
test_fragment('new function');
bt("foo({\n 'a': 1\n},\n10);",
"foo(\n {\n 'a': 1\n },\n 10);");
bt('(["foo","bar"]).each(function(i) {return i;});',
'(["foo", "bar"]).each(function(i)\n{\n return i;\n});');
bt('(function(i) {return i;})();',
'(function(i)\n{\n return i;\n})();');
bt( "test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });");
bt( "test(\n" +
"/*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"},\n" +
"/*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test(\n" +
" /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });");
bt( "test( /*Argument 1*/\n" +
"{\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */\n" +
"{\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });");
self.options.brace_style = 'collapse';
bt('//case 1\nif (a == 1) {}\n//case 2\nelse if (a == 2) {}');
bt('if(1){2}else{3}', "if (1) {\n 2\n} else {\n 3\n}");
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try {\n a();\n} catch (b) {\n c();\n} catch (d) {} finally {\n e();\n}");
bt('if(a){b();}else if(c) foo();',
"if (a) {\n b();\n} else if (c) foo();");
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a) {\n // comment\n} else {\n // comment\n}"); # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x) {\n y\n} else {\n if (x) {\n y\n }\n}');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a) {\n b;\n} else {\n c;\n}');
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo) {\n bar();\n }');
bt('if (foo) {} else /regex/.test();');
bt('if (foo) /regex/.test();');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a) {\n b;\n} else {\n c;\n}');
test_fragment('if (foo) {', 'if (foo) {');
test_fragment('foo {', 'foo {');
test_fragment('return {', 'return {'); # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {');
# test_fragment('return\n{', 'return\n{'); # can't support this?, but that's an improbable and extreme case anyway.
test_fragment('return;\n{', 'return; {');
bt("throw {}");
bt("throw {\n foo;\n}");
bt('var foo = {}');
bt('if (foo) bar();\nelse break');
bt('function x() {\n foo();\n}zzz', 'function x() {\n foo();\n}\nzzz');
bt('a: do {} while (); xxx', 'a: do {} while ();\nxxx');
bt('var a = new function();');
bt('var a = new function() {};');
bt('var a = new function a() {};');
test_fragment('new function');
bt("foo({\n 'a': 1\n},\n10);",
"foo({\n 'a': 1\n },\n 10);");
bt('(["foo","bar"]).each(function(i) {return i;});',
'(["foo", "bar"]).each(function(i) {\n return i;\n});');
bt('(function(i) {return i;})();',
'(function(i) {\n return i;\n})();');
bt( "test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
|
from tests.package.test_python import TestPythonPackageBase
class TestPythonPy2Subprocess32(TestPythonPackageBase):
    """Buildroot runtime test: Python 2 target with the subprocess32 package.

    Builds a target with python + python-subprocess32 enabled and runs the
    sample script on the target to check the module imports and works.
    """
    # Enable pytest collection for this concrete test class
    # (the base class is presumably abstract — TODO confirm it sets __test__ = False).
    __test__ = True
    # Extend the base defconfig fragment; the triple-quoted string is consumed
    # verbatim by the test infrastructure, so its indentation is intentional.
    config = TestPythonPackageBase.config + \
        """
        BR2_PACKAGE_PYTHON=y
        BR2_PACKAGE_PYTHON_SUBPROCESS32=y
        """
    # Script executed on the booted target by the base class machinery.
    sample_scripts = ["tests/package/sample_python_subprocess32.py"]
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import pytest
from llnl.util.filesystem import touch, working_dir
import spack.repo
import spack.config
from spack.spec import Spec
from spack.version import ver
from spack.util.executable import which
# Skip the whole module when the svn binary is not available on the host.
pytestmark = pytest.mark.skipif(
    not which('svn'), reason='requires subversion to be installed')
@pytest.mark.parametrize("type_of_test", ['default', 'rev0'])
@pytest.mark.parametrize("secure", [True, False])
def test_fetch(
        type_of_test,
        secure,
        mock_svn_repository,
        config,
        mutable_mock_packages
):
    """Tries to:
    1. Fetch the repo using a fetch strategy constructed with
       supplied args (they depend on type_of_test).
    2. Check if the test_file is in the checked out repository.
    3. Assert that the repository is at the revision supplied.
    4. Add and remove some files, then reset the repo, and
       ensure it's all there again.
    """
    # Retrieve the right test parameters (revision, file name, fetcher args)
    t = mock_svn_repository.checks[type_of_test]
    h = mock_svn_repository.hash
    # Construct the package under test from the mock 'svn-test' package
    spec = Spec('svn-test')
    spec.concretize()
    pkg = spack.repo.get(spec)
    pkg.versions[ver('svn')] = t.args
    # Enter the stage directory and check some properties
    with pkg.stage:
        # Fetch/stage under the parametrized SSL-verification setting.
        with spack.config.override('config:verify_ssl', secure):
            pkg.do_stage()
        with working_dir(pkg.stage.source_path):
            assert h() == t.revision
            file_path = os.path.join(pkg.stage.source_path, t.file)
            assert os.path.isdir(pkg.stage.source_path)
            assert os.path.isfile(file_path)
            # Delete a tracked file; restage must bring it back.
            os.unlink(file_path)
            assert not os.path.isfile(file_path)
            # Create an untracked file; restage must discard it.
            untracked_file = 'foobarbaz'
            touch(untracked_file)
            assert os.path.isfile(untracked_file)
            pkg.do_restage()
            assert not os.path.isfile(untracked_file)
            assert os.path.isdir(pkg.stage.source_path)
            assert os.path.isfile(file_path)
            assert h() == t.revision
|
import logging
from anubis.model import builtin
from anubis.model import domain
from anubis.util import argmethod
_logger = logging.get | Logger(__name__)
def wrap(method):
async def run():
_logger.info('Built in domains')
for ddoc in builtin.DOMAINS:
_logger.info('Domain: {0}'.format(ddoc['_id'])) |
await method(ddoc['_id'])
_logger.info('User domains')
ddocs = domain.get_multi(fields={'_id': 1})
async for ddoc in ddocs:
_logger.info('Domain: {0}'.format(ddoc['_id']))
await method(ddoc['_id'])
if method.__module__ == '__main__':
argmethod._methods[method.__name__] = method
argmethod._methods[method.__name__ + '_all'] = run()
return method
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import partialdate.fields
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Redefines several PartialDateField/TextField columns (help_text,
    verbose_name, defaults); no schema data is lost. Do not edit the
    operations by hand — regenerate with ``makemigrations`` instead.
    """
    dependencies = [
        ('genealogio', '0023_auto_20160303_2105'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='date',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Datum', blank=True),
        ),
        migrations.AlterField(
            model_name='family',
            name='end_date',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Enddatum', blank=True),
        ),
        migrations.AlterField(
            model_name='family',
            name='start_date',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Anfangsdatum', blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='datebirth',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Geburtsdatum', blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='datedeath',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Todesdatum', blank=True),
        ),
        migrations.AlterField(
            model_name='personplace',
            name='end',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Ende', blank=True),
        ),
        migrations.AlterField(
            model_name='personplace',
            name='start',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Beginn', blank=True),
        ),
        migrations.AlterField(
            model_name='timelineitem',
            name='description',
            field=models.TextField(default='', help_text='Wird beim pdf-Export verwendet, kann als ReST formattiert werden, mit Links auf Objekte der Datenbank (siehe Dokumentation).', verbose_name='Beschreibung', blank=True),
        ),
        migrations.AlterField(
            model_name='timelineitem',
            name='end_date',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich); kann freibleiben', verbose_name='Enddatum', blank=True),
        ),
        migrations.AlterField(
            model_name='timelineitem',
            name='start_date',
            field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Startdatum', blank=True),
        ),
    ]
|
import os, sys
# Make the bundled dependencies in ./lib importable before anything else.
script_dir = os.path.dirname( os.path.realpath(__file__) )
sys.path.insert(0, script_dir + os.sep + "lib")
import logging, boto3, json, random
# for dynamodb filter queries
from boto3.dynamodb.conditions import Key, Attr
# setup log level to DEBUG
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# initialize DynamoDB client (module scope, so it is reused across warm invocations)
dynamodb = boto3.resource('dynamodb')
# DYNAMO_TABLE must be set in the Lambda environment; a missing value raises KeyError at import.
table = dynamodb.Table(os.environ['DYNAMO_TABLE'])
# Lambda entry point: replies with the static workshop welcome message.
def handler(event, context):
    """AWS Lambda entry point: return a static welcome message via response()."""
    payload = {"Message": "Welcome to the Serverless Workshop fully powered by AWS Lambda elastic cloud computing service"}
    return response(payload, event)
def response(body, event, code=200):
    """Shape *body* for the caller.

    When *event* looks like an API Gateway proxy request (has 'resource' and
    'httpMethod' keys), wrap the body in a proxy-integration response dict;
    otherwise return the body unchanged (direct invocation).
    """
    came_via_api_gateway = 'resource' in event and 'httpMethod' in event
    if not came_via_api_gateway:
        return body
    return {
        'statusCode': code,
        'headers': {},
        'body': json.dumps(body, indent=4, separators=(',', ':'))
    }
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
    """Return a class from a string including module and class.

    :param import_str: dotted path ``package.module.ClassName``.
    :raises ImportError: if the attribute is missing, or if *import_str*
        contains no module part (e.g. a bare ``ClassName``).
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    # BUG FIX: a dotless import_str leaves mod_str == '' and __import__('')
    # raises ValueError, which previously escaped uncaught. Normalize both
    # failure modes to ImportError, as callers expect.
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
    """Import the class named by *import_str* and instantiate it with the given arguments."""
    cls = import_class(import_str)
    return cls(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
    """Try to import an object from a default namespace first.

    Looks up ``<name_space>.<import_str>`` and instantiates it; if that
    import fails, falls back to treating *import_str* as a full path.
    """
    namespaced = '.'.join((name_space, import_str))
    try:
        return import_class(namespaced)(*args, **kwargs)
    except ImportError:
        # Not in the default namespace — try the string as a full path.
        return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
    """Import and return the module named by *import_str*."""
    __import__(import_str)
    module = sys.modules[import_str]
    return module
def import_versioned_module(version, submodule=None):
    """Import ``os_net_config.v<version>``, or ``os_net_config.v<version>.<submodule>`` when given."""
    parts = ['os_net_config.v%s' % version]
    if submodule:
        parts.append(submodule)
    return import_module('.'.join(parts))
def try_import(import_str, default=None):
    """Import a module by name, returning *default* when the import fails."""
    try:
        module = import_module(import_str)
    except ImportError:
        return default
    return module
|
import numpy as np
import scipy.cluster.hierarchy as hr
import scipy.spatial as spa
import clustering
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
import filter
class textMiningEac:
    """Evidence Accumulation Clustering (EAC) over tweet/word distance data.

    Loads a word distance matrix plus tweets/vocabulary/frequencies from
    ``filter.filtertweets()``, builds the EAC co-association distance matrix,
    and offers PAM / hierarchical clustering and reporting helpers.
    """

    def __init__(self, k, N, low, high=0):
        """
        :param k: number of clusters to produce.
        :param N: number of clusterings accumulated by EAC.
        :param low: lower bound passed through to ``clustering.EAC``.
        :param high: upper bound passed through to ``clustering.EAC`` (0 = default).
        """
        self.k = k
        # Distance matrix plus the raw tweets, vocabulary and word frequencies.
        self.D, self.tweets, self.words, self.freq = filter.filtertweets()
        # Build the co-association (EAC) distance matrix.
        self.loadEAC(N, low, high)

    def loadEAC(self, N, low, high=0):
        """Rebuild the co-association matrix and derive the EAC distance matrix."""
        m, n = self.D.shape
        coasocMatrix = clustering.EAC(self.D, N, low, high)
        print(coasocMatrix)
        # Distance = 1 - co-association frequency.
        self.EAC_D = np.ones(n) - coasocMatrix

    def startPAM(self):
        """Run PAM (k-medoids) on the EAC distance matrix and return the labels."""
        (a, b, self.labels) = clustering.PAM(self.EAC_D, self.k, True)
        return self.labels

    def startHierarchical(self):
        """Run Ward hierarchical clustering on the EAC distance matrix and return the labels."""
        z = AgglomerativeClustering(n_clusters=self.k, linkage='ward').fit(self.EAC_D)
        self.labels = z.labels_
        return self.labels

    def getClustersTweets(self):
        """Assign each tweet to the cluster whose words carry the most frequency mass.

        Also writes a 'clustered.csv' report (best effort).
        :returns: numpy int array of one cluster label per tweet.
        """
        # BUG FIX: np.int was removed in NumPy >= 1.24; plain int is equivalent.
        labelsTweets = np.zeros(len(self.tweets), dtype=int)
        for i in range(len(self.tweets)):
            # One accumulator per cluster. Was hard-coded to 2, which raised
            # IndexError for any k > 2; generalized to self.k (identical for k=2).
            acum = np.zeros(self.k)
            for j in range(len(self.labels)):
                # If the word appears in the tweet, add its frequency to the
                # accumulator of the word's cluster.
                if self.words[j] in self.tweets[i]:
                    acum[self.labels[j]] += self.freq[j]
            # Assign the cluster with the largest accumulated value.
            labelsTweets[i] = np.argmax(acum)
        lista = labelsTweets.tolist()
        try:
            # Context manager guarantees the file is closed even if a write fails.
            with open('clustered.csv', 'w') as saveFile:
                for i in range(len(self.tweets)):
                    saveFile.write(str(lista[i]) + ': ' + ' '.join(self.tweets[i]) + '\n')
        except Exception as e:
            print("error: {0}".format(e))
        return labelsTweets

    def getPrecisionIris(self):
        """Test helper: rough accuracy against the iris reference clusters. Not reliable."""
        # Read the original clusters (unused beyond loading; kept for parity).
        originalClusters = np.genfromtxt('orCL.data', delimiter=',', dtype=None)
        results = {}
        j = 0
        for i in range(50, 151, 50):
            # BUG FIX: the keyword is return_counts (plural); 'return_count'
            # raised TypeError on every call.
            unique, counts = np.unique(self.labels[i - 50:i], return_counts=True)
            print(unique)
            print(counts)
            maxvalue = np.amax(counts)
            results[j] = maxvalue / 50
            j = j + 1
        print("Setosa= " + '%.2f' % results[0] + "\nVersicolor= " + '%.2f' % results[1] + "\nVirginica= " + '%.2f' % results[2])

    def getSilhouette(self):
        """Plot the silhouette for the current labels."""
        clustering.Silhouette(self.D, self.labels, self.k)
import tornado.web
from datetime import date
from sqlalchemy.orm.exc import NoResultFound
from pyprint.handler import BaseHandler
from pyprint.models import User, Link, Post
class SignInHandler(BaseHandler):
    """Render the login form and sign users in."""

    def get(self):
        """Show the login page."""
        return self.background_render('login.html')

    def post(self):
        """Validate the submitted credentials.

        On success set the session cookie and go to the admin post list; on
        any failure redirect back to the login page.
        """
        username = self.get_argument('username', None)
        password = self.get_argument('password', None)
        if username and password:
            try:
                user = self.orm.query(User).filter(User.username == username).one()
            except NoResultFound:
                return self.redirect('/login')
            if user.check(password):
                self.set_secure_cookie('username', user.username)
                # BUG FIX: the original fell through to a second
                # redirect('/login') after this one; Tornado raises when the
                # response is written twice. Return immediately instead.
                return self.redirect('/kamisama/posts')
            return self.redirect('/login')
class ManagePostHandler(BaseHandler):
    """List posts in the admin backend and handle deletions."""

    @tornado.web.authenticated
    def get(self):
        """Render the post list, newest first."""
        posts = self.orm.query(Post.title, Post.id).order_by(Post.id.desc()).all()
        self.background_render('posts.html', posts=posts)

    @tornado.web.authenticated
    def post(self):
        """Delete the post named by 'id' when action == 'del'."""
        if self.get_argument('action', None) != 'del':
            return
        post_id = self.get_argument('id', 0)
        if not post_id:
            return
        doomed = self.orm.query(Post).filter(Post.id == post_id).one()
        self.orm.delete(doomed)
        self.orm.commit()
class AddPostHandler(BaseHandler):
    """Create new blog posts."""

    @tornado.web.authenticated
    def get(self):
        """Render an empty post editing form."""
        self.background_render('add_post.html', post=None)

    @tornado.web.authenticated
    def post(self):
        """Validate the submitted form and store a new post."""
        title = self.get_argument('title', None)
        content = self.get_argument('content', None)
        # Parsed but currently unused; kept for parity with the original.
        tags = self.get_argument('tags', '').strip().split(',')
        if not title or not content:
            return self.redirect('/kamisama/posts/add')
        duplicates = self.orm.query(Post.title).filter(Post.title == title).all()
        if duplicates:
            return self.write('<script>alert("Title has already existed");window.history.go(-1);</script>')
        self.orm.add(Post(title=title, content=content, created_time=date.today()))
        self.orm.commit()
        return self.redirect('/kamisama/posts')
class AddLinkHandler(BaseHandler):
    """Manage blogroll links: list, add and delete."""

    @tornado.web.authenticated
    def get(self):
        """Render the link management page."""
        links = self.orm.query(Link).all()
        self.background_render('links.html', links=links)

    @tornado.web.authenticated
    def post(self):
        """Dispatch on 'action': 'add' stores a link, 'del' removes one."""
        action = self.get_argument('action', None)
        if action == 'add':
            name = self.get_argument('name', '')
            url = self.get_argument('url', '')
            if not name or not url:
                return self.redirect('/kamisama/links')
            self.orm.add(Link(name=name, url=url))
            self.orm.commit()
            return self.redirect('/kamisama/links')
        if action == 'del':
            link_id = self.get_argument('id', 0)
            if link_id:
                target = self.orm.query(Link).filter(Link.id == link_id).one()
                self.orm.delete(target)
                self.orm.commit()
|
import datetime
def suffix(d | ):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
def custom_strftime(format, t):
    """Format *t* with strftime, expanding '{S}' to the ordinal day (e.g. '3rd')."""
    ordinal_day = str(t.day) + suffix(t.day)
    return t.strftime(format).replace('{S}', ordinal_day)
# NOTE: Python 2 script (print statements, raw_input).
print "Welcome to GenerateUpdateLines, the nation's favourite automatic update line generator."
# Inclusive range of day numbers to generate rows for.
start = int(raw_input("Enter initial day number: "))
stop = int(raw_input("Enter final day number: "))
# Reference date: day number 1 corresponds to 2018-03-24.
t0 = datetime.date(2018, 3, 24)
for d in range(start, stop+1):
    date = t0 + datetime.timedelta(d-1)
    # Emit one table row per day, e.g. "| 3 | Mon 26th March | | |".
    print "| "+str(d)+" | "+custom_strftime("%a {S} %B", date)+" | | |"
# from datetime import datetime as dt
#
# def suffix(d):
# return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
#
# def custom_strftime(format, t):
# return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))
#
# print custom_strftime('%B {S}, %Y', dt.now())
|
to
show code editor tabs.
"""
import logging
import os
from pyqode.core.dialogs.unsaved_files import DlgUnsavedFiles
from pyqode.core.modes.filewatcher import FileWatcherMode
from pyqode.core.widgets.tab_bar import TabBar
from pyqode.qt import QtCore, QtWidgets
from pyqode.qt.QtWidgets import QTabBar, QTabWidget
def _logger():
return logging.getLogger(__name__)
class TabWidget(QTabWidget):
"""
QTabWidget specialised to hold CodeEdit instances (or any other
    object that has the same interface).
It ensures that there is only one open editor tab for a specific file path,
it adds a few utility methods to quickly manipulate the current editor
widget. It will automatically rename tabs that share the same base filename
to include their distinctive parent directory.
It handles tab close requests automatically and show a dialog box when
a dirty tab widget is being closed. It also adds a convenience QTabBar
with a "close", "close others" and "close all" menu. (You can add custom
actions by using the addAction and addSeparator methods).
It exposes a variety of signal and slots for a better integration with
your applications( dirty_changed, save_current, save_all, close_all,
close_current, close_others).
.. deprecated: starting from version 2.4, this widget is considered as
deprecated. You should use
:class:`pyqode.core.widgets.SplittableTabWidget` instead. It will be
removed in version 2.6.
"""
#: Signal emitted when a tab dirty flag changed
dirty_changed = QtCore.Signal(bool)
#: Signal emitted when the last tab has been closed
last_tab_closed = QtCore.Signal()
#: Signal emitted when a tab has been closed
tab_closed = QtCore.Signal(QtWidgets.QWidget)
    @property
    def active_editor(self):
        """
        Returns the current editor widget or None if the current tab widget is
        not a subclass of CodeEdit or if there is no open tab.
        """
        # _current is maintained by the currentChanged handler connected in __init__.
        return self._current
    def __init__(self, parent):
        """Build the tab widget: custom tab bar, context menu and signal wiring.

        :param parent: parent QWidget.
        """
        QtWidgets.QTabWidget.__init__(self, parent)
        self._current = None
        self.currentChanged.connect(self._on_current_changed)
        self.tabCloseRequested.connect(self._on_tab_close_requested)
        # Replace the default tab bar with one that supports a context menu.
        tab_bar = TabBar(self)
        tab_bar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        tab_bar.customContextMenuRequested.connect(self._show_tab_context_menu)
        self.setTabBar(tab_bar)
        self.tab_bar = tab_bar
        # Context menu with the three standard close actions.
        self._context_mnu = QtWidgets.QMenu()
        for name, slot in [('Close', self.close),
                           ('Close others', self.close_others),
                           ('Close all', self.close_all)]:
            qaction = QtWidgets.QAction(name, self)
            qaction.triggered.connect(slot)
            self._context_mnu.addAction(qaction)
            self.addAction(qaction)
        # keep a list of widgets (to avoid PyQt bug where
        # the C++ class loses the wrapped obj type).
        self._widgets = []
@QtCore.Slot()
def close(self):
"""
Closes the active editor
"""
self.tabCloseRequested.emit(self.currentIndex())
    @QtCore.Slot()
    def close_others(self):
        """
        Closes every editor tab except the current one.
        """
        current_widget = self.currentWidget()
        # NOTE(review): the keyword really is spelt 'exept' in
        # _try_close_dirty_tabs' signature; kept as-is.
        self._try_close_dirty_tabs(exept=current_widget)
        i = 0
        while self.count() > 1:
            widget = self.widget(i)
            if widget != current_widget:
                self.removeTab(i)
            else:
                # Reached the current tab (now at index 0 after removals);
                # keep removing the tab right after it.
                i = 1
@QtCore.Slot()
def close_all(self):
"""
Closes all editors
"""
if self._try_close_dirty_tabs():
while self.count():
widget = self.widget(0)
self.removeTab(0)
self.tab_closed.emit(widget)
return True
return False
def _ensure_unique_name(self, code_edit, name):
if name is not None:
code_edit._tab_name = name
else:
code_edit._tab_name = code_edit.file.name
file_name = code_edit.file.name
if self._name_exists(file_name):
file_name = self._rename_duplicate_tabs(
code_edit, code_edit.file.name, code_edit.file.path)
code_edit._tab_name = file_name
    @QtCore.Slot()
    def save_current(self, path=None):
        """
        Save current editor content. Leave file to None to erase the previous
        file content. If the current editor's file_path is None and path
        is None, the function will call
        ``QtWidgets.QFileDialog.getSaveFileName`` to get a valid save filename.
        :param path: path of the file to save, leave it None to overwrite
            existing file.
        :returns: True on success, False when cancelled or when the current
            tab is not an editor widget.
        """
        try:
            # No target path anywhere: ask the user for one.
            if not path and not self._current.file.path:
                path, filter = QtWidgets.QFileDialog.getSaveFileName(
                    self, 'Choose destination path')
                if not path:
                    return False
            old_path = self._current.file.path
            code_edit = self._current
            self._save_editor(code_edit, path)
            path = code_edit.file.path
            # path (and icon) may have changed
            if path and old_path != path:
                self._ensure_unique_name(code_edit, code_edit.file.name)
                self.setTabText(self.currentIndex(), code_edit._tab_name)
                # Refresh the tab icon when the extension changed.
                ext = os.path.splitext(path)[1]
                old_ext = os.path.splitext(old_path)[1]
                if ext != old_ext or not old_path:
                    icon = QtWidgets.QFileIconProvider().icon(
                        QtCore.QFileInfo(code_edit.file.path))
                    self.setTabIcon(self.currentIndex(), icon)
            return True
        except AttributeError:  # not an editor widget
            pass
        return False
@QtCore.Slot()
def save_all(self):
"""
Save all editors.
"""
| initial_index = self.currentIndex()
for i in range(self.count()):
try:
self.setCurrentIndex(i)
self.save_current()
except AttributeError:
pass
self.setCurrentInde | x(initial_index)
    def addAction(self, action):
        """
        Adds an action to the TabBar context menu.
        :param action: QAction to append
        """
        # NOTE(review): overrides QWidget.addAction — the action goes into
        # the tab bar's context menu instead of the widget; confirm intended.
        self._context_mnu.addAction(action)
    def add_separator(self):
        """
        Adds a separator to the TabBar context menu.
        :returns: The separator action.
        """
        return self._context_mnu.addSeparator()
def index_from_filename(self, path):
"""
Checks if the path is already open in an editor tab.
:param path: path to check
:returns: The tab index if found or -1
"""
if path:
for i in range(self.count()):
widget = self.widget(i)
try:
if widget.file.path == path:
return i
except AttributeError:
pass # not an editor widget
return -1
    @staticmethod
    def _del_code_edit(code_edit):
        """Close and delete *code_edit*; widgets lacking those methods are ignored."""
        try:
            code_edit.close()
            code_edit.delete()
        except AttributeError:
            # Not an editor widget: nothing to close.
            pass
        del code_edit
def add_code_edit(self, code_edit, name=None):
"""
Adds a code edit tab, sets its text as the editor.file.name and
sets it as the active tab.
The widget is only added if there is no other editor tab open with the
same filename, else the already open tab is set as current.
If the widget file path is empty, i.e. this is a new document that has
not been saved to disk, you may provided a formatted string
such as 'New document %d.txt' for the document name. The int format
will be automatically replaced by the number of new documents
(e.g. 'New document 1.txt' then 'New document 2.txt' and so on).
If you prefer to use yo |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume backup RPC API.
"""
from oslo_config import cfg
from oslo_log import log as logging
from jacket import rpc
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BackupAPI(rpc.RPCAPI):
    """Client side of the volume backup RPC API.

    API version history:
        1.0 - Initial version.
        1.1 - Changed methods to accept backup objects instead of IDs.
        1.2 - A version that got in by mistake (without breaking anything).
        1.3 - Dummy version bump to mark start of having storage-backup service
              decoupled from storage-volume.
        ... Mitaka supports messaging 1.3. Any changes to existing methods in
        1.x after this point should be done so that they can handle version cap
        set to 1.3.
        2.0 - Remove 1.x compatibility
    """
    RPC_API_VERSION = '1.3'
    TOPIC = CONF.backup_topic
    BINARY = 'storage-backup'
    def _compat_ver(self, current, legacy):
        """Return *current* when the server can handle it, else *legacy*."""
        if self.client.can_send_version(current):
            return current
        else:
            return legacy
    def create_backup(self, ctxt, backup):
        """Asynchronously start a backup on the backup's host (cast)."""
        LOG.debug("create_backup in rpcapi backup_id %s", backup.id)
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=backup.host, version=version)
        cctxt.cast(ctxt, 'create_backup', backup=backup)
    def restore_backup(self, ctxt, volume_host, backup, volume_id):
        """Asynchronously restore *backup* onto *volume_id* on *volume_host* (cast)."""
        LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=volume_host, version=version)
        cctxt.cast(ctxt, 'restore_backup', backup=backup,
                   volume_id=volume_id)
    def delete_backup(self, ctxt, backup):
        """Asynchronously delete *backup* on its host (cast)."""
        LOG.debug("delete_backup rpcapi backup_id %s", backup.id)
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=backup.host, version=version)
        cctxt.cast(ctxt, 'delete_backup', backup=backup)
    def export_record(self, ctxt, backup):
        """Synchronously export *backup*'s record; returns the service reply (call)."""
        LOG.debug("export_record in rpcapi backup_id %(id)s "
                  "on host %(host)s.",
                  {'id': backup.id,
                   'host': backup.host})
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=backup.host, version=version)
        return cctxt.call(ctxt, 'export_record', backup=backup)
    def import_record(self,
                      ctxt,
                      host,
                      backup,
                      backup_service,
                      backup_url,
                      backup_hosts):
        """Asynchronously import a backup record on *host* (cast)."""
        LOG.debug("import_record rpcapi backup id %(id)s "
                  "on host %(host)s for backup_url %(url)s.",
                  {'id': backup.id,
                   'host': host,
                   'url': backup_url})
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'import_record',
                   backup=backup,
                   backup_service=backup_service,
                   backup_url=backup_url,
                   backup_hosts=backup_hosts)
    def reset_status(self, ctxt, backup, status):
        """Asynchronously reset *backup*'s status on its host.

        NOTE(review): this returns the result of cast(), which is None —
        presumably the return is vestigial; confirm before relying on it.
        """
        LOG.debug("reset_status in rpcapi backup_id %(id)s "
                  "on host %(host)s.",
                  {'id': backup.id,
                   'host': backup.host})
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=backup.host, version=version)
        return cctxt.cast(ctxt, 'reset_status', backup=backup, status=status)
    def check_support_to_force_delete(self, ctxt, host):
        """Synchronously ask *host* whether its backup driver supports force delete (call)."""
        LOG.debug("Check if backup driver supports force delete "
                  "on host %(host)s.", {'host': host})
        version = self._compat_ver('2.0', '1.1')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'check_support_to_force_delete')
|
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """p2p_primary_path must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("name",p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__p2p_primary_path = t
if hasattr(self, "_set"):
self._set()
    # Auto-generated by pyangbind (PythonClass plugin); do not edit by hand.
    def _unset_p2p_primary_path(self):
        """Reset the p2p-primary-path list back to an empty default YANGDynClass."""
        self.__p2p_primary_path = YANGDynClass(
            base=YANGListType(
                "name",
                p2p_primary_path_.p2p_primary_path,
                yang_name="p2p-primary-path",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="name",
                extensions=None,
            ),
            is_container="list",
            yang_name="p2p-primary-path",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
    # Expose the generated getter/setter pair as a plain property.
    p2p_primary_path = __builtin__.property(
        _get_p2p_primary_path, _set_p2p_primary_path
    )
    # Registry of YANG child elements used by the generated __init__/copy logic.
    _pyangbind_elements = OrderedDict([("p2p_primary_path", p2p_primary_path)])
from . import p2p_primary_path_
class p2p_primary_path(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Primary paths associated with the LSP
"""
__slots__ = ("_path_helper", "_extmethods", "__p2p_primary_path")
_yang_name = "p2p-primary-path"
_pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        """Auto-generated by pyangbind: build the default p2p-primary-path list
        and optionally copy element values from a single supplied object.
        """
        self._path_helper = False
        self._extmethods = False
        # Default (empty) keyed list container for p2p-primary-path entries.
        self.__p2p_primary_path = YANGDynClass(
            base=YANGListType(
                "name",
                p2p_primary_path_.p2p_primary_path,
                yang_name="p2p-primary-path",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="name",
                extensions=None,
            ),
            is_container="list",
            yang_name="p2p-primary-path",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: exactly one source object is allowed.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Copy only elements that the source object has actually changed.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Return the YANG path of this node: the parent's path plus our own
        # name when attached to a parent, otherwise the absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "lsps",
                "constrained-path",
                "tunnels",
                "tunnel",
                "p2p-tunnel-attributes",
                "p2p-primary-path",
            ]
    def _get_p2p_primary_path(self):
        """
        Getter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list)
        YANG Description: List of p2p primary paths for a tunnel
        """
        # Name-mangled private attribute initialised in __init__.
        return self.__p2p_primary_path
def _set_p2p_primary_path(self, v, load=False):
"""
Setter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_p2p_primary_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_p2p_primary_path() directly.
YANG Description: List of p2p primary paths for a tunnel
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """p2p_primary_path must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("name",p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=No |
"""coop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import routers
from guide.views import area, ArtificialProblemViewSet,NaturalProblemViewSet,ProblemImageViewSet, AreaViewSet, SectorViewSet
from members.views import UserViewSet,MemberViewSet
admin.site.site_header='Galway Climbing Co-op admin'
admin.site.site_title='Galway Climbing Co-op admin'
#admin.site.index_title='Galway Climbing Co-op admin'
# django rest framework url routers for viewsets
router = routers.DefaultRouter()
router.register(r'artificialproblems',ArtificialProblemViewSet)
router.register(r'naturalproblems',NaturalProblemViewSet)
router.register(r'problemimages',ProblemImageViewSet)
router.register(r'users',UserViewSet)
router.register(r'members',MemberViewSet)
router.register(r'areas',AreaViewSet)
router.register(r'sectors',SectorViewSet)
from guide.views import area_map
urlpatterns = [
url(r'api/', include(router.urls)),
url(r'api-auth/',include('rest_framework.urls',namespace='rest_framework')),
url(r'^admin/', a | dmin.site.urls),
url(r'^$',area_map,{'area_id':1}),
url(r'^guide/', include('guide.urls',namespace="guide")),
url(r'^home/', include('homepage.urls',namespace="homepage")),
url(r'^members/auth/', include('members.urls')),
# note that the (customis | ed) templates for the auth views are in [BASE_DIR]/templates/registration
url(r'^members/', include('members.urls',namespace="members")),
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
rror(message)
    def get_latest_event_id(self):
        """Query the ftp server and determine the latest event id.

        The id is stored in ``self.event_id`` (the method itself returns
        None despite the original docstring).

        :return: None. Sets ``self.event_id`` to a string event id.
        :raises: NetworkError, EventIdError
        """
        ftp_client = FtpClient()
        try:
            # Sort case-insensitively so the newest timestamp-named entry
            # ends up at the end of the listing.
            ftp_client_list = ftp_client.get_listing()
            ftp_client_list.sort(key=lambda x: x.lower())
        except NetworkError:
            raise
        # Encode "now" as an integer in the same YYYYMMDDhhmmss layout the
        # event ids use, so ids can be compared numerically against it.
        now = datetime.now()
        now = int(
            '%04d%02d%02d%02d%02d%02d' % (
                now.year, now.month, now.day, now.hour, now.minute, now.second
            ))
        # Sentinel greater than `now` guarantees at least one loop pass.
        event_id = now + 1
        # Pop entries from the end (newest first) until one parses to an id
        # that is not in the future; bail out if the listing runs dry.
        while int(event_id) > now:
            if len(ftp_client_list) < 1:
                raise EventIdError('Latest Event Id could not be obtained')
            # Entry looks like 'path/20120726022003.inp.zip' -> keep the
            # leading timestamp token.
            event_id = ftp_client_list.pop().split('/')[-1].split('.')[0]
        # Defensive check; event_id is always a string here in practice.
        if event_id is None:
            raise EventIdError('Latest Event Id could not be obtained')
        self.event_id = event_id
def is_on_server(self):
"""Check the event associated with this instance exists on the server.
:return: True if valid, False if not
:raises: NetworkError
"""
input_file_name, output_file_name = self.file_names()
file_list = [input_file_name, output_file_name]
ftp_client = FtpClient()
return ftp_client.has_files(file_list)
def file_names(self):
"""Return file names for the inp and out files based on the event id.
e.g. 20120726022003.inp.zip, 20120726022003.out.zip
:return: Tuple Consisting of inp and out local cache
paths.
:rtype: tuple (str, str)
:raises: None
"""
input_file_name = '%s.inp.zip' % self.event_id
output_file_name = '%s.out.zip' % self.event_id
return input_file_name, output_file_name
def cache_paths(self):
"""Return the paths to the inp and out files as expected locally.
:return: Tuple consisting of inp and out local cache paths.
:rtype: tuple (str, str)
:raises: None
"""
input_file_name, output_file_name = self.file_names()
input_file_path = os.path.join(shakemap_zip_dir(), input_file_name)
output_file_path = os.path.join(shakemap_zip_dir(), output_file_name)
return input_file_path, output_file_path
def is_cached(self):
"""Check the event associated with this instance exists in cache.
:return: True if locally cached, False if not
:raises: None
"""
input_file_path, output_file_path = self.cache_paths()
if os.path.exists(input_file_path) and \
os.path.exists(output_file_path):
# TODO: we should actually try to unpack them for deeper validation
return True
else:
LOGGER.debug('%s is not cached' % input_file_path)
LOGGER.debug('%s is not cached' % output_file_path)
return False
def validate_event(self):
"""Check that the event associated with this instance exists either
in the local event cache, or on the remote ftp site.
:return: True if valid, False if not
:raises: NetworkError
"""
# First check local cache
if self.is_cached():
return True
else:
return self.is_on_server()
#noinspection PyMethodMayBeStatic
def _fetch_file(self, event_file, retries=3):
"""Private helper to fetch a file from the ftp site.
e.g. for event 20110413170148 this file would be fetched::
ftp://118.97.83.243/20110413170148.inp.zip
and this local file created::
/tmp/realtime/20110413170148.inp.zip
.. note:: If a cached copy of the file exits, the path to the cache
copy will simpl | y be returned without invoking any network requests.
:param event_file: Filename on server | e.g.20110413170148.inp.zip
:type event_file: str
:param retries: Number of reattempts that should be made in
in case of network error etc.
:type retries: int
:return: A string for the dataset path on the local storage system.
:rtype: str
:raises: EventUndefinedError, NetworkError
"""
# Return the cache copy if it exists
local_path = os.path.join(shakemap_zip_dir(), event_file)
if os.path.exists(local_path):
return local_path
#Otherwise try to fetch it using ftp
for counter in range(retries):
last_error = None
try:
client = FtpClient()
client.get_file(event_file, local_path)
except NetworkError, e:
last_error = e
except:
LOGGER.exception(
'Could not fetch shake event from server %s'
% event_file)
raise
if last_error is None:
return local_path
LOGGER.info('Fetching failed, attempt %s' % counter)
LOGGER.exception('Could not fetch shake event from server %s'
% event_file)
raise Exception('Could not fetch shake event from server %s'
% event_file)
def fetch_input(self):
"""Fetch the input file for the event id associated with this class
e.g. for event 20110413170148 this file would be fetched::
ftp://118.97.83.243/20110413170148.inp.zip
and this local file created::
/tmp/realtime/20110413170148.inp.zip
:return: A string for the 'inp' dataset path on the local storage
system.
:raises: EventUndefinedError, NetworkError
"""
if self.event_id is None:
raise EventUndefinedError('Event is none')
event_file = '%s.inp.zip' % self.event_id
try:
return self._fetch_file(event_file)
except (EventUndefinedError, NetworkError):
raise
def fetch_output(self):
"""Fetch the output file for the event id associated with this class.
e.g. for event 20110413170148 this file would be fetched::
ftp://118.97.83.243/20110413170148.out.zip
and this local file created::
/tmp/realtime/20110413170148.out.zip
:return: A string for the 'out' dataset path on the local storage
system.
:raises: EventUndefinedError, NetworkError
"""
if self.event_id is None:
raise EventUndefinedError('Event is none')
event_file = '%s.out.zip' % self.event_id
try:
return self._fetch_file(event_file)
except (EventUndefinedError, NetworkError):
raise
def fetch_event(self):
"""Fetch both the input and output shake data from the server for
the event id associated with this class.
:return: A two tuple where the first item is the inp dataset path and
the second the out dataset path on the local storage system.
:raises: EventUndefinedError, NetworkError
"""
if self.event_id is None:
raise EventUndefinedError('Event is none')
try:
input_file = self.fetch_input()
output_file = self.fetch_output()
except (EventUndefinedError, NetworkError):
raise
return input_file, output_file
def extract(self, force_flag=False):
"""Extract the zipped resources. The two zips associated with this
shakemap will be extracted to e.g.
:file:`/tmp/inasafe/realtime/shakemaps-extracted/20120726022003`
After extraction the complete path will appear something like this:
:file:`/tmp/inasafe/realtime/shakemaps-extracted/
20120726022003/usr/local/smap/data/20120726022003`
with input and output directories appearing beneath that.
This method will then move the grid.xml file up to the root of
the extract dir and recursively remove the extracted dirs.
After this final step, the follow |
on chip version "+str(self.chip.chipID))
#
# DCCAL_STAT (0x05C1)
#
# DCCAL_CALSTATUS<7:0>
@property
def DCCAL_CALSTATUS(self):
"""
Get the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CALSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CALSTATUS.setter
def DCCAL_CALSTATUS(self, value):
"""
Set the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CALSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_CMPSTATUS<7:0>
@property
def DCCAL_CMPSTATUS(self):
"""
Get the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CMPSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPSTATUS.setter
def DCCAL_CMPSTATUS(self, value):
"""
Set the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CMPSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_CFG2 (0x05C2)
#
# DCCAL_CMPCFG<7:0>
@property
def DCCAL_CMPCFG(self):
"""
Get the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_CMPCFG<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPCFG.setter
def DCCAL_CMPCFG(self, value):
"""
Set the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_CMPCFG<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_START<7:0>
@property
def DCCAL_START(self):
"""
Get the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_START<7:0>')
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_START.setter
def DCCAL_START(self, value):
"""
Set the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_START<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
def startRXBQ(self):
"""
Starts RXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<7
def startRXBI(self):
"""
Starts RXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<6
def startRXAQ(self):
"""
Starts RXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<5
def startRXAI(self):
"""
Starts RXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<4
def startTXBQ(self):
"""
Starts TXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<3
def startTXBI(self):
"""
Starts TXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<2
def startTXAQ(self):
"""
Starts TXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<1
def startTXAI(self):
"""
Starts TXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1
#
# DCCAL_TXAI (0x05C3)
#
@property
def DC_TXAI(self):
"""
Get the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAI', 'DCRD_TXAI', 0)
self._writeReg('TXAI', 'DCRD_TXAI', 1)
val = self._readReg('TXAI', 'DC_TXAI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAI is not supported on chip version "+str(self.chip.chipID))
@DC_TXAI.setter
def DC_TXAI(self, value):
"""
Set the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAI', 'DC_TXAI<10:0>', val)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
self._writeReg('TXAI', 'DCWR_TXAI', 1)
else:
raise ValueError("Bitfield TXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXAQ (0x05C4)
#
@property
def DC_TXAQ(self):
"""
Get the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
self._writeReg('TXAQ', 'DCRD_TXAQ', 1)
val = self._readReg('TXAQ', 'DC_TXAQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXAQ.setter
def DC_TXAQ(self, value):
"""
Set the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [ | -1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAQ', 'DC_TXAQ<10:0>', val)
self._writ | eReg('TXAQ', 'DCWR_TXAQ', 0)
self._writeReg('TXAQ', 'DCWR_TXAQ', 1)
else:
raise ValueError("Bitfield TXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBI (0x05C5)
#
@property
def DC_TXBI(self):
"""
Get the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBI', 'DCRD_TXBI', 0)
self._writeReg('TXBI', 'DCRD_TXBI', 1)
val = self._readReg('TXBI', 'DC_TXBI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBI is not supported on chip version "+str(self.chip.chipID))
@DC_TXBI.setter
def DC_TXBI(self, value):
"""
Set the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBI', 'DC_TXBI<10:0>', val)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
self._writeReg('TXBI', 'DCWR_TXBI', 1)
else:
|
"""
Grab screen data from OMERO based on Screen ID
"""
import csv
import multiprocessing
import progressbar
import signal
import sys
import time
import requests
import json
from argparse import ArgumentParser
import omeroidr.connect as connect
from omeroidr.data import Data
# Command-line interface: screen id, output path, OMERO server and credentials.
parser = ArgumentParser(prog='OMERO screen data downloader')
parser.add_argument('-i', '--id', help='Id of the screen')
parser.add_argument('-o', '--output', required=False, default='omero.tab', help='Path to the tab separated output file')
parser.add_argument('-s', '--server', required=False, default='http://idr-demo.openmicroscopy.org', help='Base url for OMERO server')
parser.add_argument('-u', '--user', required=False, help='OMERO Username')
parser.add_argument('-w', '--password', required=False, help='OMERO Password')
pargs = parser.parse_args()
# list of well metadata, appended to by well_details_callback as async results arrive
wells_data = []

# initialize the progress bar shared by the async result callbacks
widgets = [progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets)
def init_worker():
    """
    Pool worker initialiser: ignore SIGINT so Ctrl-C is handled only by
    the parent process (which terminates the pool cleanly).
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def well_details_callback(well):
    """
    Completion callback for Pool.apply_async: advance the progress bar by
    one and collect the well metadata object.
    :param well: Well metadata object
    """
    pbar.update(pbar.previous_value + 1)
    # collect the result; main() sorts and writes wells_data at the end
    wells_data.append(well)
def main():
    """Download well metadata for a screen and write it as a tab file."""
    # login
    session = connect.connect_to_omero(pargs.server, pargs.user, pargs.password)
    # init data
    omero_data = Data(session, pargs.server)
    # get wells for screen
    print('loading plates...')
    wells = omero_data.get_wells(pargs.id)
    print('Retrieving annotations...')
    # get all annotations using a pool of processes; workers ignore SIGINT
    # (init_worker) so interrupts land here in the parent.
    p = multiprocessing.Pool(multiprocessing.cpu_count(), init_worker)
    pbar.max_value = len(wells)
    pbar.start()
    for well in wells:
        p.apply_async(omero_data.get_well_details, args=(well,), callback=well_details_callback)
    try:
        # wait 10 seconds, this allows for the capture of the KeyboardInterrupt exception
        time.sleep(10)
    except KeyboardInterrupt:
        p.terminate()
        p.join()
        # Fixed: was a bare `disconnect(...)` NameError - the helper lives
        # in the connect module (cf. the normal-path call below).
        connect.disconnect(session, pargs.server)
        print('exiting...')
        sys.exit(0)
    finally:
        p.close()
        p.join()
    pbar.finish()
    # sort results by id
    wells_sorted = sorted(wells_data, key=lambda k: k['id'])
    print('Writing flat file...')
    # build a dict of all keys which will form the header row of the flat file
    # this is necessary as the metadata key-value pairs might not be uniform across the dataset
    columns = set()
    for well in wells_sorted:
        columns |= set(well.keys())
    # write to a tab delimited file; the with-block closes the file, the
    # previous explicit output.close() inside it was redundant
    with open(pargs.output, 'w') as output:
        w = csv.DictWriter(output, columns, delimiter='\t', lineterminator='\n')
        w.writeheader()
        w.writerows(wells_sorted)
    connect.disconnect(session, pargs.server)
    print('Metadata fetch complete')
# Entry point guard so the module can be imported without side effects.
if __name__ == '__main__':
    main()
|
ystemAccessAccount (Opnum 24)
class LsarSetSystemAccessAccount(NDRCALL):
    """MS-LSAD LsarSetSystemAccessAccount request (opnum 24)."""
    opnum = 24
    structure = (
        ('AccountHandle', LSAPR_HANDLE),
        ('SystemAccess', ULONG),
    )
class LsarSetSystemAccessAccountResponse(NDRCALL):
    """Response for LsarSetSystemAccessAccount (MS-LSAD opnum 24)."""
    structure = (
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.5.9 LsarEnumerateAccountsWithUserRight (Opnum 35)
class LsarEnumerateAccountsWithUserRight(NDRCALL):
    """MS-LSAD LsarEnumerateAccountsWithUserRight request (opnum 35)."""
    opnum = 35
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('UserRight', PRPC_UNICODE_STRING),
    )
class LsarEnumerateAccountsWithUserRightResponse(NDRCALL):
    """Response for LsarEnumerateAccountsWithUserRight (MS-LSAD opnum 35)."""
    structure = (
        ('EnumerationBuffer',LSAPR_ACCOUNT_ENUM_BUFFER),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.5.10 LsarEnumerateAccountRights (Opnum 36)
class LsarEnumerateAccountRights(NDRCALL):
    """MS-LSAD LsarEnumerateAccountRights request (opnum 36)."""
    opnum = 36
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('AccountSid', RPC_SID),
    )
class LsarEnumerateAccountRightsResponse(NDRCALL):
    """Response for LsarEnumerateAccountRights (MS-LSAD opnum 36)."""
    structure = (
        ('UserRights',LSAPR_USER_RIGHT_SET),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.5.11 LsarAddAccountRights (Opnum 37)
class LsarAddAccountRights(NDRCALL):
    """MS-LSAD LsarAddAccountRights request (opnum 37)."""
    opnum = 37
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('AccountSid', RPC_SID),
        ('UserRights',LSAPR_USER_RIGHT_SET),
    )
class LsarAddAccountRightsResponse(NDRCALL):
    """Response for LsarAddAccountRights (MS-LSAD opnum 37)."""
    structure = (
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.5.12 LsarRemoveAccountRights (Opnum 38)
class LsarRemoveAccountRights(NDRCALL):
    """MS-LSAD LsarRemoveAccountRights request (opnum 38)."""
    opnum = 38
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('AccountSid', RPC_SID),
        ('AllRights', UCHAR),
        ('UserRights',LSAPR_USER_RIGHT_SET),
    )
class LsarRemoveAccountRightsResponse(NDRCALL):
    """Response for LsarRemoveAccountRights (MS-LSAD opnum 38)."""
    structure = (
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.6.1 LsarCreateSecret (Opnum 16)
class LsarCreateSecret(NDRCALL):
    """MS-LSAD LsarCreateSecret request (opnum 16)."""
    opnum = 16
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('SecretName', RPC_UNICODE_STRING),
        ('DesiredAccess', ACCESS_MASK),
    )
class LsarCreateSecretResponse(NDRCALL):
    """Response for LsarCreateSecret (MS-LSAD opnum 16)."""
    structure = (
        ('SecretHandle', LSAPR_HANDLE),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.6.2 LsarOpenSecret (Opnum 28)
class LsarOpenSecret(NDRCALL):
    """MS-LSAD LsarOpenSecret request (opnum 28)."""
    opnum = 28
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('SecretName', RPC_UNICODE_STRING),
        ('DesiredAccess', ACCESS_MASK),
    )
class LsarOpenSecretResponse(NDRCALL):
    """Response for LsarOpenSecret (MS-LSAD opnum 28)."""
    structure = (
        ('SecretHandle', LSAPR_HANDLE),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.6.3 LsarSetSecret (Opnum 29)
class LsarSetSecret(NDRCALL):
    """MS-LSAD LsarSetSecret request (opnum 29)."""
    opnum = 29
    structure = (
        ('SecretHandle', LSAPR_HANDLE),
        ('EncryptedCurrentValue', PLSAPR_CR_CIPHER_VALUE),
        ('EncryptedOldValue', PLSAPR_CR_CIPHER_VALUE),
    )
class LsarSetSecretResponse(NDRCALL):
    """Response for LsarSetSecret (MS-LSAD opnum 29)."""
    structure = (
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.6.4 LsarQuerySecret (Opnum 30)
class LsarQuerySecret(NDRCALL):
    """MS-LSAD LsarQuerySecret request (opnum 30)."""
    opnum = 30
    structure = (
        ('SecretHandle', LSAPR_HANDLE),
        ('EncryptedCurrentValue', PPLSAPR_CR_CIPHER_VALUE),
        ('CurrentValueSetTime', PLARGE_INTEGER),
        ('EncryptedOldValue', PPLSAPR_CR_CIPHER_VALUE),
        ('OldValueSetTime', PLARGE_INTEGER),
    )
class LsarQuerySecretResponse(NDRCALL):
    """Response for LsarQuerySecret (MS-LSAD opnum 30)."""
    structure = (
        ('EncryptedCurrentValue', PPLSAPR_CR_CIPHER_VALUE),
        ('CurrentValueSetTime', PLARGE_INTEGER),
        ('EncryptedOldValue', PPLSAPR_CR_CIPHER_VALUE),
        ('OldValueSetTime', PLARGE_INTEGER),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.6.5 LsarStorePrivateData (Opnum 42)
class LsarStorePrivateData(NDRCALL):
    """MS-LSAD LsarStorePrivateData request (opnum 42)."""
    opnum = 42
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('KeyName', RPC_UNICODE_STRING),
        ('EncryptedData', PLSAPR_CR_CIPHER_VALUE),
    )
class LsarStorePrivateDataResponse(NDRCALL):
    """Response for LsarStorePrivateData (MS-LSAD opnum 42)."""
    structure = (
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.6.6 LsarRetrievePrivateData (Opnum 43)
class LsarRetrievePrivateData(NDRCALL):
    """MS-LSAD LsarRetrievePrivateData request (opnum 43)."""
    opnum = 43
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('KeyName', RPC_UNICODE_STRING),
        ('EncryptedData', PLSAPR_CR_CIPHER_VALUE),
    )
class LsarRetrievePrivateDataResponse(NDRCALL):
    """Response for LsarRetrievePrivateData (MS-LSAD opnum 43)."""
    structure = (
        ('EncryptedData', PLSAPR_CR_CIPHER_VALUE),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.7.1 LsarOpenTrustedDomain (Opnum 25)
# 3.1.4.7.1 LsarQueryInfoTrustedDomain (Opnum 26)
# 3.1.4.7.2 LsarQueryTrustedDomainInfo (Opnum 39)
# 3.1.4.7.3 LsarSetTrustedDomainInfo (Opnum 40)
# 3.1.4.7.4 LsarDeleteTrustedDomain (Opnum 41)
# 3.1.4.7.5 LsarQueryTrustedDomainInfoByName (Opnum 48)
# 3.1.4.7.6 LsarSetTrustedDomainInfoByName (Opnum 49)
# 3.1.4.7.7 LsarEnumerateTrustedDomainsEx (Opnum 50)
class LsarEnumerateTrustedDomainsEx(NDRCALL):
    """MS-LSAD LsarEnumerateTrustedDomainsEx request (opnum 50)."""
    opnum = 50
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('EnumerationContext', ULONG),
        ('PreferedMaximumLength', ULONG),
    )
class LsarEnumerateTrustedDomainsExResponse(NDRCALL):
    """Response for LsarEnumerateTrustedDomainsEx (MS-LSAD opnum 50)."""
    structure = (
        ('EnumerationContext', ULONG),
        ('EnumerationBuffer',LSAPR_TRUSTED_ENUM_BUFFER_EX),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.7.8 LsarEnumerateTrustedDomains (Opnum 13)
class LsarEnumerateTrustedDomains(NDRCALL):
    """MS-LSAD LsarEnumerateTrustedDomains request (opnum 13)."""
    opnum = 13
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('EnumerationContext', ULONG),
        ('PreferedMaximumLength', ULONG),
    )
class LsarEnumerateTrustedDomainsResponse(NDRCALL):
    """Response for LsarEnumerateTrustedDomains (MS-LSAD opnum 13)."""
    structure = (
        ('EnumerationContext', ULONG),
        # NOTE(review): reuses the EX enumeration buffer; MS-LSAD defines
        # LSAPR_TRUSTED_ENUM_BUFFER for opnum 13 -- confirm this is intended.
        ('EnumerationBuffer',LSAPR_TRUSTED_ENUM_BUFFER_EX),
        ('ErrorCode', NTSTATUS),
    )
| # 3.1.4.7.9 LsarOpenTrustedDomainByName (Opnum 55)
# 3.1.4.7.10 LsarCreateTrustedDomainEx2 (Opnum 59)
# 3.1.4.7.11 LsarCreateTrustedDomainEx (Opnum 51)
# 3.1.4.7.12 LsarCreateTrustedDomain (Opnum 12)
# 3.1.4.7.14 LsarSetInformationTrustedDomain (Opnum 27)
# 3.1.4.7.15 LsarQueryForestTrustInformation (Opnum 73)
class LsarQueryForestTrustInformation(NDRCALL):
    """MS-LSAD LsarQueryForestTrustInformation request (opnum 73)."""
    opnum = 73
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('TrustedDomainName', LSA_UNICODE_STRING),
        ('HighestRecordType', LSA_FOREST_TRUST_RECORD_TYPE),
    )
class LsarQueryForestTrustInformationResponse(NDRCALL):
    """Response for LsarQueryForestTrustInformation (MS-LSAD opnum 73)."""
    structure = (
        ('ForestTrustInfo', PLSA_FOREST_TRUST_INFORMATION),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.7.16 LsarSetForestTrustInformation (Opnum 74)
# 3.1.4.8.1 LsarEnumeratePrivileges (Opnum 2)
class LsarEnumeratePrivileges(NDRCALL):
    """MS-LSAD LsarEnumeratePrivileges request (opnum 2)."""
    opnum = 2
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('EnumerationContext', ULONG),
        ('PreferedMaximumLength', ULONG),
    )
class LsarEnumeratePrivilegesResponse(NDRCALL):
    """Response for LsarEnumeratePrivileges (MS-LSAD opnum 2)."""
    structure = (
        ('EnumerationContext', ULONG),
        ('EnumerationBuffer', LSAPR_PRIVILEGE_ENUM_BUFFER),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.8.2 LsarLookupPrivilegeValue (Opnum 31)
class LsarLookupPrivilegeValue(NDRCALL):
    """MS-LSAD LsarLookupPrivilegeValue request (opnum 31)."""
    opnum = 31
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('Name', RPC_UNICODE_STRING),
    )
class LsarLookupPrivilegeValueResponse(NDRCALL):
    """Response for LsarLookupPrivilegeValue (MS-LSAD opnum 31)."""
    structure = (
        ('Value', LUID),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.8.3 LsarLookupPrivilegeName (Opnum 32)
class LsarLookupPrivilegeName(NDRCALL):
    """MS-LSAD LsarLookupPrivilegeName request (opnum 32)."""
    opnum = 32
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('Value', LUID),
    )
class LsarLookupPrivilegeNameResponse(NDRCALL):
    """Response for LsarLookupPrivilegeName (MS-LSAD opnum 32)."""
    structure = (
        ('Name', PRPC_UNICODE_STRING),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.8.4 LsarLookupPrivilegeDisplayName (Opnum 33)
class LsarLookupPrivilegeDisplayName(NDRCALL):
    """MS-LSAD LsarLookupPrivilegeDisplayName request (opnum 33)."""
    opnum = 33
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('Name', RPC_UNICODE_STRING),
        ('ClientLanguage', USHORT),
        ('ClientSystemDefaultLanguage', USHORT),
    )
class LsarLookupPrivilegeDisplayNameResponse(NDRCALL):
    """Response for LsarLookupPrivilegeDisplayName (MS-LSAD opnum 33)."""
    structure = (
        ('Name', PRPC_UNICODE_STRING),
        ('LanguageReturned', UCHAR),
        ('ErrorCode', NTSTATUS),
    )
# 3.1.4.9.1 LsarQuerySecurityObject (Opnum 3)
class LsarQuerySecurityObject(NDRCALL):
    """MS-LSAD LsarQuerySecurityObject request (opnum 3)."""
    opnum = 3
    structure = (
        ('PolicyHandle', LSAPR_HANDLE),
        ('SecurityInformation', SECURITY_INFORMATION),
    )
class LsarQuerySecurityObjectResp |
# Emit one PBS job script per metadata slice (0..52).
for i in range(0, 53):
    filepath = ('/Users/tunder/Dropbox/PythonScripts/requests/pbs/fic'
                + str(i) + '.pbs')
    # Assemble the whole job script, then write it in a single call.
    script_lines = [
        '#!/bin/bash\n',
        '#PBS -l walltime=10:00:00\n',
        '#PBS -l nodes=1:ppn=12\n',
        '#PBS -N Fiction' + str(i) + '\n',
        '#PBS -q ichass\n',
        '#PBS -m be\n',
        'cd $PBS_O_WORKDIR\n',
        'python3 extract.py -idfile /projects/ichass/usesofscale/hathimeta/pre20cslices/slice' + str(i) + '.txt -g fic -v -sub -rh\n',
    ]
    with open(filepath, mode='w', encoding='utf-8') as file:
        file.writelines(script_lines)
|
('ilevel',0)
if ilevel>8:
# Somewhere got into loop - quit
# ck.out('Warning: you have a cyclic dependency in your repositories ...')
return {'return':0, 'repo_deps':repo_deps}
# Load repo
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['repo'],
'data_uoa':repo})
if r['return']>0: return r
d=r['dict']
# Note that sometimes we update .ckr.json while CK keeps old deps cached
p=d.get('path','')
p1=os.path.join(p, ck.cfg['repo_file'])
if os.path.isfile(p1):
r=ck.load_json_file({'json_file':p1})
if r['return']==0:
d=r['dict'].get('dict',{})
rd=d.get('repo_deps',{})
# print (level+repo)
for q in rd:
drepo=q['repo_uoa']
if drepo!=repo:
repo_deps.append(drepo)
r=recursive_repos({'repo':drepo, 'repo_deps':repo_deps, 'level':level+' ', 'ilevel':ilevel+1})
if r['return']>0: return r
return {'return':0, 'repo_deps':repo_deps}
##############################################################################
# prepare artifact snapshot
def snapshot(i):
"""
Input: {
repo - which repo to snapshot with all deps
(file_name) - customize name ("ck-artifacts-" by default)
(no_deps) - if 'yes', do not process repo dependencies (useful for results repo accompanying main repos)
(copy_repos) - if 'yes', copy repositories instead of zipping
(date) - use this date (YYYYMMDD) instead of current one
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
import platform
import zipfile
import shutil
o=i.get('out','')
repo=i.get('repo','')
if repo=='':
return {'return':1, 'error': '"repo" to snapshot is not defined'}
no_deps=i.get('no_deps','')=='yes'
copy_repos=i.get('copy_repos','')=='yes'
force_clean=i.get('force_clean','')=='yes'
# Preparing tmp directory where to zip repos and add scripts ...
curdir0=os.getcwd()
# ptmp=os.path.join(curdir0, 'tmp')
import tempfile
ptmp=os.path.join(tempfile.gettempdir(),'tmp-snapshot')
if o=='con':
ck.out('Temp directory: '+ptmp)
ck.out('')
if os.path.isdir(ptmp) and force_clean:
shutil.rmtree(ptmp, onerror=ck.rm_read_only)
if os.path.isdir(ptmp):
r=ck.inp({'text':'Directory "'+ptmp+'" exists. Delete (Y/n)?'})
if r['return']>0: return r
ck.out('')
x=r['string'].strip().lower()
if x=='' or x=='y' or x=='yes':
r=ck.delete_directory({'path':ptmp})
if r['return']>0: return r
if not os.path.isdir(ptmp):
os.makedirs(ptmp)
os.chdir(ptmp)
curdir=os.getcwd()
# Checking repo deps
final_repo_deps=[]
if not no_deps:
if o=='con':
ck.out('Checking dependencies on other repos ...')
r=recursive_repos({'repo':repo})
if r['return']>0: return r
# Removing redundant
for q in reversed(r['repo_deps']):
if q not in final_repo_deps:
final_repo_deps.append(q)
if repo not in final_repo_deps:
final_repo_deps.append(repo)
if o=='con':
ck.out('')
for q in final_repo_deps:
ck.out(' * '+q)
ck.out('')
ck.out('Collecting revisions, can take some time ...')
ck.out('')
r=ck.reload_repo_cache({}) # Ignore errors
pp=[]
pp2={}
il=0
path_to_main_repo=''
for xrepo in final_repo_deps:
# Reload repo to get UID
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['repo'],
'data_uoa':xrepo})
if r['return']>0: return r
ruid=r['data_uid']
if ruid not in ck.cache_repo_info:
return {'return':1, 'error':'"'+q+'" repo is not in cache - strange!'}
# Get repo info
qq=ck.cache_repo_info[ruid]
d=qq['dict']
p=d.get('path','')
if xrepo==repo:
path_to_main_repo=p
t=d.get('shared','')
duoa=qq['data_uoa']
if t!='':
if l | en(duoa)>il: il=len(duoa)
url=d.get('url','')
branch=''
checkout=''
if os.path.isdir(p):
# Detect status
pc=os.getcwd()
os.chdir(p)
# Get current branch
r=ck.run_and_get_stdout({'cmd':['git','rev-parse','--abbrev-ref','HEAD']})
if r['return']==0 | and r['return_code']==0:
branch=r['stdout'].strip()
# Get current checkout
r=ck.run_and_get_stdout({'cmd':['git','rev-parse','--short','HEAD']})
if r['return']==0 and r['return_code']==0:
checkout=r['stdout'].strip()
os.chdir(pc)
x={'branch':branch, 'checkout':checkout, 'path':p, 'type':t, 'url':url, 'data_uoa':duoa}
else:
x={'path':p, 'type':t, 'data_uoa':duoa}
pp.append(x)
pp2[duoa]=x
if copy_repos:
pu=os.path.join(ptmp,'CK')
if not os.path.isdir(pu):
os.mkdir(pu)
pu1=os.path.join(pu,xrepo)
if o=='con':
ck.out(' * Copying repo '+xrepo+' ...')
shutil.copytree(p,pu1,ignore=shutil.ignore_patterns('*.pyc', 'tmp', 'tmp*', '__pycache__'))
# Copying Readme if exists
fr='README.md'
pr1=os.path.join(path_to_main_repo, fr)
if os.path.isfile(pr1):
pr2=os.path.join(ptmp, fr)
if os.path.isfile(pr2):
os.remove(pr2)
shutil.copy(pr1,pr2)
# Print
if o=='con':
ck.out('')
for q in pp:
name=q['data_uoa']
x=' * '+name+' '*(il-len(name))
branch=q.get('branch','')
checkout=q.get('checkout','')
url=q.get('url','')
if branch!='' or checkout!='' or url!='':
x+=' ( '+branch+' ; '+checkout+' ; '+url+' )'
ck.out(x)
os.chdir(curdir)
# Archiving
if o=='con':
ck.out('')
ck.out('Archiving ...')
# Add some dirs and files to ignore
for q in ['__pycache__', 'tmp', 'module.pyc', 'customize.pyc']:
if q not in ck.cfg['ignore_directories_when_archive_repo']:
ck.cfg['ignore_directories_when_archive_repo'].append(q)
# Get current date in YYYYMMDD
date=i.get('date','')
if date=='':
r=ck.get_current_date_time({})
if r['return']>0: return r
a=r['array']
a1=str(a['date_year'])
a2=str(a['date_month'])
a2='0'*(2-len(a2))+a2
a3=str(a['date_day'])
a3='0'*(2-len(a3))+a3
date=a1+a2+a3
date=date.strip()
if not copy_repos:
zips=[]
for repo in final_repo_deps:
if o=='con':
ck.out('')
ck.out(' * '+repo)
ck.out('')
an='ckr-'+repo
if pp2[repo].get('branch','')!='':
an+='--'+pp2[repo]['branch']
if pp2[repo].get('checkout','')!='':
an+='--'+pp2[repo]['checkout']
an+='.zip'
zips.append(an)
r=ck.access({'action':'zip',
'module_uoa':cfg['module_deps']['repo'],
'data_uoa':repo,
'archive_name':an,
'overwrite':'yes',
'out':o})
if r['return']>0: return r
# Print sequence of adding CK repos (for self-sustainable virtual CK artifact)
if o=='con':
ck.out('')
for z in zips:
ck.out('ck add repo --zip='+z)
# Cloning CK master
if o=='con':
ck.out('')
ck.out('Cloning latest CK version ...')
ck.out('')
os.system('git clone https://github.com/ctuning/ck ck-master')
# Prepare scripts
if o=='con':
ck.out('')
ck.out('Preparing sc |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ErpInventory(IdentifiedObject):
    """Utility inventory-related information about an item or part (and not
    a description of the item and its attributes). Used by ERP applications
    to synchronize Inventory data held on separate Item Master databases:
    it describes the item as it exists at a specific location, not the
    master attribute data (dimensions, weight, unit of measure).
    """
    def __init__(self, Asset=None, status=None, *args, **kw_args):
        """Initialises a new 'ErpInventory' instance.
        @param Asset: Asset this inventory record refers to (one-to-one).
        @param status: Status value associated with this record.
        """
        # Backing field for the bidirectional Asset association; the public
        # `Asset` property keeps both sides of the link in sync.
        self._Asset = None
        self.Asset = Asset
        self.status = status
        super(ErpInventory, self).__init__(*args, **kw_args)
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["Asset", "status"]
    _many_refs = []
    def getAsset(self):
        return self._Asset
    def setAsset(self, value):
        # Detach the back-reference on the previously linked asset.
        if self._Asset is not None:
            self._Asset._ErpInventory = None
        self._Asset = value
        if self._Asset is not None:
            # Clear any prior link on the new asset, then point its
            # back-reference at this inventory record.
            self._Asset.ErpInventory = None
            self._Asset._ErpInventory = self
    Asset = property(getAsset, setAsset)
    status = None
|
# -*- coding: utf-8 -*-#
"""
Basic Twitter Authentication
requirements: Python 2.5+ tweepy (easy_install tweepy | pip install tweepy)
"""
__author__ = 'Bernie Hogan'
__version__= '1.0'
import string
import codecs
import os
import pickle
import copy
import sys
import json
import webbrowser
import tweepy
from tweepy import Cursor
import twitterhelpers as th
def getFollowerCount(api, screen_name="BarackObama"):
    """Return how many accounts follow *screen_name*, using the given API client."""
    return api.get_user(screen_name).followers_count
def getFollowingCount(api, screen_name="BarackObama"):
    """Return how many accounts *screen_name* follows (the 'friends' count).

    Fix: removed two leftover debug statements (``print user`` and
    ``print dir(user)``) that dumped the whole user object to stdout on
    every call.
    """
    user = api.get_user(screen_name)
    return user.friends_count
if __name__=='__main__':
    # OAuth credentials are kept out of this file, in the twitterhelpers module.
    CONSUMER_KEY = th.CONSUMER_KEY
    CONSUMER_SECRET = th.CONSUMER_SECRET
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)

    ACCESS_TOKEN_SECRET = th.ACCESS_TOKEN_SECRET
    ACCESS_TOKEN = th.ACCESS_TOKEN
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)

    # Authenticated API client used for the lookups below (Python 2 script).
    api = tweepy.API(auth)

    print "Now you have received an access token."
    print "Or rather, your account has authorized this application to use the twitter api."
    print "You have this many hits to the API left this hour: "
    # print json.dumps(api.rate_limit_status(), indent = 1) #['remaining_hits']
    # Demo lookups for the 'blurky' account.
    print getFollowerCount(api, "blurky")
    print getFollowingCount(api, "blurky")
|
import json
from urllib import request
import pymongo
# Local MongoDB instance holding previously scraped reddit posts.
connection = pymongo.MongoClient('mongodb://localhost')
db = connection.reddit
stories = db.stories

# One-off scraping code kept for reference: it fetched r/technology's JSON
# listing and inserted each post document into the 'stories' collection.
# stories.drop()
# req = request.Request('http://www.reddit.com/r/technology/.json')
# req.add_header('User-agent', 'Mozilla/5.0')
# reddit_page = request.urlopen(req)
#
# parsed_reddit = json.loads(reddit_page.read().decode())
#
# print('Adding reddit posts')
# for item in parsed_reddit['data']['children']:
#     stories.insert_one(item['data'])
#
# print('Finished adding reddit posts')
def find():
    """Print the title of every stored post mentioning apple or google.

    Fix: in the original, ``cursor`` was only bound inside the ``try``
    block, so a failing ``stories.find`` raised a NameError at the ``for``
    loop below instead of stopping cleanly.  The function now returns
    after reporting the error.
    """
    print('Keyword search started')
    # Case-insensitive regex match against the post title.
    query = {'title': {'$regex': 'apple|google', '$options': 'i'}}
    projection = {'title': 1, '_id': 0}

    try:
        cursor = stories.find(query, projection)
    except Exception as e:
        print('Unexpected error', type(e), e)
        return

    for post in cursor:
        print(post)


find()
|
# Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webiopi.utils.types import toint
from webiopi.devices.spi import SPI
from webiopi.devices.analog import DAC
class MCP492X(SPI, DAC):
    """Base driver for the Microchip MCP492x family of 12-bit SPI DACs."""

    def __init__(self, chip, channelCount, vref):
        # chip select, SPI mode 0, 8 bits per word, 10 MHz clock
        SPI.__init__(self, toint(chip), 0, 8, 10000000)
        # 12-bit resolution, 'channelCount' outputs, reference voltage 'vref'
        DAC.__init__(self, channelCount, 12, float(vref))
        self.buffered=False
        self.gain=False
        self.shutdown=False
        # Cache of the last value written per channel: the chip is
        # write-only, so reads are served from this list.
        self.values = [0 for i in range(channelCount)]

    def __str__(self):
        return "MCP492%d(chip=%d)" % (self._analogCount, self.chip)

    def __analogRead__(self, channel, diff=False):
        # Return the cached value; the hardware cannot be read back.
        return self.values[channel]

    def __analogWrite__(self, channel, value):
        # Build the 16-bit command word: 4 control bits + 12 data bits.
        d = bytearray(2)
        d[0] = 0
        d[0] |= (channel & 0x01) << 7          # bit 15: channel select
        d[0] |= (self.buffered & 0x01) << 6    # bit 14: VREF input buffer
        d[0] |= (not self.gain & 0x01) << 5    # bit 13: gain select (1 = 1x)
        d[0] |= (not self.shutdown & 0x01) << 4  # bit 12: active-low shutdown
        d[0] |= (value >> 8) & 0x0F            # data bits 11..8
        d[1] = value & 0xFF                    # data bits 7..0
        self.writeBytes(d)
        self.values[channel] = value
class MCP4921(MCP492X):
    """Single-channel MCP4921 DAC.

    Fix: the original called ``MCP492X.__init__(self, chip, 1)`` and
    dropped ``vref``; the base initialiser's signature is
    ``(chip, channelCount, vref)``, so instantiation raised TypeError.
    """

    def __init__(self, chip=0, vref=3.3):
        MCP492X.__init__(self, chip, 1, vref)
class MCP4922(MCP492X):
    """Dual-channel MCP4922 DAC.

    Fix: the original called ``MCP492X.__init__(self, chip, 2)`` and
    dropped ``vref``; the base initialiser's signature is
    ``(chip, channelCount, vref)``, so instantiation raised TypeError.
    """

    def __init__(self, chip=0, vref=3.3):
        MCP492X.__init__(self, chip, 2, vref)
|
from multicorn import ForeignDataWrapper
from cassandra_provider import CassandraProvider
from properties import ISDEBUG
import properties
import schema_importer
import time
class CassandraFDW(ForeignDataWrapper):
    """Multicorn foreign data wrapper mapping a Cassandra table into PostgreSQL.

    The Cassandra session is created lazily in ``begin`` and, when the
    ``per_transaction_connection`` option is 'True', torn down on commit.
    With ``modify_concurency`` > 1, inserts/deletes are buffered in
    ``self.modify_items`` and flushed in batches by ``end_modify``.

    NOTE(review): the ISDEBUG branches reference a module-level ``logger``
    that is never imported in this file; with debugging enabled they would
    raise NameError.  Confirm where ``logger`` is expected to come from.
    """

    def __init__(self, options, columns):
        """Record table options/columns; connecting is deferred to ``begin``."""
        super(CassandraFDW, self).__init__(options, columns)
        self.init_options = options
        self.init_columns = columns
        self.cassandra_provider = None
        # Number of concurrent statements used when flushing buffered DML.
        self.concurency_level = int(options.get('modify_concurency', properties.DEFAULT_CONCURENCY_LEVEL))
        # String option: 'True' closes the Cassandra session on every commit.
        self.per_transaction_connection = options.get('per_transaction_connection', properties.PER_TRANSACTION_CONNECTION) == 'True'
        # Buffered ('insert'|'delete', payload) pairs awaiting end_modify.
        self.modify_items = []

    def build_cassandra_provider(self):
        """Create the Cassandra session on first use (idempotent)."""
        if self.cassandra_provider is None:  # idiom fix: was '== None'
            self.cassandra_provider = CassandraProvider(self.init_options, self.init_columns)

    @classmethod
    def import_schema(cls, schema, srv_options, options, restriction_type, restricts):
        """Delegate IMPORT FOREIGN SCHEMA to the schema_importer module."""
        return schema_importer.import_schema(schema, srv_options, options, restriction_type, restricts)

    def insert(self, new_values):
        """Insert a row; buffered and flushed in batches when concurrency > 1."""
        if self.concurency_level > 1:
            self.modify_items.append(('insert', new_values))
            if len(self.modify_items) >= properties.BATCH_MODIFY_THRESHOLD:
                self.end_modify()
            return new_values
        else:
            return self.cassandra_provider.insert(new_values)

    def delete(self, rowid):
        """Delete a row by id; buffered and flushed in batches when concurrency > 1."""
        if self.concurency_level > 1:
            self.modify_items.append(('delete', rowid))
            if len(self.modify_items) >= properties.BATCH_MODIFY_THRESHOLD:
                self.end_modify()
            return { }
        else:
            return self.cassandra_provider.delete(rowid)

    def update(self, rowid, new_values):
        """Update is an upsert: a Cassandra INSERT overwrites the row."""
        if ISDEBUG:
            logger.log(u"requested update {0}".format(new_values))
        self.insert(new_values)
        return new_values

    def execute(self, quals, columns, sort_keys=None):
        """Start a scan and return an iterator of result rows."""
        self.scan_start_time = time.time()
        return self.cassandra_provider.execute(quals, columns, sort_keys)

    def can_sort(self, sort_keys):
        # No sorts are pushed down to Cassandra.
        return []

    def begin(self, serializable):
        """Transaction start: ensure a Cassandra session exists."""
        self.build_cassandra_provider()
        if ISDEBUG:
            logger.log("begin: {0}".format(serializable))

    def commit(self):
        if ISDEBUG:
            logger.log("commit")
        if self.per_transaction_connection:
            self.close_cass_connection()

    def close_cass_connection(self):
        """Close and drop the Cassandra session, if one exists."""
        if self.cassandra_provider is not None:  # idiom fix: was '!= None'
            self.cassandra_provider.close()
            self.cassandra_provider = None

    def end_modify(self):
        """Flush buffered DML; the buffer is always cleared, even on failure."""
        try:
            mod_len = len(self.modify_items)
            if mod_len > 0:
                if ISDEBUG:
                    logger.log("end modify")
                    logger.log("modify concurrency level: {0}".format(self.concurency_level))
                self.cassandra_provider.execute_modify_items(self.modify_items, self.concurency_level)
        finally:
            self.modify_items = []

    def explain(self, quals, columns, sortkeys=None, verbose=False):
        """Return the CQL statement that ``execute`` would run (for EXPLAIN)."""
        return self.cassandra_provider.build_select_stmt(quals, columns, self.cassandra_provider.allow_filtering, verbose)

    def end_scan(self):
        if ISDEBUG:
            logger.log("end_scan. Total time: {0} ms".format((time.time() - self.scan_start_time) * 1000))

    def pre_commit(self):
        if ISDEBUG:
            logger.log("pre commit")

    def rollback(self):
        if ISDEBUG:
            logger.log("rollback")

    def sub_begin(self, level):
        if ISDEBUG:
            logger.log("sub begin {0}".format(level))

    def sub_commit(self, level):
        if ISDEBUG:
            logger.log("sub commit {0}".format(level))

    def sub_rollback(self, level):
        if ISDEBUG:
            logger.log("sub rollback {0}".format(level))

    @property
    def rowid_column(self):
        """Name of the column PostgreSQL treats as the row identifier."""
        return self.cassandra_provider.get_row_id_column()

    def get_rel_size(self, quals, columns):
        """Estimated (rows, row_width) for the planner."""
        return self.cassandra_provider.get_rel_size(quals, columns)

    def get_path_keys(self):
        self.scan_start_time = time.time()
        return self.cassandra_provider.get_path_keys()
#! /usr/bin/env python
# Print the balance of the first coinbase wallet account (Python 2 script).
import requests, json
from os.path import expanduser
from coinbase.wallet.client import Client

home = expanduser('~')

# NOTE(review): placeholder credentials -- replace before running.
client = Client('YOUR_API_KEY', 'YOUR_API_SECRET')
accounts = client.get_accounts()
print accounts ['data'][0]['balance']
|
import struct
import numpy
import io
import pickle
import pyctrl.packet as packet
def testA():
    """Round-trip acknowledgement ('A') packets through pack/unpack_stream."""
    for payload in ('C', 'B'):
        wire = b'A' + payload.encode()
        assert packet.pack('A', payload) == wire
        assert packet.unpack_stream(io.BytesIO(wire)) == ('A', payload)
    # Mismatched payloads must not compare equal.
    assert packet.pack('A', 'C') != b'AB'
    assert packet.unpack_stream(io.BytesIO(b'AB')) != ('A', 'C')
def testC():
    """Round-trip command ('C') packets through pack/unpack_stream."""
    for payload in ('C', 'B'):
        wire = b'C' + payload.encode()
        assert packet.pack('C', payload) == wire
        assert packet.unpack_stream(io.BytesIO(wire)) == ('C', payload)
    # Mismatched payloads must not compare equal.
    assert packet.pack('C', 'C') != b'CB'
    assert packet.unpack_stream(io.BytesIO(b'CB')) != ('C', 'C')
def testS():
    """Round-trip length-prefixed string ('S') packets."""
    # Wire format: type byte, little-endian uint32 length, raw bytes.
    encoded = struct.pack('<cI3s', b'S', 3, b'abc')
    assert packet.pack('S', 'abc') == encoded
    assert packet.pack('S', 'abcd') != encoded
    assert packet.unpack_stream(io.BytesIO(encoded)) == ('S', 'abc')
    assert packet.unpack_stream(io.BytesIO(encoded)) != ('S', 'abcd')
def testIFD():
    """Round-trip scalar packets: 'I' (int32), 'F' (float32) and 'D' (float64)."""
    # test I
    assert packet.pack('I',3) == struct.pack('<ci', b'I', 3)
    assert packet.pack('I',3) != struct.pack('<ci', b'I', 4)
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<ci', b'I', 3))) == ('I', 3)
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<ci', b'I', 4))) != ('I', 3)

    # test F
    assert packet.pack('F',3.3) == struct.pack('<cf', b'F', 3.3)
    assert packet.pack('F',3.3) != struct.pack('<cf', b'F', 4.3)
    # Compare through numpy.float32 because 3.3 is not exactly representable
    # in single precision.
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cf', b'F', numpy.float32(3.3)))) == ('F', numpy.float32(3.3))
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cf', b'F', 4.3))) != ('F', 3.3)

    # test D
    assert packet.pack('D',3.3) == struct.pack('<cd', b'D', 3.3)
    assert packet.pack('D',3.3) != struct.pack('<cd', b'D', 4.3)
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cd', b'D', 3.3))) == ('D', 3.3)
    assert packet.unpack_stream(
        io.BytesIO(struct.pack('<cd', b'D', 4.3))) != ('D', 3.3)
def testV():
    """Round-trip vector ('V') packets for int, float32 and float64 payloads."""
    # test VI: wire format is 'V', element type char, count, then elements.
    vector = numpy.array((1,2,3), int)
    assert packet.pack('V',vector) == struct.pack('<ccIiii', b'V', b'I', 3, 1, 2, 3)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<ccIiii', b'V', b'I', 3, 1, 2, 3)))
    assert type == 'V'
    assert numpy.all(rvector == vector)

    # Negative elements survive the round trip too.
    vector = numpy.array((1,-2,3), int)
    assert packet.pack('V',vector) == struct.pack('<ccIiii', b'V', b'I', 3, 1, -2, 3)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<ccIiii', b'V', b'I', 3, 1, -2, 3)))
    assert type == 'V'
    assert numpy.all(rvector == vector)

    # test VF (single precision)
    vector = numpy.array((1.3,-2,3), numpy.float32)
    assert packet.pack('V',vector) == struct.pack('<ccIfff', b'V', b'F', 3, 1.3, -2, 3)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<ccIfff', b'V', b'F', 3, 1.3, -2, 3)))
    assert type == 'V'
    assert numpy.all(rvector == vector)

    # test VD (double precision)
    vector = numpy.array((1.3,-2,3), float)
    assert packet.pack('V',vector) == struct.pack('<ccIddd', b'V', b'D', 3, 1.3, -2, 3)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<ccIddd', b'V', b'D', 3, 1.3, -2, 3)))
    assert type == 'V'
    assert numpy.all(rvector == vector)
def testM():
    """Round-trip matrix ('M') packets for int, float32 and float64 payloads.

    Fix: ``numpy.float`` (a deprecated alias of the builtin ``float``) was
    removed in NumPy 1.20; the builtin is used directly instead.
    """
    # test MI: wire format is 'M', row count, then the flattened 'V' packet.
    vector = numpy.array(((1,2,3), (3,4,5)), int)
    assert packet.pack('M',vector) == struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, 2, 3, 3, 4, 5)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, 2, 3, 3, 4, 5)))
    assert type == 'M'
    assert numpy.all(rvector == vector)

    vector = numpy.array(((1,-2,3), (3,4,-5)), int)
    assert packet.pack('M',vector) == struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, -2, 3, 3, 4, -5)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, -2, 3, 3, 4, -5)))
    assert type == 'M'
    assert numpy.all(rvector == vector)

    # test MF (single precision)
    vector = numpy.array(((1.3,-2,3), (0,-1,2.5)), numpy.float32)
    assert packet.pack('M',vector) == struct.pack('<cIccIffffff', b'M', 2, b'V', b'F', 6, 1.3, -2, 3, 0, -1, 2.5)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<cIccIffffff', b'M', 2, b'V', b'F', 6, 1.3, -2, 3, 0, -1, 2.5)))
    assert type == 'M'
    assert numpy.all(rvector == vector)

    # test MD (double precision) -- was numpy.float, removed in NumPy 1.20
    vector = numpy.array(((1.3,-2,3), (0,-1,2.5)), float)
    assert packet.pack('M',vector) == struct.pack('<cIccIdddddd', b'M', 2, b'V', b'D', 6, 1.3, -2, 3, 0, -1, 2.5)
    (type, rvector) = packet.unpack_stream(
        io.BytesIO(struct.pack('<cIccIdddddd', b'M', 2, b'V', b'D', 6, 1.3, -2, 3, 0, -1, 2.5)))
    assert type == 'M'
    assert numpy.all(rvector == vector)
def testP():
    """Round-trip a pickled-object ('P') packet carrying a numpy matrix.

    Fix: ``numpy.float`` (a deprecated alias of the builtin ``float``) was
    removed in NumPy 1.20; the builtin is used directly instead.
    """
    vector = numpy.array(((1.3,-2,3), (0,-1,2.5)), float)
    string = packet.pack('P', vector)
    (type, rvector) = packet.unpack_stream(io.BytesIO(string))
    assert type == 'P'
    assert numpy.all(rvector == vector)
def testKR():
    """Round-trip keyword-dict ('K') and tuple ('R') packets."""
    for code, payload in (('K', {'a': 1, 'b': 2}), ('R', ('a', 1, 'b', 2))):
        wire = packet.pack(code, payload)
        rcode, rpayload = packet.unpack_stream(io.BytesIO(wire))
        assert rcode == code
        assert rpayload == payload
if __name__ == "__main__":
    # Run every packet round-trip check when executed as a script.
    testA()
    testC()
    testS()
    testIFD()
    testV()
    testM()
    testP()
    testKR()
|
# $Log: pidTK.py,v $
# Revision 1.1 2002/07/12 18:34:47 glandrum
# added
#
# Revision 1.6 2000/11/03 00:56:57 clee
# fixed sizing error in TKCanvas
#
# Revision 1.5 2000/11/03 00:25:37 clee
# removed reference to "BaseTKCanvas" (should just use TKCanvas as default)
#
# Revision 1.4 2000/10/29 19:35:31 clee
# eliminated BaseTKCanvas in favor of straightforward "TKCanvas" name
#
# Revision 1.3 2000/10/29 01:57:41 clee
# - added scrollbar support to both TKCanvas and TKCanvasPIL
# - added getTKCanvas() access method to TKCanvasPIL
#
# Revision 1.2 2000/10/15 00:47:17 clee
# commit before continuing after getting pil to work as package
#
# Revision 1.1.1.1 2000/09/27 03:53:15 clee
# Simple Platform Independent Graphics
#
# Revision 1.6 2000/04/06 01:55:34 pmagwene
# - TKCanvas now uses multiple inheritance from Tkinter.Canvas and piddle.Canvas
# * for the most part works much like a normal Tkinter.Canvas object
# - TKCanvas draws rotated strings using PIL image, other objects using normal Tk calls
# - Minor fixes to FontManager and TKCanvas so can specify root window other than Tk()
# - Removed Quit/Clear buttons from default canvas
#
# Revision 1.5 2000/03/12 07:07:42 clee
# sync with 1_x
#
# Revision 1.4 2000/02/26 23:12:42 clee
# turn off compression by default on piddlePDF
# add doc string to new pil-based piddleTK
#
# Revision 1.3 2000/02/26 21:23:19 | clee
# update that makes PIL based TKCanvas the default Canvas for TK.
# Updated piddletest.py. Also, added clear() methdo to piddlePIL's
# canvas it clears to "white" is this correct behavior? Not well
# specified in current documents.
#
class FontManager:
    """Translate piddle/sping font descriptions into cached tkFont objects
    and answer string-metric queries (width, ascent, descent) through them.
    """

    # Map generic face names to concrete Tk font families.
    __alt_faces = {"serif": "Times", "sansserif": "Helvetica", "monospaced": "Courier"}

    def __init__(self, master):
        # master: Tk widget/root passed to tkFont.Font when creating fonts.
        self.master = master
        # (family, size, weight, slant, underline) -> tkFont.Font
        self.font_cache = {}

    # the main interface
    def stringWidth(self, s, font):
        """Return the pixel width of string *s* rendered in *font*."""
        tkfont = self.piddleToTkFont(font)
        return tkfont.measure(s)

    def fontHeight(self, font):
        # NOTE(review): _tkfontHeight is not defined on this class -- calling
        # fontHeight raises AttributeError.  Confirm whether it should be
        # ascent + descent.
        tkfont = self.piddleToTkFont(font)
        return self._tkfontHeight(tkfont)

    def fontAscent(self, font):
        """Return the ascent (pixels above the baseline) of *font*."""
        tkfont = self.piddleToTkFont(font)
        return self._tkfontAscent(tkfont)

    def fontDescent(self, font):
        """Return the descent (pixels below the baseline) of *font*."""
        tkfont = self.piddleToTkFont(font)
        return self._tkfontDescent(tkfont)

    def getTkFontString(self, font):
        """Return a string suitable to pass as the -font option to
        to a Tk widget based on the piddle-style FONT"""
        tkfont = self.piddleToTkFont(font)
        # XXX: should just return the internal tk font name?
        # return str(tkfont)
        # NOTE(review): assumes tkfont.config() yields plain values for the
        # %(family)s-style substitutions -- confirm against the tkFont API.
        return ('-family %(family)s -size %(size)s '
                '-weight %(weight)s -slant %(slant)s '
                '-underline %(underline)s' % tkfont.config())

    def getTkFontName(self, font):
        """Return a the name associated with the piddle-style FONT"""
        tkfont = self.piddleToTkFont(font)
        return str(tkfont)

    def piddleToTkFont(self, font):
        """Return a tkFont instance based on the pid-style FONT"""
        if font is None:
            return ''

        #default 12 pt, "Times", non-bold, non-italic
        size = 12
        family = "Times"
        weight = "normal"
        slant = "roman"
        underline = "false"

        if font.face:
            # check if the user specified a generic face type
            # like serif or monospaced.  check is case-insenstive.
            f = font.face.lower()
            if f in self.__alt_faces:
                family = self.__alt_faces[f]
            else:
                family = font.face

        size = font.size or 12
        if font.bold:
            weight = "bold"
        if font.italic:
            slant = "italic"
        if font.underline:
            underline = 'true'

        # ugh... is there a better way to do this?
        key = (family, size, weight, slant, underline)

        # check if we've already seen this font.
        if key in self.font_cache:
            # yep, don't bother creating a new one.  just fetch it.
            font = self.font_cache[key]
        else:
            # nope, let's create a new tk font.
            # this way we will return info about the actual font
            # selected by Tk, which may be different than what we ask
            # for if it's not availible.
            font = tkFont.Font(self.master, family=family, size=size, weight=weight, slant=slant,
                               underline=underline)
            self.font_cache[(family, size, weight, slant, underline)] = font

        return font

    def _tkfontAscent(self, tkfont):
        return tkfont.metrics("ascent")

    def _tkfontDescent(self, tkfont):
        return tkfont.metrics("descent")
class TKCanvas(tk.Canvas, rdkit.sping.pid.Canvas):
__TRANSPARENT = '' # transparent for Tk color
def __init__(self,
size=(300, 300),
name="sping.TK",
master=None,
scrollingViewPortSize=None, # a 2-tuple to define the size of the viewport
**kw):
"""This canvas allows you to add a tk.Canvas with a sping API for drawing.
To add scrollbars, the simpliest method is to set the 'scrollingViewPortSize'
equal to a tuple that describes the width and height of the visible porition
of the canvas on screen. This sets scrollregion=(0,0, size[0], size[1]).
Then you can add scrollbars as you would any tk.Canvas.
Note, because this is a subclass of tk.Canvas, you can use the normal keywords
to specify a tk.Canvas with scrollbars, however, you should then be careful to
set the "scrollregion" option to the same size as the 'size' passed to __init__.
Tkinter's scrollregion option essentially makes 'size' ignored. """
rdkit.sping.pid.Canvas.__init__(self, size=size, name=size)
if scrollingViewPortSize: # turn on ability to scroll
kw["scrollregion"] = (0, 0, size[0], size[1])
kw["height"] = scrollingViewPortSize[0]
kw["width"] = scrollingViewPortSize[1]
else:
kw["width"] = size[0]
kw["height"] = size[1]
apply(tk.Canvas.__init__, (self, master), kw) # use kw to pass other tk.Canvas options
self.config(background="white")
self.width, self.height = size
self._font_manager = FontManager(self)
self._configure()
self._item_ids = []
self._images = []
def _configure(self):
pass
def _display(self):
self.flush()
self.mainloop()
def _quit(self):
self.quit()
# Hmmm...the postscript generated by this causes my Ghostscript to barf...
def _to_ps_file(self, filename):
self.postscript(file=filename)
def isInteractive(self):
return 0
def onOver(self, event):
pass
def onClick(self, event):
pass
def onKey(self, event):
pass
def flush(self):
tk.Canvas.update(self)
def clear(self):
map(self.delete, self._item_ids)
self._item_ids = []
def _colorToTkColor(self, c):
return "#%02X%02X%02X" % (int(c.red * 255), int(c.green * 255), int(c.blue * 255))
def _getTkColor(self, color, defaultColor):
if color is None:
color = defaultColor
if color is rdkit.sping.pid.transparent:
color = self.__TRANSPARENT
else:
color = self._colorToTkColor(color)
return color
def drawLine(self, x1, y1, x2, y2, color=None, width=None):
color = self._getTkColor(color, self.defaultLineColor)
if width is None:
width = self.defaultLineWidth
new_item = self.create_line(x1, y1, x2, y2, fill=color, width=width)
self._item_ids.append(new_item)
# NYI: curve with fill
#def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4,
# edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
#
def stringWidth(self, s, font=None):
return self._font_manager.stringWidth(s, font or self.defaultFont)
def fontAscent(self, font=None):
return self._font_manager.fontAscent(font or self.defaultFont)
def fontDescent(self, font=None):
return self._font_manager.fontDescent(font or self.defaultFont)
def drawString(self, s, x, y, font=None, color=None, angle=None):
if angle:
try:
self._drawRotatedString(s, x, y, font, color, angle)
return
except ImportError:
print("PIL not available. Using unrotated strings.")
# fudge factor for TK on linux (at least)
# strings are being drawn using create_text in canvas
y = |
if kwargs['ip_allocation'] == "all":
slave_ips = [self.reserve_ip(project_id=project_id)
for i in range(kwargs['slaves'])]
self.ips = [ip for ip in [master_ip] + slave_ips if ip]
self.master = self.create_vm(vm_name=vm_name, ip=master_ip,
net_id=vpn_id,
flavor=master_flavor,
personality=master_personality,
**kwargs)
# Create slaves
self.slaves = list()
for i in range(kwargs['slaves']):
slave_name = 'lambda-node' + str(i + 1)
slave = self.create_vm(vm_name=slave_name,
ip=slave_ips[i],
net_id=vpn_id,
flavor=slave_flavor,
personality=slave_personality,
**kwargs)
self.slaves.append(slave)
# Wait for VMs to complete being built
if wait:
self.cyclades.wait_server(server_id=self.master['id'])
for slave in self.slaves:
self.cyclades.wait_server(slave['id'])
# Create cluster dictionary object
inventory = {
"master": self.master,
"slaves": self.slaves
}
return inventory
def create_vm(self, vm_name=None, image_id=None,
ip=None, personality=None, flavor=None, **kwargs):
"""
:param vm_name: Name of the virtual machine to create
:param image_id: image id if you want another image than the default
:param kwargs: passed to the functions called for detail options
:return:
"""
flavor_id = flavor['id']
# Get image
if image_id == None:
image_id = self.image_id
else:
image_id = self.find_image(**kwargs)['id']
project_id = self.find_project_id(**kwargs)['id']
networks = list()
if ip:
ip_obj = dict()
ip_obj['uuid'] = ip['floating_network_id']
ip_obj['fixed_ip'] = ip['floating_ip_address']
networks.append(ip_obj)
networks.append({'uuid': kwargs['net_id']})
if personality == None:
personality = []
try:
okeanos_response = self.cyclades.create_server(name=vm_name,
flavor_id=flavor_id,
image_id=image_id,
project_id=project_id,
networks=networks,
personality=personality)
except ClientError as ex:
raise ex
return okeanos_response
def create_vpn(self, network_name, project_id):
"""
Creates a virtual private network
:param network_name: name of the network
:return: the virtual network object
"""
try:
# Create vpn with custom type and the name given as argument
vpn = self.network_client.create_network(
type=self.network_client.network_types[1],
name=network_name,
project_id=project_id)
return vpn
except ClientError as ex:
raise ex
def reserve_ip(self, project_id):
"""
Reserve ip
:return: the ip object if successfull
"""
# list_float_ips = self.network_client.list_floatingips()
# for ip in list_float_ips:
# if ip['instance_id'] is None and ip['port_id'] is None and ip not in ips:
# return ip
try:
ip = self.network_client.create_floatingip(project_id=project_id)
return ip
except ClientError as ex:
raise ex
def create_private_subnet(self, net_id, cidr='19 | 2.168.0.0/24', gateway_ip='192.168.0.1'):
"""
Creates a private subnets and connects it with this network
:param net_id: id of the network
:return: the id of the subnet if successfull
"""
try:
subnet = self. | network_client.create_subnet(net_id, cidr,
gateway_ip=gateway_ip,
enable_dhcp=True)
self.subnet = subnet
return subnet['id']
except ClientError as ex:
raise ex
def connect_vm(self, vm_id, net_id):
"""
Connects the vm with this id to the network with the net_id
:param vm_id: id of the vm
:param net_id: id of the network
:return: returns True if successfull
"""
try:
port = self.network_client.create_port(network_id=net_id,
device_id=vm_id)
return True
except ClientError as ex:
raise ex
def attach_authorized_ip(self, ip, vm_id):
"""
Attach the authorized ip with this id to the vm
:param fnet_id: id of the floating network of the ip
:param vm_id: id of the vm
:return: returns True if successfull
"""
try:
port = self.network_client.create_port(network_id=ip['floating_network_id'],
device_id=vm_id,
fixed_ips=[dict(
ip_address=ip['floating_ip_address']), ])
return True
except ClientError as ex:
raise ex
"""
DELETE RESOURCES
"""
def delete_lambda_cluster(self, details):
"""
Delete a lambda cluster
:param details: details of the cluster we want to delete
:return: True if successfull
"""
# Delete every node
nodes = details['nodes']
for node in nodes:
if (not self.delete_vm(node)):
msg = 'Error deleting node with id ', node
raise ClientError(msg, error_fatal)
# Wait to complete deleting VMs
for node in nodes:
self.cyclades.wait_server(server_id=node, current_status='ACTIVE')
# Delete vpn
vpn = details['vpn']
if (not self.delete_vpn(vpn)):
msg = 'Error deleting node with id ', node
raise ClientError(msg, error_fatal)
def delete_vm(self, vm_id):
"""
Delete a vm
:param vm_id: id of the vm we want to delete
:return: True if successfull
"""
try:
self.cyclades.delete_server(vm_id)
return True
except ClientError as ex:
raise ex
def delete_vpn(self, net_id):
"""
Delete a virtual private network
:param net_id: id of the network we want to delete
:return: True if successfull
"""
try:
self.network_client.delete_network(net_id)
return True
except ClientError as ex:
raise ex
"""
GET RESOURCES
"""
def get_cluster_details(self):
"""
:returns: dictionary of basic details for the cluster
"""
details = dict()
nodes = dict()
master = dict()
master['id'] = self.master['id']
master['name'] = self.master['name']
master['adminPass'] = self.master['adminPass']
nodes['master'] = master
slaves = list()
for slave in self.slaves:
slave_obj = dict()
slave_obj['id'] = slave['id']
slave_obj['name'] = slave['name']
name = slave_obj['name']
slaves.append(slave_obj)
nodes['slaves'] = slaves
details['nodes'] = nodes
vpn = di |
#!/usr/bin/python
import sys
import urllib2
RAINX_STAT_KEYS = [
("rainx.reqpersec", "total_reqpersec"),
("rainx.reqputpersec", "put_reqpersec"),
("rainx.reqgetpersec", "get_reqpersec"),
| ("rainx.avreqtime", "total_avreqtime"),
("rainx.avputreqtime", "put_avreqtime"),
("rainx.avgetreqtime", "get_avreqtime"),
]
def parse_info(stream):
    """Parse whitespace-separated "key value" lines from *stream* into a dict.

    Values are cast to int, then float, falling back to the raw string.
    A key on a line by itself maps to None.

    Fix: a completely blank line made the original crash with IndexError
    on ``parts[0]``; blank lines are now skipped.  The stream is also
    iterated lazily instead of via ``readlines()``.
    """
    data = {}
    for line in stream:
        parts = line.split()
        if not parts:
            continue  # blank line: nothing to record
        key = parts[0]
        if len(parts) > 1:
            raw = parts[1]
            # try to cast value to int or float
            try:
                value = int(raw)
            except ValueError:
                try:
                    value = float(raw)
                except ValueError:
                    value = raw
            data[key] = value
        else:
            data[key] = None
    return data
def get_stat_lines(url, stat_keys):
    """Fetch the stat page at *url* and format the known counters.

    *stat_keys* is a sequence of (source_key, alias) pairs; each key found
    in the fetched data yields one 'stat.alias = value' line.
    """
    stream = urllib2.urlopen(url)
    data = parse_info(stream)
    stream.close()
    return [
        "stat.%s = %s" % (alias, str(data[key]))
        for key, alias in stat_keys
        if key in data
    ]
def main(args):
    """Entry point for the rainx stats probe (Python 2 script)."""
    # assumes argv[1] is pipe-delimited with host:port in the third
    # field -- TODO confirm against the caller's invocation format
    ip_port = args[1].split("|")[2]
    stats_url = "http://%s/stat" % ip_port
    for stat in get_stat_lines(stats_url, RAINX_STAT_KEYS):
        print stat

if __name__ == "__main__":
    main(sys.argv)
|
'db_table': 'PIPELINE_TOOL',
},
),
migrations.CreateModel(
name='PipelineReleaseTool',
fields=[
('pipeline', models.ForeignKey(db_column='PIPELINE_ID', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='emgapi.PipelineRelease')),
('tool', models.ForeignKey(db_column='TOOL_ID', on_delete=django.db.models.deletion.DO_NOTHING, to='emgapi.PipelineTool')),
('tool_group_id', models.DecimalField(db_column='TOOL_GROUP_ID', decimal_places=3, max_digits=6)),
('how_tool_used_desc', models.TextField(db_column='HOW_TOOL_USED_DESC')),
],
options={
'db_table': 'PIPELINE_RELEASE_TOOL',
},
),
migrations.CreateModel(
name='AnalysisStatus',
fields=[
('analysis_status_id', models.AutoField(db_column='ANALYSIS_STATUS_ID', primary_key=True, serialize=False)),
('analysis_status', models.CharField(db_column='ANALYSIS_STATUS', max_length=25)),
],
options={
'db_table': 'ANALYSIS_STATUS',
},
),
migrations.CreateModel(
name='BiomeHierarchyTree',
fields=[
('biome_id', models.SmallIntegerField(db_column='BIOME_ID', primary_key=True, serialize=False)),
('biome_name', models.CharField(db_column='BIOME_NAME', max_length=60)),
('lft', models.SmallIntegerField(db_column='LFT')),
('rgt', models.SmallIntegerField(db_column='RGT')),
('depth', models.IntegerField(db_column='DEPTH')),
('lineage', models.CharField(db_column='LINEAGE', max_length=500)),
],
options={
'db_table': 'BIOME_HIERARCHY_TREE',
},
),
migrations.CreateModel(
name='Publication',
fields=[
('pub_id', models.AutoField(db_column='PUB_ID', primary_key=True, serialize=False)),
('authors', models.CharField(blank=True, db_column='AUTHORS', max_length=4000, null=True)),
('doi', models.CharField(blank=True, db_column='DOI', max_length=1500, null=True)),
('isbn', models.CharField(blank=True, db_column='ISBN', max_length=100, null=True)),
('iso_journal', models.CharField(blank=True, db_column='ISO_JOURNAL', max_length=255, null=True)),
('issue', models.CharField(blank=True, db_column='ISSUE', max_length=55, null=True)),
('medline_journal', models.CharField(blank=True, db_column='MEDLINE_JOURNAL', max_length=255, null=True)),
('pub_abstract', models.TextField(blank=True, db_column='PUB_ABSTRACT', null=True)),
('pubmed_central_id', models.IntegerField(blank=True, db_column='PUBMED_CENTRAL_ID', null=True)),
('pubmed_id', models.IntegerField(blank=True, db_column='PUBMED_ID', null=True)),
('pub_title', models.CharField(db_column='PUB_TITLE', max_length=740)),
('raw_pages', models.CharField(blank=True, db_column='RAW_PAGES', max_length=30, null=True)),
('url', models.CharField(blank=True, db_column='URL', max_length=740, null=True)),
('volume', models.CharField(blank=True, db_column='VOLUME', max_length=55, null=True)),
('published_year', models.SmallIntegerField(blank=True, db_column='PUBLISHED_YEAR', null=True)),
('pub_type', models.CharField(blank=True, db_column='PUB_TYPE', max_length=150, null=True)),
],
options={
'db_table': 'PUBLICATION',
},
),
migrations.CreateModel(
name='Study',
fields=[
('study_id', models.AutoField(db_column='STUDY_ID', primary_key=True, serialize=False)),
('centre_name', models.CharField(blank=True, db_column='CENTRE_NAME', max_length=255, null=True)),
('experimental_factor', models.CharField(blank=True, db_column='EXPERIMENTAL_FACTOR', max_length=255, null=True)),
('is_public', models.IntegerField(blank=True, db_column='IS_PUBLIC', null=True)),
('ncbi_project_id', models.IntegerField(blank=True, db_column='NCBI_PROJECT_ID', null=True)),
('public_release_date', models.DateField(blank=True, db_column='PUBLIC_RELEASE_DATE', null=True)),
('study_abstract', models.TextField(blank=True, db_column='STUDY_ABSTRACT', null=True)),
('ext_study_id', models.CharField(db_column='EXT_STUDY_ID', max_length=18)),
('study_name', models.CharField(blank=True, db_column='STUDY_NAME', max_length=255, null=True)),
('study_status', models.CharField(blank=True, db_column='STUDY_STATUS', max_length=30, null=True)),
('data_origination', models.CharField(blank=True, db_column='DATA_ORIGINATION', max_length=20, null=True)),
('author_email', models.CharField(blank=True, db_column='AUTHOR_EMAIL', max_length=100, null=True)),
('author_name', models.CharField(blank=True, db_column='AUTHOR_NAME', max_length=100, null=True)),
('last_update', models.DateTimeField(db_column='LAST_UPDATE')),
('submission_account_id', models.CharField(blank=True, db_column='SUBMISSION_ACCOUNT_ID', max_length=15, null=True)),
('result_directory', models.CharField(blank=True, db_column='RESULT_DIRECTORY', max_length=100, null=True)),
('first_created', models.DateTimeField(db_column='FIRST_CREATED')),
('project_id', models.CharField(blank=True, db_column='PROJECT_ID', max_length=18, null=True)),
('biome', models.ForeignKey(db_column='BIOME_ID', on_delete=django.db.models.deletion.DO_NOTHING, to='emgapi.BiomeHierarchyTree')),
# ('publications', models.ManyToManyField(through='emgapi.StudyPublication', to='emgapi.Publication')),
],
options={
'db_table': 'STUDY',
},
),
migrations.CreateModel(
name='StudyPublication',
fields=[
('study', models.ForeignKey(db_column='STUDY_ID', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='emgapi.Study')),
('pub', models.ForeignKey(db_column='PUB_ID', on_delete=django.db.models.deletion.DO_NOTHING, to='emgapi.Publication')),
],
options={
'db_table': 'STUDY_PUBLICATION',
},
),
migrations.CreateModel(
name='Sample',
fields=[
('sample_id', models.AutoField(db_column='SAMPLE_ID', primary_key=True, serialize=False)),
('analysis_completed', models.DateField(blank=True, db_column='ANALYSIS_COMPLETED', null=True)),
('collection_date', models.DateField(blank=True, db_column='COLLECTION_DATE', null=True)),
('geo_loc_name', models.CharField(blank=True, db_column='GEO_LOC_NAME', max_length=255, null=True)),
('is_public', models.IntegerField(blank=True, db_column='IS_PUBLIC', null=True)),
('metadata_received', models.DateTimeField(blank=True, db_column='METADATA_RECEIVED', null=True)),
('sample_desc', models.TextField(blank=True, db_column='SAMPLE_DESC', null=True)),
('sequencedata_archived', models.DateTimeField(blank=True, db_column='SEQUENCEDATA_ARCHIVED', null= | True)),
('sequencedata_received', models.DateTimeField(blank=True, db_column='SEQUENCEDATA_RECEIVED', null=Tr | ue)),
('environment_biome', models.CharField(blank=True, db_column='ENVIRONMENT_BIOME', max_length=255, null=True)),
('environment_feature', models.CharField(blank=True, db_column='ENVIRONMENT_FEATURE', max_length=255, null=True)),
('environment_material', models.CharField(blank=True, db_colum |
"""
WSGI config for test_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, | see
https://docs.djangoproject.com/en/stable/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefau | lt("DJANGO_SETTINGS_MODULE", "test_project.settings")
application = get_wsgi_application()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 19:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10; applied migrations should not be
    # hand-edited beyond comments.
    dependencies = [
        ('posts', '0005_post_author'),
    ]
    operations = [
        # New PostImage model: an uploaded image plus optional alt text.
        migrations.CreateModel(
            name='PostImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alt_text', models.CharField(blank=True, max_length=96, null=True)),
                ('image', models.ImageField(upload_to='')),
            ],
        ),
        # Post.images many-to-many; reverse accessor is PostImage.posts.
        migrations.AddField(
            model_name='post',
            name='images',
            field=models.ManyToManyField(related_name='posts', to='posts.PostImage'),
        ),
    ]
|
set_no_section(self):
conf = Config()
conf.defaults.add_section("player")
conf.defaults.set("player", "backend", "blah")
conf.reset("player", "backend")
assert conf.get("player", "backend") == "blah"
def test_initial_after_set(self):
conf = Config()
conf.add_section("player")
conf.set("player", "backend", "orig")
conf.defaults.add_section("player")
conf.defaults.set("player", "backend", "initial")
self.assertEqual(conf.get("player", "backend"), "orig")
self.assertEqual(conf.defaults.get("player", "backend"), "initial")
conf.reset("player", "backend")
self.assertEqual(conf.get("player", "backend"), "initial")
def test_get_fallback_default(self):
conf = Config()
conf.defaults.add_section("get")
self.assertRaises(Error, conf.get, "get", "bar")
conf.defaults.set("get", "bar", 1)
self.assertEqual(conf.get("get", "bar"), "1")
conf.defaults.add_section("getboolean")
self.assertRaises(Error, conf.getboolean, "getboolean", "bar")
conf.defaults.set("getboolean", "bar", True)
self.assertEqual(conf.getboolean("getboolean", "bar"), True)
conf.defaults.add_section("getfloat")
self.assertRaises(Error, conf.getfloat, "getfloat", "bar")
conf.defaults.set("getfloat", "bar", 1.0)
self.assertEqual(conf.getfloat("getfloat", "bar"), 1.0)
conf.defaults.add_section("getint")
self.assertRaises(Error, conf.getint, "getint", "bar")
conf.defaults.set("getint", "bar", 42)
self.assertEqual(conf.getint("getint", "bar"), 42)
conf.defaults.add_section("getlist")
self.assertRaises(Error, conf.getlist, "getlist", "bar")
conf.defaults.setlist("getlist", "bar", [1, 2, 3])
self.assertEqual(conf.getlist("getlist", "bar"), ["1", "2", "3"])
def test_get(self):
conf = Config()
conf.add_section("foo")
conf.set("foo", "int", "1")
conf.set("foo", "float", "1.25")
conf.set("foo", "str", "foobar")
conf.set("foo", "bool", "True")
self.failUnlessEqual(conf.getint("foo", "int"), 1)
self.failUnlessEqual(conf.getfloat("foo", "float"), 1.25)
self.failUnlessEqual(conf.get("foo", "str"), "foobar")
self.failUnlessEqual(conf.getboolean("foo", "bool"), True)
def test_get_invalid_data(self):
conf = Config()
conf.add_section("foo")
conf.set("foo", "bla", "xx;,,;\n\n\naa")
self.assertTrue(conf.getboolean("foo", "bla", True))
self.assertEqual(conf.getint("foo", "bla", 42), 42)
self.assertEqual(conf.getfloat("foo", "bla", 1.5), 1.5)
self.assertEqual(conf.getstringlist("foo", "bla", ["baz"]), ["baz"])
def test_getint_float(self):
conf = Config()
conf.add_section("foo")
conf.set("foo", "float", "1.25")
self.assertEqual(conf.getint("foo", "float"), 1)
def test_get_default(self):
conf = Config()
conf.add_section("foo")
self.failUnlessEqual(conf.getboolean("foo", "nothing", True), True)
self.failUnlessEqual(conf.getint("foo", "nothing", 42), 42)
self.failUnlessEqual(conf.getfloat("foo", "nothing", 42.42), 42.42)
self.failUnlessEqual(conf.get("foo", "nothing", "foo"), "foo")
def test_stringlist_simple(self):
conf = Config()
conf.add_section("foo")
self.failIf(conf.get("foo", "bar", None))
vals = ["one", "two", "three"]
conf.setstringlist("foo", "bar", vals)
self.failUnlessEqual(conf.getstringlist("foo", "bar"), vals)
def test_stringlist_mixed(self):
conf = Config()
conf.add_section("foo")
self.failIf(conf.get("foo", "bar", None))
conf.setstringlist("foo", "bar", ["one", 2])
self.failUnlessEqual(conf.getstringlist("foo", "bar"), ["one", "2"])
def test_stringlist_quoting(self):
conf = Config()
conf.add_section("foo")
self.failIf(conf.get("foo", "bar", None))
vals = ["foo's gold", "bar, \"best\" 'ever'",
u"le goût d'œufs à Noël"]
conf.setstringlist("foo", "bar", vals)
self.failUnlessEqual(conf.getstringlist("foo", "bar"), vals)
def test_stringlist_spaces(self):
conf = Config()
conf.add_section("foo")
vals = [" ", " ", " \t ", " \n \n"]
conf.setstringlist("foo", "bar", vals)
self.failUnlessEqual(conf.getstringlist("foo", "bar"), vals)
def test_stringlist_invalid_encoding(self):
conf = Config()
conf.add_section("foo")
conf.setbytes("foo", "bar", b"\xff\xff\xff\xff\xff\xff")
def test_getlist(self):
conf = Config()
conf.add_section("foo")
self.assertEqual(conf.getlist("foo", "bar", ["arg"]), ["arg"])
conf.set("foo", "bar", "abc,fo:o\\,bar")
self.assertEqual(conf.getlist("foo", "bar"), ["abc", "fo:o,bar"])
self.assertEqual(conf.getlist("foo", "bar", sep=":"),
["abc,fo", "o\\,bar"])
conf.set("foo", "bar", "")
self.assertEqual(conf.getlist("foo", "bar"), [""])
def test_setlist(self):
conf = Config()
conf.add_section("foo")
conf.setlist("foo", "bar", [" a", ",", "c"])
self.assertEqua | l(conf.getlis | t("foo", "bar"), [" a", ",", "c"])
self.assertEqual(conf.get("foo", "bar"), " a,\\,,c")
conf.setlist("foo", "bar", [" a", ",", "c"], sep=":")
self.assertEqual(conf.get("foo", "bar"), " a:,:c")
def test_versioning_disabled(self):
# we don't pass a version, so versioning is disabled
conf = Config()
self.assertRaises(Error, conf.get_version)
with temp_filename() as filename:
conf.read(filename)
self.assertRaises(Error, conf.register_upgrade_function, lambda: None)
def test_versioning_upgrade_func(self):
called = []
with temp_filename() as filename:
conf = Config(version=0)
def func(*args):
called.append(args)
conf.register_upgrade_function(func)
self.assertRaises(Error, conf.get_version)
conf.read(filename)
self.assertEqual(conf.get_version(), -1)
conf.register_upgrade_function(func)
self.assertEqual([(conf, -1, 0), (conf, -1, 0)], called)
def test_versioning(self):
with temp_filename() as filename:
conf = Config(version=41)
conf.add_section("foo")
conf.set("foo", "bar", "quux")
conf.write(filename)
self.assertRaises(Error, conf.get_version)
# old was 41, we have 42, so upgrade
def func(config, old, new):
if old < 42:
config.set("foo", "bar", "nope")
conf = Config(version=42)
conf.register_upgrade_function(func)
conf.read(filename)
self.assertEqual(conf.get_version(), 41)
self.assertEqual(conf.get("foo", "bar"), "nope")
# write doesn't change version
conf.write(filename)
self.assertEqual(conf.get_version(), 41)
# but if we load again, it does
conf.read(filename)
self.assertEqual(conf.get_version(), 42)
def test_upgrade_first_read(self):
# don't run upgrade funcs if there is no config file yet
with temp_filename() as filename:
pass
conf = Config(version=41)
def func(*args):
self.assertTrue(False)
conf.register_upgrade_function(func)
conf.read(filename)
class TConfigProxy(TestCase):
def setUp(self):
conf = Config()
conf.defaults.add_section("somesection")
self.proxy = ConfigProxy(conf, "somesection")
def test_getters_setters(self):
self.proxy.set("foo", "bar")
self.assertEqual(self.proxy.get("foo"), "bar")
self.proxy.set("foo", 1.5)
self.assertEqual(self.proxy.getfloat("foo"), 1.5)
self.proxy.set( |
#!/usr/bin/env python
# encoding: utf-8
"""
update/disease.py
Update the disease terms in database
Created by Måns Magnusson on 2017-04-03.
Copyright (c) 2017 __MoonsoInc__. All rights reserved.
"""
import logging
import os
import click
from flask.cli import current_app, with_appcontext
from scout.constants import UPDATE_DISEASES_RESOURCES
from scout.load.hpo import load_disease_terms
from scout.server.extensions import store
from scout.utils.handle import get_file_handle
from scout.utils.scout_requests import (
fetch_hpo_terms,
fetch_hpo_to_genes_to_disease,
fetch_mim_files,
)
LOG = logging.getLogger(__name__)
def _check_resources(resources):
"""Check that resource lines file contain valid data
Args:
resources(dict): resource names as keys and resource file lines as values
"""
for resname, lines in resources.items():
if not lines or lines[0].startswit | h("#") is False:
LOG.error(f"Resource file '{resname}' doesn't contain valid data.")
raise click.Abort()
def _fetch_downloaded_resources(resources, downloads_folder):
    """Populate resource lines if a resource exists in downloads folder
    Args:
        resources(dict):
        downloads_folder(str): path to downloaded files or demo version of these files
    """
    for resname, filenames in UPDATE_DISEASES_RESOURCES.items():
        # Any one of the candidate file names satisfies the resource.
        for candidate in filenames:
            candidate_path = os.path.join(downloads_folder, candidate)
            if os.path.isfile(candidate_path):
                resources[resname] = get_file_handle(candidate_path).readlines()
        if resname not in resources:
            LOG.error(f"Resource file '{resname}' was not found in provided downloads folder.")
            raise click.Abort()
@click.command("diseases", short_help="Update disease terms")
@click.option(
    "-f",
    "--downloads-folder",
    type=click.Path(exists=True, dir_okay=True, readable=True),
    help="specify path to folder where files necessary to update diseases are pre-downloaded",
)
@click.option(
    "--api-key",
    help="Download resources using an OMIM api key (required only if downloads folder is NOT specified)",
)
@with_appcontext
def diseases(downloads_folder, api_key):
    """
    Update disease terms in mongo database. Use pre-downloaded resource files (phenotype_to_genes and genemap2) or download them from OMIM.
    Both options require using a valid omim api key.
    """
    adapter = store
    # CLI-provided key takes precedence over the app-configured one.
    api_key = api_key or current_app.config.get("OMIM_API_KEY")
    resources = {}
    if downloads_folder:
        # Local files win; the API key is not needed in this branch.
        api_key = None
        # Fetch required resource lines after making sure that are present in downloads folder and that contain valid data
        _fetch_downloaded_resources(resources, downloads_folder)
    else:
        # Download resources
        if not api_key:
            LOG.warning("Please provide a omim api key to load the omim gene panel")
            raise click.Abort()
        try:
            mim_files = fetch_mim_files(api_key, genemap2=True)
            resources["genemap_lines"] = mim_files["genemap2"]
            resources["hpo_gene_lines"] = fetch_hpo_to_genes_to_disease()
        except Exception as err:
            # Network/credential failures abort the command after logging.
            LOG.warning(err)
            raise click.Abort()
    _check_resources(resources)
    # Full rebuild: drop every existing disease term before reloading.
    LOG.info("Dropping DiseaseTerms")
    adapter.disease_term_collection.delete_many({})
    LOG.debug("DiseaseTerms dropped")
    load_disease_terms(
        adapter=adapter,
        genemap_lines=resources["genemap_lines"],
        hpo_disease_lines=resources["hpo_gene_lines"],
    )
    LOG.info("Successfully loaded all disease terms")
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = ')p9u&kcu@_(8u&-%4(m9!&4*82sx97zyl-!i#m9kic2lycj%0)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'demografia.apps.DemografiaConfig',
    'dal',
    'dal_select2',
    'suit',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'debug_toolbar',
    #'input_mask',
]
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name; this
# project targets Django 1.9, so it is correct here but must become
# MIDDLEWARE on upgrade.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'comunidad.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'comunidad.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'HOST': '127.0.0.1',
        'NAME': 'comunidad',
        'PASSWORD': '123456',
        'PORT': '5432',
        'USER': 'postgres',
        # NOTE(review): 'SCHEMAS' is not a standard Django DATABASES key —
        # presumably consumed by custom routing/backend code; verify it is
        # actually read somewhere.
        'SCHEMAS': 'public,demografia'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Configuration for the django-suit admin skin.
SUIT_CONFIG = {
    # header
    'ADMIN_NAME': 'comunidad',
    'HEADER_DATE_FORMAT': 'l, j. F Y',
    'HEADER_TIME_FORMAT': 'H:i',
    # forms
    'SHOW_REQUIRED_ASTERISK': True,  # Default True
    'CONFIRM_UNSAVED_CHANGES': True,  # Default True
    # menu
    'SEARCH_URL': '/admin/auth/user/',
    'MENU_ICONS': {
        'sites': 'icon-leaf',
        'auth': 'icon-lock',
    },
    # 'MENU_OPEN_FIRST_CHILD': True, # Default True
    'MENU_EXCLUDE': ('demografia.miembrohogar',),
    # 'MENU': (
    #     'sites',
    #     {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
    #     {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
    #     {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
    # ),
    # misc
    'LIST_PER_PAGE': 20
}
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'index'
# NOTE(review): CACHE_BACKEND is a legacy (pre-1.3) cache setting; modern
# Django reads CACHES — confirm this is still consumed by anything.
CACHE_BACKEND = 'simple:///'
# NOTE(review): AUTH_PROFILE_MODULE was removed from Django core in 1.5;
# presumably read by project code — verify.
AUTH_PROFILE_MODULE = "demografia.persona"
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]
# coding=utf-8
# Segment patterns for the digits 0-9 on a 7-segment display
# (bit i lights segment i).
Font = {
    0: 0b00111111,  # (48) 0
    1: 0b00000110,  # (49) 1
    2: 0b01011011,  # (50) 2
    3: 0b01001111,  # (51) 3
    4: 0b01100110,  # (52) 4
    5: 0b01101101,  # (53) 5
    6: 0b01111101,  # (54) 6
    7: 0b00100111,  # (55) 7
    8: 0b01111111,  # (56) 8
    9: 0b01101111,  # (57) 9
}
# Decompose i/16 into a tenths digit and a hundredths digit so that
# i/16 ~= array10[i]/10 + array100[i & 7]/100 (approximatively).
array10 = [int((i / 16.0) * 10) for i in range(16)]
array100 = [int((i / 16.0) * 100) % 10 for i in range(8)]
print(array10)
print(array100)
# check
for i in range(16):
    print("%d -> %s%s" % (i, array10[i], array100[i & 7]))
# print the C arrays
print("const uint8_t digit[16] = {" + ",".join(str(Font[i % 10]+128) for i in range(16)) + "};")
print("const uint8_t array10[16] = {" + ",".join(str(Font[array10[i]]) for i in range(16)) + "};")
print("const uint8_t array100[8] = {" + ",".join(str(Font[array100[i]]) for i in range(8)) + "};")
# check
for i in range(256):
    print("%s%d.%d%d%d%d" % ("1" if ((i >> 4) > 9) else " ", (i >> 4) % 10, array10[i & 15], array100[i & 7],
                             array100[i & 3], array100[(i << 1) & 3]))
# -*- coding: utf-8 -*-
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
try:
from django.http import StreamingHttpResponse as HttpResponse
except ImportError:
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from sse import Sse
class BaseSseView(View):
    """
    Base view for Server-Sent Events (SSE) streaming.

    Subclasses implement :meth:`iterator`; every ``yield`` there is a
    flush point at which the messages buffered on ``self.sse`` are
    streamed to the client.
    """
    def get_last_id(self):
        # The browser resends the id of the last event it received so a
        # dropped stream can be resumed; None on a fresh connection.
        return self.request.META.get("HTTP_LAST_EVENT_ID")
    def _iterator(self):
        # Each value yielded by iterator() marks a flush point: drain
        # whatever the Sse object has buffered at that moment.
        for _flush in self.iterator():
            for fragment in self.sse:
                yield fragment
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        self.sse = Sse()
        self.request = request
        self.args = args
        self.kwargs = kwargs
        response = HttpResponse(self._iterator(), content_type="text/event-stream")
        response['Cache-Control'] = 'no-cache'
        response['Software'] = 'django-sse'
        return response
    def iterator(self):
        """
        This is a source of stream.
        Must use ``yield`` statement to flush
        content from sse object to the client.
        Example:
        def iterator(self):
            counter = 0
            while True:
                self.sse.add_message('foo', 'bar')
                self.sse.add_message('bar', 'foo')
                yield
        """
        raise NotImplementedError
|
"""CFD-side driver for a CPL-coupled simulation.

Sets up a 1x1x1 cartesian CFD grid and, for each of Nsteps timesteps,
sends a time-varying forcing term to the coupled code and receives the
boundary-condition field back.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
import sys
from cplpy import CPL

# initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
MD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = MD_COMM.Get_size()

# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
# np.product is a deprecated alias (removed in NumPy 2.0); use np.prod.
NProcs = np.prod(npxyz)

# Gravity may be overridden by the first command-line argument.
# (Was a Python 2 print statement; converted to a call for consistency
# with the print() calls already used below and Python 3 compatibility.)
print('Number of arguments:', len(sys.argv), 'arguments: ', str(sys.argv))
if len(sys.argv) > 1:
    g = float(sys.argv[1])
else:
    g = 9.81

# Domain extents, origin and cell counts.
xyzL = np.array([1.5000000000000000E-003,
                 1.5000000000000000E-003,
                 2.5000000000000001E-003], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([8, 8, 8], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
    print("Non-coherent number of processes in MD ", nprocs_realm,
          " no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
    # Bug fix: the mpi4py MPI module has no Abort(); abort via the
    # communicator instead.
    MD_COMM.Abort(errorcode=1)

# Setup coupled simulation
cart_comm = MD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)

# Get constraint region
cnst_limits = CPL.get_cnst_limits()
cnst_portion = CPL.my_proc_portion(cnst_limits)
[cnst_ncxl, cnst_ncyl, cnst_nczl] = CPL.get_no_cells(cnst_portion)

# Get overlap region
olap_limits = CPL.get_olap_limits()
BC_limits = np.array([olap_limits[0], olap_limits[1],
                      olap_limits[2], olap_limits[3],
                      olap_limits[4], olap_limits[5]], dtype=np.int32)
BC_portion = CPL.my_proc_portion(BC_limits)
[BC_ncxl, BC_ncyl, BC_nczl] = CPL.get_no_cells(BC_portion)

# Allocate send and recv arrays
recv_array = np.zeros((4, BC_ncxl, BC_ncyl, BC_nczl), order='F', dtype=np.float64)
send_array = np.zeros((9, cnst_ncxl, cnst_ncyl, cnst_nczl), order='F', dtype=np.float64)

ft = True
Nsteps = 21
for time in range(Nsteps):
    # send data to update
    send_array[2, :, :, :] = -5.9490638385009208e-08*g*np.sin(2.*np.pi*time/Nsteps)
    CPL.send(send_array, cnst_portion)
    # recv data and plot
    recv_array, ierr = CPL.recv(recv_array, BC_portion)
    print(time)
CPL.finalize()
MPI.Finalize()
|
it__, as it will bypass
# our __setitem__
dict.__init__(self)
self._as_list = {}
self._last_key = None
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], HTTPHeaders)):
# Copy constructor
for k, v in args[0].get_all():
self.add(k, v)
else:
# Dict-style initialization
self.update(*args, **kwargs)
# new public methods
def add(self, name, value):
"""Adds a new value for the given key."""
norm_name = HTTPHeaders._normalize_name(name)
self._last_key = norm_name
if norm_name in self:
# bypass our override of __setitem__ since it modifies _as_list
dict.__setitem__(self, norm_name, self[norm_name] + ',' + value)
self._as_list[norm_name].append(value)
else:
self[norm_name] = value
def get_list(self, name):
"""Returns all values for the given header as a list."""
norm_name = HTTPHeaders._normalize_name(name)
return self._as_list.get(norm_name, [])
def get_all(self):
"""Returns an iterable of all (name, value) pairs.
If a header has multiple values, multiple pairs will be
returned with the same name.
"""
for name, list in self._as_list.iteritems():
for value in list:
yield (name, value)
    def parse_line(self, line):
        """Updates the dictionary with a single header line.
        >>> h = HTTPHeaders()
        >>> h.parse_line("Content-Type: text/html")
        >>> h.get('content-type')
        'text/html'
        """
        if line[0].isspace():
            # continuation of a multi-line header
            new_part = ' ' + line.lstrip()
            # Append to both representations: the last entry of the list
            # form, and the comma-joined dict value (via dict.__setitem__
            # so our override doesn't reset _as_list).
            self._as_list[self._last_key][-1] += new_part
            dict.__setitem__(self, self._last_key,
                             self[self._last_key] + new_part)
        else:
            # "Name: value" — strip only the value; add() normalizes the name.
            name, value = line.split(":", 1)
            self.add(name, value.strip())
@classmethod
def parse(cls, headers):
"""Returns a dictionary from HTTP header text.
>>> h = HTTPHeaders.parse(
"Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.iteritems())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
"""
h = cls()
for line in headers.splitlines():
if line:
h.parse_line(line)
return h
# dict implementation overrides
def __setitem__(self, name, value):
norm_name = HTTPHeaders._normalize_name(name)
dict.__setitem__(self, norm_name, value)
self._as_list[norm_name] = [value]
def __getitem__(self, name):
return dict.__getitem__(self, HTTPHeaders._normalize_name(name))
def __delitem__(self, name):
norm_name = HTTPHeaders._normalize_name(name)
dict.__delitem__(self, norm_name)
del self._as_list[norm_name]
def __contains__(self, name):
norm_name = HTTPHeaders._normalize_name(name)
return dict.__contains__(self, norm_name)
def get(self, name, default=None):
return dict.get(self, HTTPHeaders._normalize_name(name), default)
def update(self, *args, **kwargs):
# dict.update bypasses our __setitem__
for k, v in dict(*args, **kwargs).iteritems():
self[k] = v
    def copy(self):
        # dict.copy() would return a plain dict, not the subclass; go
        # through the copy constructor to preserve the multi-value state.
        return HTTPHeaders(self)
    # Matches names already in canonical Http-Header-Case so they can be
    # returned untouched by _normalize_name.
    _NORMALIZED_HEADER_RE = \
        re.compile(r'^[A-Z0-9][a-z0-9]*(-[A-Z0-9][a-z0-9]*)*$')
    # Cache of raw name -> normalized name, shared across all instances.
    _normalized_headers = {}
    @staticmethod
    def _normalize_name(name):
        """Converts a name to Http-Header-Case.
        >>> HTTPHeaders._normalize_name("coNtent-TYPE")
        'Content-Type'
        """
        try:
            return HTTPHeaders._normalized_headers[name]
        except KeyError:
            # Cache miss: already-canonical names pass through unchanged,
            # otherwise capitalize each dash-separated word.
            if HTTPHeaders._NORMALIZED_HEADER_RE.match(name):
                normalized = name
            else:
                normalized = "-".join(
                    [w.capitalize() for w in name.split("-")])
            HTTPHeaders._normalized_headers[name] = normalized
            return normalized
def url_concat(url, args):
    """Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.
    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    """
    if not args:
        return url
    # Pick the separator: none when the url already ends with '?' or '&',
    # '&' when a query string exists, '?' to start one.
    separator = ""
    if url[-1] not in ('?', '&'):
        separator = '&' if ('?' in url) else '?'
    return url + separator + urllib.urlencode(args)
class HTTPFile(ObjectDict):
    """Represents an HTTP file. For backwards compatibility, its instance
    attributes are also accessible as dictionary keys.
    :ivar filename: file name as reported by the client in the upload.
    :ivar body: raw file contents as a byte string.
    :ivar content_type: The content_type comes from the provided HTTP header
    and should not be trusted outright given that it can be easily forged.
    """
    pass
def parse_multipart_form_data(boundary, data, arguments, files):
    """Parses a multipart/form-data body.
    The boundary and data parameters are both byte strings.
    The dictionaries given in the arguments and files parameters
    will be updated with the contents of the body.
    """
    # The standard allows for the boundary to be quoted in the header,
    # although it's rare (it happens at least for google app engine
    # xmpp). I think we're also supposed to handle backslash-escapes
    # here but I'll save that until we see a client that uses them
    # in the wild.
    if boundary.startswith(b('"')) and boundary.endswith(b('"')):
        boundary = boundary[1:-1]
    # Everything after "--boundary--" is ignored; a missing final
    # boundary means the body is malformed.
    final_boundary_index = data.rfind(b("--") + boundary + b("--"))
    if final_boundary_index == -1:
        log.msg("Invalid multipart/form-data: no final boundary")
        return
    parts = data[:final_boundary_index].split(b("--") + boundary + b("\r\n"))
    for part in parts:
        if not part:
            continue
        # Headers and body of a part are separated by a blank line (CRLFCRLF).
        eoh = part.find(b("\r\n\r\n"))
        if eoh == -1:
            log.msg("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b("\r\n")):
            log.msg("Invalid multipart/form-data")
            continue
        # Strip the separator (4 bytes) and the trailing CRLF (2 bytes).
        value = part[eoh + 4:-2]
        if not disp_params.get("name"):
            log.msg("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):
            # A filename marks a file upload; plain values go to arguments.
            ctype = headers.get("Content-Type", "application/unknown")
            files.setdefault(name, []).append(HTTPFile(
                filename=disp_params["filename"], body=value,
                content_type=ctype))
        else:
            arguments.setdefault(name, []).append(value)
# _parseparam and _parse_header | are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def _parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.next()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
def doctests():
|
)
elif self.summary_method == TimeSeriesSummaryMethod.MAX:
masked_summary = numpy.max(masked_stack[band], axis=0)
elif self.summary_method == TimeSeriesSummaryMethod.MEAN:
masked_summary = numpy.mean(masked_stack[band], axis=0)
elif self.summary_method == TimeSeriesSummaryMethod.MEDIAN:
masked_summary = numpy.median(masked_stack[band], axis=0)
# aka 50th percentile
elif self.summary_method == TimeSeriesSummaryMethod.MEDIAN_NON_INTERPOLATED:
masked_sorted = numpy.ma.sort(masked_stack[band], axis=0)
masked_percentile_index = numpy.ma.floor(numpy.ma.count(masked_sorted, axis=0) * 0.95).astype(numpy.int16)
masked_summary = numpy.ma.choose(masked_percentile_index, masked_sorted)
elif self.summary_method == TimeSeriesSummaryMethod.COUNT:
# TODO Need to artificially create masked array here since it is being expected/filled below!!!
masked_summary = numpy.ma.masked_equal(masked_stack[band].count(axis=0), ndv)
elif self.summary_method == TimeSeriesSummaryMethod.SUM:
masked_summary = numpy.sum(masked_stack[band], axis=0)
elif self.summary_method == TimeSeriesSummaryMethod.STANDARD_DEVIATION:
masked_summary = numpy.std(masked_stack[band], axis=0)
elif self.summary_method == TimeSeriesSummaryMethod.VARIANCE:
masked_summary = numpy.var(masked_stack[band], axis=0)
# currently 95th percentile
elif self.summary_method == TimeSeriesSummaryMethod.PERCENTILE:
masked_sorted = numpy.ma.sort(masked_stack[band], axis=0)
masked_percentile_index = numpy.ma.floor(numpy.ma.count(masked_sorted, axis=0) * 0.95).astype(numpy.int16)
masked_summary = numpy.ma.choose(masked_percentile_index, masked_sorted)
elif self.summary_method == TimeSeriesSummaryMethod.YOUNGEST_PIXEL:
# TODO the fact that this is band at a time might be problematic. We really should be considering
# all bands at once (that is what the landsat_mosaic logic did). If PQA is being applied then
# it's probably all good but if not then we might get odd results....
masked_summary = empty_array(shape=(self.chunk_size_x, self.chunk_size_x), dtype=numpy.int16, ndv=ndv)
# Note the reversed as the stack is created oldest first
for d in reversed(stack[band]):
masked_summary = numpy.where(masked_summary == ndv, d, masked_summary)
# If the summary doesn't contain an no data values then we can stop
if not numpy.any(masked_summary == ndv):
break
# TODO Need to artificially create masked array here since it is being expected/filled below!!!
masked_summary = numpy.ma.masked_equal(masked_summary, ndv)
elif self.summary_method == TimeSeriesSummaryMethod.OLDEST_PIXEL:
# TODO the fact that this is band at a time might be problematic. We really should be considering
# all bands at once (that is what the landsat_mosaic logic did). If PQA is being applied then
# it's probably all good but if not then we might get odd results....
masked_summary = empty_array(shape=(self.chunk_size_x, self.chunk_size_x), dtype=numpy.int16, ndv=ndv)
# Note the NOT reversed as the stack is created oldest first
for d in stack[band]:
masked_summary = numpy.where(masked_summary == ndv, d, masked_summary)
# If the summary doesn't contain an no data values then we can stop
if not numpy.any(masked_summary == ndv):
break
# TODO Need to artificially create masked array here since it is being expected/filled below!!!
masked_summary = numpy.ma.masked_equal(masked_summary, ndv)
masked_stack[band] = None
_log.debug("NONE-ing masked stack[%s]", band.name)
_log.debug("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
_log.debug("masked summary is [%s]", masked_summary)
_log.debug("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
# Create the output file
if not os.path.exists(path):
_log.info("Creating raster [%s]", path)
driver = gdal.GetDriverByName("GTiff")
assert driver
raster = driver.Create(path, metadata.shape[0], metadata.shape[1], len(bands), gdal.GDT_Int16)
assert raster
raster.SetGeoTransform(metadata.transform)
raster.SetProjection(metadata.projection)
for b in bands:
raster.GetRasterBand(b.value).SetNoDataValue(ndv)
_log.info("Writing band [%s] data to raster [%s]", band.name, path)
_log.debug("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
raster.GetRasterBand(band.value).WriteArray(masked_summary.filled(ndv), xoff=x, yoff=y)
raster.GetRasterBand(band.value).ComputeStatistics(True)
raster.FlushCache()
masked_summary = None
_log.debug("NONE-ing the masked summary")
_log.debug("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
stack = None
_log.debug("Just NONE-ed the stack")
_log.debug("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
raster = None
_log.debug("Just NONE'd the raster")
_log.debug("Current MAX RSS usage is [%d] MB", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
_log.info("Memory usage was [%d MB]", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
_log.info("CPU time used [%s]", timedelta(seconds=int(resource.getrusage(resource.RUSAGE_SELF).ru_utime)))
def get_output_filename(self, dataset_type):
if dataset_type == DatasetType.WATER:
return os.path.join(self.output_directory,
"LS_WOFS_SUMMARY_{x:03d}_{y:04d}_{acq_min}_{acq_max}.tif".format(latitude=self.x,
longitude=self.y,
acq_min=self.acq_min,
acq_max=self.acq_max))
satellite_str = ""
if Satellite.LS5 in self.satellites or Satellite.LS7 in self.satellites or Satellite.LS8 in self.satellites:
satellite_str += "LS"
if Satellite.LS5 in self.satellites:
satellite_str += "5"
if Satellite.LS7 in self.satellites:
satellite_str += "7"
if Satellite.LS8 in self.satellites:
satellite_str += "8"
dataset_str = ""
if dataset_type == DatasetType.ARG25:
| dataset_str += "NBAR"
elif dataset_type == DatasetType.PQ25:
dataset_str += "PQA"
elif dataset_type == DatasetType.FC25:
dataset_str += "FC"
elif dataset_type == DatasetType.WATER:
dataset_str += "WOFS"
if self.apply_pqa_filter and dataset_type != Data | setType.PQ25:
dataset_str += "_WITH_PQA"
return os.path.join(self.output_direc |
ck
import oauthlib
from django.conf import settings
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from courseware.tests import BaseTestXmodule
from courseware.views.views import get_course_lti_endpoints
from openedx.core.lib.url_utils import quote_slashes
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDENT_VIEW
@attr(shard=1)
class TestLTI(BaseTestXmodule):
    """
    Integration test for lti xmodule.
    It checks overall code, by assuring that context that goes to template is correct.
    As part of that, checks oauth signature generation by mocking signing function
    of `oauthlib` library.
    """
    CATEGORY = "lti"
    def setUp(self):
        """
        Mock oauth1 signing of requests library for testing.

        Builds the LTI launch parameters and template context this xmodule is
        expected to produce, then patches ``oauthlib.oauth1.Client.sign`` so the
        nonce/timestamp/signature in the generated headers are deterministic.
        """
        super(TestLTI, self).setUp()
        # Fixed OAuth values so the signature produced during rendering is
        # predictable and can be compared against the expected context.
        mocked_nonce = u'135685044251684026041377608307'
        mocked_timestamp = u'1234567890'
        mocked_signature_after_sign = u'my_signature%3D'
        mocked_decoded_signature = u'my_signature='
        # Note: this course_id is actually a course_key
        context_id = self.item_descriptor.course_id.to_deprecated_string()
        user_id = unicode(self.item_descriptor.xmodule_runtime.anonymous_student_id)
        hostname = self.item_descriptor.xmodule_runtime.hostname
        resource_link_id = unicode(urllib.quote('{}-{}'.format(hostname, self.item_descriptor.location.html_id())))
        # LIS result sourcedid identifies (context, resource link, user) for grading.
        sourcedId = "{context}:{resource_link}:{user_id}".format(
            context=urllib.quote(context_id),
            resource_link=resource_link_id,
            user_id=user_id
        )
        # The complete set of LTI launch parameters expected in the rendered form.
        self.correct_headers = {
            u'user_id': user_id,
            u'oauth_callback': u'about:blank',
            u'launch_presentation_return_url': '',
            u'lti_message_type': u'basic-lti-launch-request',
            u'lti_version': 'LTI-1p0',
            u'roles': u'Student',
            u'context_id': context_id,
            u'resource_link_id': resource_link_id,
            u'lis_result_sourcedid': sourcedId,
            u'oauth_nonce': mocked_nonce,
            u'oauth_timestamp': mocked_timestamp,
            u'oauth_consumer_key': u'',
            u'oauth_signature_method': u'HMAC-SHA1',
            u'oauth_version': u'1.0',
            u'oauth_signature': mocked_decoded_signature
        }
        # Keep a handle on the real signing function so the mock below can
        # delegate to it and then overwrite only the nondeterministic fields.
        saved_sign = oauthlib.oauth1.Client.sign
        self.expected_context = {
            'display_name': self.item_descriptor.display_name,
            'input_fields': self.correct_headers,
            'element_class': self.item_descriptor.category,
            'element_id': self.item_descriptor.location.html_id(),
            'launch_url': u'http://www.example.com', # default value
            'open_in_a_new_page': True,
            'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor,
                                                                         'preview_handler').rstrip('/?'),
            'hide_launch': False,
            'has_score': False,
            'module_score': None,
            'comment': u'',
            'weight': 1.0,
            'ask_to_send_username': self.item_descriptor.ask_to_send_username,
            'ask_to_send_email': self.item_descriptor.ask_to_send_email,
            'description': self.item_descriptor.description,
            'button_text': self.item_descriptor.button_text,
            'accept_grades_past_due': self.item_descriptor.accept_grades_past_due,
        }
        def mocked_sign(self, *args, **kwargs):
            """
            Mocked oauth1 sign function.
            """
            # self is <oauthlib.oauth1.rfc5849.Client object> here:
            __, headers, __ = saved_sign(self, *args, **kwargs)
            # we should replace nonce, timestamp and signed_signature in headers:
            old = headers[u'Authorization']
            old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')])
            # The first parameter keeps the "OAuth " prefix after the naive split above.
            old_parsed[u'OAuth oauth_nonce'] = mocked_nonce
            old_parsed[u'oauth_timestamp'] = mocked_timestamp
            old_parsed[u'oauth_signature'] = mocked_signature_after_sign
            headers[u'Authorization'] = ', '.join([k + '="' + v + '"' for k, v in old_parsed.items()])
            return None, headers, None
        patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign)
        patcher.start()
        self.addCleanup(patcher.stop)
    def test_lti_constructor(self):
        # The student view must equal the lti.html template rendered with the
        # expected context built in setUp.
        generated_content = self.item_descriptor.render(STUDENT_VIEW).content
        expected_content = self.runtime.render_template('lti.html', self.expected_context)
        self.assertEqual(generated_content, expected_content)
    def test_lti_preview_handler(self):
        # The preview handler must return the lti_form.html template rendered
        # with the same expected context.
        generated_content = self.item_descriptor.preview_handler(None, None).body
        expected_content = self.runtime.render_template('lti_form.html', self.expected_context)
        self.assertEqual(generated_content, expected_content)
@attr(shard=1)
class TestLTIModuleListing(SharedModuleStoreTestCase):
"""
a test for the rest endpoint that lists LTI modules in a course
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
    @classmethod
    def setUpClass(cls):
        """Build a course with two chapter/section pairs and two LTI modules:
        one published (section1) and one draft (section2)."""
        super(TestLTIModuleListing, cls).setUpClass()
        cls.course = CourseFactory.create(display_name=cls.COURSE_NAME, number=cls.COURSE_SLUG)
        cls.chapter1 = ItemFactory.create(
            parent_location=cls.course.location,
            display_name="chapter1",
            category='chapter')
        cls.section1 = ItemFactory.create(
            parent_location=cls.chapter1.location,
            display_name="section1",
            category='sequential')
        cls.chapter2 = ItemFactory.create(
            parent_location=cls.course.location,
            display_name="chapter2",
            category='chapter')
        cls.section2 = ItemFactory.create(
            parent_location=cls.chapter2.location,
            display_name="section2",
            category='sequential')
        # creates one draft and one published lti module, in different sections
        cls.lti_published = ItemFactory.create(
            parent_location=cls.section1.location,
            display_name="lti published",
            category="lti",
            location=cls.course.id.make_usage_key('lti', 'lti_published'),
        )
        cls.lti_draft = ItemFactory.create(
            parent_location=cls.section2.location,
            display_name="lti draft",
            category="lti",
            location=cls.course.id.make_usage_key('lti', 'lti_draft'),
            publish_item=False,
        )
    def expected_handler_url(self, handler):
        """convenience method to get the reversed handler urls"""
        # NOTE(review): this always builds the URL for ``cls.lti_published``,
        # regardless of which LTI module a test inspects — confirm if tests are
        # ever added for the draft module.
        return "https://{}{}".format(settings.SITE_NAME, reverse(
            'courseware.module_render.handle_xblock_callback_noauth',
            args=[
                self.course.id.to_deprecated_string(),
                quote_slashes(unicode(self.lti_published.scope_ids.usage_id.to_deprecated_string()).encode('utf-8')),
                handler
            ]
        ))
    def test_lti_rest_bad_course(self):
        """Tests what happens when the lti listing rest endpoint gets a bad course_id"""
        bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"]
        for bad_course_id in bad_ids:
            lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id)
            response = self.client.get(lti_rest_endpoints_url)
            # Malformed or unknown course keys must 404, not raise a server error.
            self.assertEqual(404, response.status_code)
def test_lti_rest_listing(self):
"""tests that the draft lti module is part of the endpoint response"""
request = mock.Mock()
request.method = 'GET'
response = get_course_lti_endpoints(request, course_id=self.course.id.to_deprecated_string())
self.assertEqual(200, response.status_code)
self.assertEqual('application/json', response['Content-Ty |
from xblock.fragment import Fragment
from xmodule.x_module import XModule
from xmodule.seq_module import SequenceDescriptor
from xmodule.progress import Progress
from pkg_resources import resource_string
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
# Icon priority used by get_icon_class(): entries later in this list win.
class_priority = ['video', 'problem']
class VerticalFields(object):
    # Shared field mixin: marks vertical modules/descriptors as containers.
    has_children = True
class VerticalModule(VerticalFields, XModule):
    ''' Layout module for laying out submodules vertically.'''

    def _render_items_view(self, view_name, context):
        """Render each displayable child with ``view_name`` and wrap the results
        in the vertical layout template.

        Shared implementation for ``student_view`` and ``mobi_student_view``
        (previously two duplicated method bodies).
        """
        fragment = Fragment()
        contents = []
        for child in self.get_display_items():
            rendered_child = child.render(view_name, context)
            # Carry the child's JS/CSS resources up into the wrapper fragment.
            fragment.add_frag_resources(rendered_child)
            contents.append({
                'id': child.id,
                'content': rendered_child.content
            })
        fragment.add_content(self.system.render_template('vert_module.html', {
            'items': contents
        }))
        return fragment

    def student_view(self, context):
        """Web rendering: children stacked vertically via vert_module.html."""
        return self._render_items_view('student_view', context)

    def mobi_student_view(self, context):
        """Mobile rendering: same layout, children rendered with their mobile view."""
        return self._render_items_view('mobi_student_view', context)

    def get_progress(self):
        """Aggregate the children's progress (None when no child reports any)."""
        # TODO: Cache progress or children array?
        progresses = [child.get_progress() for child in self.get_children()]
        return reduce(Progress.add_counts, progresses, None)

    def get_icon_class(self):
        """Return the icon class: the highest-priority class among the children,
        falling back to 'other'."""
        child_classes = set(child.get_icon_class() for child in self.get_children())
        new_class = 'other'
        # Later entries in class_priority overwrite earlier matches.
        for c in class_priority:
            if c in child_classes:
                new_class = c
        return new_class
class VerticalDescriptor(VerticalFields, SequenceDescriptor):
    """Studio-side descriptor for VerticalModule; reuses the sequence editor JS."""
    module_class = VerticalModule
    # CoffeeScript editing widget bundled with the descriptor.
    js = {'coffee': [resource_string(__name__, 'js/src/vertical/edit.coffee')]}
    js_module_name = "VerticalDescriptor"
    # TODO (victor): Does this need its own definition_to_xml method? Otherwise it looks
    # like verticals will get exported as sequentials...
|
import require_torch, require_torchaudio
from ..test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import Speech2TextFeatureExtractor
# Module-wide fallback RNG used when callers do not supply one.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Return a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    ``rng`` defaults to the module-level ``global_rng``; ``name`` is accepted
    for API compatibility and unused.
    """
    generator = global_rng if rng is None else rng
    return [
        [generator.random() * scale for _ in range(shape[1])]
        for _ in range(shape[0])
    ]
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    """Holds feature-extractor settings and builds synthetic speech inputs
    for the Speech2Text feature extraction tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Evenly spaced length increment so batch items grow in size.
        self.seq_length_diff = (max_seq_length - min_seq_length) // (batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct the feature extractor under test."""
        return dict(
            feature_size=self.feature_size,
            num_mel_bins=self.num_mel_bins,
            padding_value=self.padding_value,
            sampling_rate=self.sampling_rate,
            return_attention_mask=self.return_attention_mask,
            do_normalize=self.do_normalize,
        )

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of float speech inputs, optionally equal-length and/or
        converted to numpy arrays."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            # Every example has the maximum length.
            lengths = [self.max_seq_length] * self.batch_size
        else:
            # make sure that inputs increase in size
            lengths = range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
        speech_inputs = [floats_list((length, self.feature_size)) for length in lengths]
        if numpify:
            speech_inputs = [np.asarray(item) for item in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
    def setUp(self):
        # Fresh tester per test: provides default extractor kwargs and inputs.
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
def _check_zero_mean_unit_variance(self, input_vector):
self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        # Test not batched input
        # List input and numpy input must produce (near-)identical features.
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        # For each padding strategy, only the frames flagged valid by the
        # attention mask should be mean/variance normalized.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            # Number of valid (non-padded) frames per example.
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def test_cepstral_mean_and_vari | ance_normalization_np(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_ | inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
paddings = ["longest", "max_length", "do_not_pad"]
max_lengths = [None, 16, None]
for max_length, padding in zip(max_lengths, paddings):
inputs = feature_extractor(
speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
)
input_features = inputs.input_features
attention_mask = inputs.attention_mask
fbank_feat_lengths = [np.sum(x) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        # Truncating every example to max_length=4 frames: normalization must
        # still hold over the remaining valid frames.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        # Valid frame count per example, from the binary attention mask.
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        # Examples 1 and 2 are checked over the full truncated array —
        # presumably fully valid after truncation; confirm against extractor docs.
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
inputs = feature_extractor(
speech_inputs,
padding="longest",
max_length=4,
truncation=True,
return_tensors="np",
return_attention_mask=True,
)
input_features = inputs.input_f |
from django.conf.urls | import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', include('lau | nch.urls', namespace="launch", app_name="launch")),
url(r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/env python
import pyflag.IO as IO
import pyflag.Registry as | Registry
# Initialise the plugin registry before importing modules that use it —
# TODO(review): confirm this ordering requirement against pyflag docs.
Registry.Init()
import pyflag.FileSystem as FileSystem
from FileSystem import DBFS
# Name of the pyflag case to operate on.
case = "demo"
## This gives us a handle to the VFS
fsfd = Registry.FILESYSTEMS.fs['DBFS'](case)
## WE just open a file in the VFS:
#fd=fsfd.open(inode="Itest|S1/2")
## And read it
#print fd.read()
"""
Provides the base class for all quadrature rules.
"""
import numpy as np
import copy
class QuadRule(object):
    """
    Abstract base class for all quadrature rules.

    Subclasses must implement :meth:`_set_data` to populate the point and
    weight tables.

    Parameters
    ----------
    order : int
        The polynomial order up to which the quadrature should be exact
    dimension : int
        Index bound for the point/weight tables (slots 0 .. dimension).
    """

    def __init__(self, order, dimension):
        self._order = order
        self._dimension = dimension
        # One slot per dimension from 0 up to and including `dimension`.
        n_slots = dimension + 1
        self._points = [None] * n_slots
        self._weights = [None] * n_slots
        self._set_data()

    def _set_data(self):
        """Subclass hook: fill ``self._points`` and ``self._weights``."""
        raise NotImplementedError()

    @property
    def order(self):
        """Polynomial order of exactness."""
        return self._order

    @property
    def dimension(self):
        """Dimension the rule was constructed with."""
        return self._dimension

    @property
    def points(self):
        """Deep copy of the point table, so callers cannot mutate state."""
        return copy.deepcopy(self._points)

    @property
    def weights(self):
        """Deep copy of the weight table, so callers cannot mutate state."""
        return copy.deepcopy(self._weights)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchDeleteIntents
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest publi | shed package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Intents_BatchDeleteIntents_sync]
from google.cloud import dialogflow_v2
def sample_batch_ | delete_intents():
# Create a client
client = dialogflow_v2.IntentsClient()
# Initialize request argument(s)
intents = dialogflow_v2.Intent()
intents.display_name = "display_name_value"
request = dialogflow_v2.BatchDeleteIntentsRequest(
parent="parent_value",
intents=intents,
)
# Make the request
operation = client.batch_delete_intents(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Intents_BatchDeleteIntents_sync]
|
# dj | ango
default_app_co | nfig = "zentral.contrib.okta.apps.ZentralOktaAppConfig"
|
import os
import ConfigParser
import click
from base64 import b64enc | ode
import requests
import json
class B3Notify(object):
    """
    Build status notifier for bitbucket server
    """

    def __init__(self, home='~/.b3notifyrc'):
        # Path to the user's configuration file (expanded lazily in
        # read_configuration).
        self.home = home
        self.verbose = True
        self.config = {}
        self.build_url = ''
        self.key = ''
        self.name = ''
        self.commit = ''
        self.auth = ''

    def read_configuration(self, profile='default'):
        """Load url/username/password from the rc files and derive the
        base64 auth token."""
        parser = ConfigParser.ConfigParser()
        parser.read([
            os.path.expanduser('~/.b3notifyrc'),
            '.b3notifyrc',
            os.path.expanduser('{0}'.format(self.home)),
        ])
        for option in ('url', 'username', 'password'):
            setattr(self, option, parser.get(profile, option).strip("'"))
        credentials = '{0}:{1}'.format(self.username, self.password)
        self.auth = '{0}'.format(b64encode(credentials))

    @property
    def headers(self):
        """HTTP headers for the build-status POST, including basic auth."""
        authorization = 'Basic {0}'.format(self.auth)
        return {
            'Content-Type': 'application/json',
            'Authorization': authorization
        }

    def notify(
            self, commit, build_url, build_key, build_name,
            build_state='FAIL'):
        """POST the build status for `commit` and return the response."""
        payload = {
            # <INPROGRESS|SUCCESSFUL|FAILED>",
            'state': build_state,
            'key': build_key,
            'name': build_name,
            'url': build_url
        }
        self.commit_url = '{0}{1}'.format(self.url, commit)
        return requests.post(
            self.commit_url,
            headers=self.headers,
            data=json.dumps(payload))
@click.command()
@click.option(
    '--config-file', envvar='CONFIG_FILE', default='.',
    help='Location to find configuration file')
@click.option(
    '--profile', default='default',
    help='Profile to use for credentials')
@click.option(
    '--host', '-h',
    help='Server URL')
@click.option(
    '--verbose', '-v', is_flag=True,
    help='Enable verbose mode')
@click.option(
    '--success', '-s', is_flag=True, default=False,
    help='Notify build success')
@click.option(
    '--fail', '-f', is_flag=True, default=False,
    help='Notify build failure')
@click.option(
    '--progress', '-p', is_flag=True, default=False,
    help='Notify inprogress build')
@click.option(
    '--commit', '-c', envvar='GIT_COMMIT',
    help='Hash value of the commit')
@click.option(
    '--build-url', '-b', envvar='BUILD_URL',
    help='Current build url')
@click.option(
    '--key', '-k', envvar='BUILD_TAG',
    help='Build key')
@click.option(
    '--name', '-n', envvar='BUILD_DISPLAY_NAME',
    help='Build name')
@click.option(
    '--auth', '-a', envvar='BUILD_AUTH', required=False,
    help='Base64 encoded string of username:password')
def cli(
        config_file, profile, host, verbose, success, fail, progress,
        commit, build_url, key, name, auth):
    """
    Build status notifier for bitbucket server

    Reads credentials from the rc file (or --auth/--host overrides) and POSTs
    the selected build state for the given commit.
    """
    # NOTE(review): the --progress flag is accepted but never checked below;
    # INPROGRESS is simply the default state. Confirm that is intended.
    build_state = 'INPROGRESS'
    notify = B3Notify(config_file)
    notify.read_configuration(profile=profile)
    notify.verbose = verbose
    # CLI overrides take precedence over the rc-file values.
    if host is not None:
        notify.url = host
    if auth is not None:
        notify.auth = auth
    if success is True:
        build_state = 'SUCCESSFUL'
    if fail is True:
        build_state = 'FAILED'
    response = notify.notify(
        commit=commit,
        build_url=build_url,
        build_key=key,
        build_name=name,
        build_state=build_state)
    print response.status_code, response.text
|
# import the libraries that you need
import requests
import csv
# make a GET request to the OneSearch X-Service API
response = requests.get('http://onesearch.cuny.edu/PrimoWebServices'
                        '/xservice/search/brief?'
                        '&institution=KB'
                        '&query=any,contains,obama'
                        '&query=facet_rtype,exact,books'
                        '&loc=adaptor,primo_central_multiple_fe'
                        '&loc=local,scope:(KB,AL,CUNY_BEPRESS)'
                        '&json=true')
# take the JSON from the response
# and store it in a variable called alldata
alldata = response.json()
# drill down into a smaller subset of the json
# and print this smaller bit of json
# (FACET index 1 — presumably the resource-type facet; verify against the API response)
somedata = alldata['SEGMENTS']['JAGROOT']['RESULT']['FACETLIST']['FACET']\
    [1]['FACET_VALUES']
print(somedata)
# open a file called mycsv.csv, then loop through the data
# and write to that file
# NOTE: 'wb' + csv.writer is Python 2 style; on Python 3 use
# open('mycsv.csv', 'w', newline='') instead.
with open('mycsv.csv', 'wb') as f:
    writer = csv.writer(f)
    for x in somedata:
        writer.writerow([x['@KEY'], x['@VALUE']])
|
from __future__ im | port print_function, division
import numpy as np
class Tuning():
    """
    Equal temperament tuning - allows to convert between frequency and pitch.
    - unit pitch space
      - continous, unbounded
      - 1.0 ~ one octave
    - step pitch space
      - continous, unbounded
      - N steps ~ one octave
      - unit pitch space * N
    - unit pitch class space
      - continous, bounded [0, 1.0)
      - unit pitch space % 1.0
    - step pitch class space
      - continous, bounded [0, N)
      - unit step pitch space % N
    - integer step pitch space
      - discrete, unbounded
      - floor(step pitch space)
    - integer step pitch class space
      - discrete, bounded {0, 1, .. N - 1}
      - floor(step pitch class space)
    """
    def __init__(self, base_freq=440, steps_per_octave=12, octave_ratio=2):
        # base_freq: frequency of pitch 0 (Hz); steps_per_octave: steps per
        # octave_ratio interval; octave_ratio: frequency ratio of one "octave".
        self.base_freq = base_freq
        self.steps_per_octave = steps_per_octave
        self.octave_ratio = octave_ratio

    def pitch_to_freq(self, pitch):
        """Convert a step pitch to an absolute frequency (Hz)."""
        factor = self.pitch_to_relative_freq(pitch)
        return factor * self.base_freq

    def freq_to_pitch(self, freq):
        """Convert an absolute frequency (Hz) to a step pitch (inverse of
        pitch_to_freq)."""
        rel_freq = freq / self.base_freq
        if self.octave_ratio == 2:
            # Fast path for the common octave doubling.
            p = np.log2(rel_freq)
        else:
            # BUG FIX: was np.log(rel_freq) / np.log(2), which is not the
            # inverse of pitch_to_freq for octave_ratio != 2.
            p = np.log(rel_freq) / np.log(self.octave_ratio)
        return p * self.steps_per_octave

    def pitch_to_relative_freq(self, pitch):
        """Frequency factor relative to base_freq for a given step pitch."""
        return pow(self.octave_ratio, pitch / self.steps_per_octave)
class PitchQuantizer():
    """Quantize frequencies onto the pitch grid of a Tuning, with optional
    subdivision of each pitch bin."""

    def __init__(self, tuning, bin_division=1):
        # tuning: object providing freq_to_pitch(); bin_division: how many
        # sub-bins each pitch step is split into.
        self.tuning = tuning
        self.bin_division = bin_division

    def quantize(self, freqs):
        """
        Quantizes frequencies to nearest pitch bins (with optional division of
        bins).
        """
        scaled_pitches = self.tuning.freq_to_pitch(freqs) * self.bin_division
        return np.round(scaled_pitches) / self.bin_division
|
e listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class WebGLConformanceExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('gl-enable-vertex-attrib.html',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Fails everywhere.
self.Skip('conformance/glsl/misc/large-loop-compile.html',
bug=322764)
self.Skip('conformance/textures/texture-size-limit.html',
bug=322789)
# Windows failures.
self.Fail('conformance/ogles/GL/atan/atan_001_to_008.html',
['win'], bug=322794)
self.Fail('conformance/ogles/GL/atan/atan_009_to_012.html',
['win'], bug=322794)
self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
['win'], bug=322795)
# Windows/Intel failures
self.Fail('conformance/textures/texture-size.html',
['win', 'intel'], bug=121139)
self.Fail('conformance/rendering/gl-scissor-test.html',
['win', 'intel'], bug=314997)
# Windows/AMD failures
self.Fail('conformance/rendering/more-than-65536-indices.html',
['win', 'amd'], bug=314997)
# Windows 7/Intel failures
self.Fail('conformance/context/context-lost-restored.html',
['win7', 'intel'])
self.Fail('conformance/context/premultiplyalpha-test.html',
['win7', 'intel'])
self.Fail('conformance/extensions/oes-texture-float-with-image-data.html',
['win7', 'intel'])
self.Fail('conformance/extensions/oes-texture-float.html',
['win7', 'intel'])
self.Fail('conformance/limits/gl-min-attribs.html',
['win7', 'intel'])
self.Fail('conformance/limits/gl-max-texture-dimensions.html',
['win7', 'intel'])
self.Fail('conformance/limits/gl-min-textures.html',
['win7', 'intel'])
self.Fail('conformance/limits/gl-min-uniforms.html',
['win7', 'intel'])
self.Fail('conformance/rendering/gl-clear.html',
['win7', 'intel'])
self.Fail('conformance/textures/copy-tex-image-and-sub-image-2d.html',
['win7', 'intel'])
self.Fail('conformance/textures/gl-teximage.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba4444.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba5551.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-image-with-format-and-type.html',
['win7', 'intel'])
self.Fail('conformance/textures/tex-sub-image-2d.html',
['win7', 'intel'])
self.Fail('conformance/textures/texparameter-test.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-active-bind-2.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-active-bind.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-complete.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-formats-test.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-mips.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-npot.html',
['win7', 'intel'])
self.Fail('conformance/textures/texture-size-cube-maps.html',
['win7', 'intel'])
self.Fail('conformance/context/context-attribute-preserve-drawing-buffer.html',
['win7', 'intel'], bug=322770)
# Mac failures.
self.Fail('conformance/glsl/misc/shaders-with-varyings.html',
['mac'], bug=322760)
self.Fail('conformance/context/context-attribute-preserve-drawing-buffer.html',
['mac'], bug=322770)
self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
['mac'], bug=322795)
# Mac/Intel failures
self.Fail('conformance/rendering/gl-scissor-test.html',
['mac', 'intel'], bug=314997)
# The following two tests hang the WindowServer.
self.Skip('conformance/canvas/drawingbuffer-static-canvas-test.html',
['mac', 'intel'], bug=303915)
self.Skip('conformance/canvas/drawingbuffer-test.html',
['mac', 'intel'], bug=303915)
# The following three tests only fail.
# Radar 13499677
self.Fail(
'conformance/glsl/functions/glsl-function-smoothstep-gentype.html',
['mac', 'intel'], bug=225642)
# Radar 13499466
self.Fail('conformance/limits/gl-max-texture-dimensions.html',
['mac', 'intel'], bug=225642)
# Radar 13499623
self.Fail('conformance/textures/texture-size.html',
['mac', 'intel'], bug=225642)
self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html',
['mac', 'intel'], bug=322795)
self.Fail('conformance/ogles/GL/operators/operators_009_to_016.html',
['mac', 'intel'], bug=322795)
# Mac/Intel failures on 10.7
self.Skip('conformance/glsl/functions/glsl-function-asin.html',
['lion', 'intel'])
self.Skip('conformance/glsl/functions/glsl-function-dot.html',
['lion', 'intel'])
self.Skip('conformance/glsl/functions/glsl-function-faceforward.html',
['lion', 'intel'])
self.Skip('conformance/glsl/functions/glsl-function-length.html',
['lion', 'intel'])
self.Skip('conformance/glsl/functions/glsl-function-normalize.html',
['lion', 'intel'])
self.Skip('conformance/glsl/functions/glsl-function-reflect.html',
['lion', 'intel'])
self.Skip(
'conformance/glsl/functions/glsl-function-smoothstep-gentype.html',
['lion', 'intel'])
self.Skip('conformance/limits/gl-max-texture-dimensions.html',
['lion', 'intel'])
self.Skip('conformance/rendering/line-loop-tri-fan.html',
['lion', 'intel'])
self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html',
['lion'], bug=322795)
self.Skip('conformance/ogles/GL/dot/dot_001_to_006.html',
['lion', 'intel'], bug=323736)
self.Skip('conformance/ogles/GL/faceforward/faceforward_001_to_006.html',
['lion', 'intel'], bug=323736)
self.Skip('conformance/ogles/GL/length/length_001_to_006.html',
['lion', 'intel'], bug=323736)
self.Skip('conformance/ogles/GL/normalize/normalize_001_to_006.html',
['lion', 'intel'], bug=323736)
self.Skip('conformance/ogles/GL/reflect/reflect_001_to_006.html',
['lion', 'intel'], bug=323736)
self.Skip('conformance/ogles/GL/refract/refract_001_to_006.html',
['lion', 'intel'], bug=323736)
self.Skip('conformance/ogles/GL/tan/tan_001_to_006.html',
['lion', 'intel'], bug=323736)
# Mac/ATI failures
self.Skip('conformance/extensions/oes-texture-float-with-image-data.html',
['mac', 'amd'], bug=308328)
self.Skip('conformance/rendering/gl-clear.html',
['mac', 'amd'], bug=308328)
self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html',
['mac', 'amd'], bug=308328)
self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data.html',
['mac', 'amd'], bug=308328)
self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html',
['mac', 'amd'], bug=308328)
self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba4444.html',
['mac', 'amd'], bug=308328)
self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba5551.html',
['mac', 'amd'], bug=308328)
self.Fail('conformance/canva | s/drawingbuffer-test.html',
['mac', 'amd'], bug=314997)
# Linux/NVIDIA failures
self.Fail('conformance/glsl/misc/empty_main.vert.html',
['linux', ('nvidia', 0x1040 | )], bug=325884)
self.Fail('conform |
esolv_file):
return False
self.log("Updating jail hostname to `%s-%s`" % (self.vm.name, jail.jail_type))
if not self._update_hostname(jail, rc_file, host_file):
return False
self.log("Writing yum repository.")
if not self._writeYumRepoConf(yum_repo, yum_file):
return False
self.log("Creating jail.")
if not self._createJail(jail):
return False
return True
def _writeKeys(self, jail, authorized_key_file):
'''Write authorized keys'''
try:
with open(authorized_key_file, 'w') as f:
for key in self.vm.keys.values():
f.write("%s\n" % key['key'])
except IOError as e:
| msg = "Error while writing authorized keys to jail `%s`: %s"
self.log(msg % (jail.jail_type, e))
return False
return True
def _update_hostname(self, jail, rc | _file, host_file):
hostname = "%s-%s" % (self.vm.name, jail.jail_type)
self.log("Replacing hostname in %s" % rc_file)
(fh, abspath) = tempfile.mkstemp()
has_hostname = False
tmp = open(abspath, 'w')
with open(rc_file, 'r') as f:
for line in f:
if not line.startswith('hostname'):
tmp.write(line)
continue
tmp.write('hostname="%s"\n' % hostname)
has_hostname = True
if not has_hostname:
tmp.write('hostname="%s"\n' % hostname)
tmp.close()
os.close(fh)
os.remove(rc_file)
shutil.move(abspath, rc_file)
os.chmod(rc_file, 0644)
self.log("Adding new hostname in %s" % host_file)
(fh, abspath) = tempfile.mkstemp()
has_hostname = False
tmp = open(abspath, 'w')
with open(host_file, 'r') as f:
for line in f:
if not line.startswith('127.0.0.1'):
tmp.write(line)
continue
tmp.write('%s %s\n' % (line.replace('\n', ''), hostname))
tmp.close()
os.close(fh)
os.remove(host_file)
shutil.move(abspath, host_file)
os.chmod(host_file, 0644)
return True
def _writeResolvConf(self, jail, resolv_file):
'''Copy resolv.conf'''
try:
shutil.copyfile('/etc/resolv.conf', resolv_file)
except IOError as e:
self.log("Error while copying host resolv file: %s" % e)
return False
return True
def _writeYumRepoConf(self, yum_repo, yum_file):
'''Setup yum repo.d file ezjail will use.'''
try:
with open(yum_file, 'w') as f:
f.write(yum_repo['data'])
except (KeyError, IOError) as e:
self.log("Error while writing YUM repo data: %s" % e)
return False
return True
def _createJail(self, jail):
'''Create the jail'''
try:
jail.create()
except OSError as e:
msg = "Error while installing jail `%s`: %s"
self.log(msg % (jail.jail_type, e))
return False
return True
class JailStartupTask(SetupTask):
    '''
    Boots every jail attached to the VM, verifying each one is running.
    '''
    def run(self):
        '''Start each jail in turn; abort on the first failure.'''
        for jail in self.vm.jails:
            if not self._start_one(jail):
                return False
        return True

    def _start_one(self, jail):
        '''Start a single jail and confirm it reports as running.'''
        self.log("Starting jail `%s`" % jail.jail_type)
        try:
            status = jail.start()
        except OSError as e:
            self.log("Could not start jail `%s`: %s" % (jail.jail_type, e))
            return False
        self.log("Jail status: %s" % status)
        self.log("Jail `%s` started" % jail.jail_type)
        if not jail.status():
            self.log("Jail `%s` is not running!" % jail.jail_type)
            return False
        return True
class SetupWorkerThread(threading.Thread):
    """
    Worker thread that drains the setup-task queue.

    The thread has a stop() method; the run loop checks stopped()
    between tasks.  Progress flags:
      * completed  -- the run loop has finished (successfully or not)
      * successful -- every task ran without error
    """
    def __init__(self, bus, queue, outqueue, puck):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if this class is ever subclassed.
        super(SetupWorkerThread, self).__init__()
        # Not `_stop`: threading.Thread defines an internal _stop on
        # Python 3 and shadowing it with an Event breaks join().
        self._stop_event = threading.Event()
        self.running = threading.Event()
        self.successful = False
        self.completed = False
        self._queue = queue
        self._bus = bus
        self._outqueue = outqueue
        self._puck = puck

    def stop(self):
        '''Ask the run loop to exit before the next task.'''
        self._stop_event.set()

    def stopped(self):
        '''True once stop() has been requested.'''
        # is_set(): the isSet alias was removed in Python 3.12.
        return self._stop_event.is_set()

    def _step(self):
        '''
        Run a task
        @raise RuntimeError when the task failed to complete
        '''
        # Pull the next task factory (blocks up to 10s, raises
        # queue.Empty when drained) and instantiate it.
        task = self._queue.get(True, 10)(self._puck, self._outqueue)
        loginfo = (self.__class__.__name__, task.__class__.__name__)
        task.log('Starting')
        if not task.run():
            raise RuntimeError("%s error while running task `%s`" % loginfo)
        task.log('Completed')
        self._queue.task_done()

    def run(self):
        if self.completed:
            self._bus.log("%s had already been run." % self.__class__.__name__)
            return False
        if self.running.is_set():
            self._bus.log("%s is already running." % self.__class__.__name__)
            return False
        self.running.set()
        self._bus.log("%s started." % self.__class__.__name__)
        try:
            while not self.stopped():
                self._step()
        except RuntimeError as err:
            self._bus.log(str(err))
            self._empty_queue()
            self._puck.getVM().status = 'setup_failed'
            self._puck.updateStatus()
            # BUG FIX: was misspelled `self.succesful`, leaving the real
            # attribute untouched.
            self.successful = False
            self.completed = True
            return False
        except queue.Empty:
            pass
        self.completed = True
        # BUG FIX: was misspelled `self.sucessful`, so `successful` never
        # became True even after a clean run.
        self.successful = True
        self._puck.getVM().status = 'setup_complete'
        self._puck.updateStatus()
        self._outqueue.put("%s finished." % self.__class__.__name__)

    def _empty_queue(self):
        '''Discard any queued tasks without running them.'''
        while not self._queue.empty():
            try:
                self._queue.get(False)
            except queue.Empty:
                return
class SetupPlugin(plugins.SimplePlugin):
'''
Handles tasks related to virtual machine setup.
The plugin launches a separate thread to asynchronously execute the tasks.
'''
def __init__(self, puck, bus, freq=30.0):
plugins.SimplePlugin.__init__(self, bus)
self.freq = freq
self._puck = puck
self._queue = queue.Queue()
self._workerQueue = queue.Queue()
self.worker = None
self.statuses = []
def start(self):
self.bus.log('Starting up setup tasks')
self.bus.subscribe('setup', self.switch)
start.priority = 70
def stop(self):
self.bus.log('Stopping down setup task.')
self._setup_stop();
def switch(self, *args, **kwargs):
'''
This is the task switchboard. Depending on the parameters received,
it will execute the appropriate action.
'''
if not 'action' in kwargs:
self.log("Parameter `action` is missing.")
return
# Default task
def default(**kwargs):
return
return {
'start': self._setup_start,
'stop': self._setup_stop,
'status': self._setup_status,
'clear': self._clear_status
}.get(kwargs['action'], default)()
def _clear_status(self, **kwargs):
'''Clear the status list'''
del(self.statuses[:])
def _setup_stop(self, **kwargs):
self.bus.log("Received stop request.")
if self.worker and self.worker.isAlive():
self.worker.stop()
def _start_worker(self):
self.worker = SetupWorkerThread(
bus=self.bus, queue = self._queue,
outqueue = self._workerQueue, puck = self._puck
)
self.worker.start()
def _setup_start(self, **kwargs):
self.bus.log("Received start request.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.