import sys, numpy
import util.AOMLinterpolation as interp_helper
import util.AOMLnetcdf as read_netcdf
def test(p, parameters):
qc = numpy.zeros(p.n_levels(), dtype=bool)
# check for gaps in data
isTemperature = (p.t().mask==False)
isDepth = (p.z().mask==False)
isData = isTemperature & isDepth
# extract climatology data
lonlatWithTempsList1, depthColumns1, latLonsList1 = subset_climatology_data(p.longitude(), p.latitude(), "analyzed mean")
lonlatWithTempsList2, depthColumns2, latLonsList2 = subset_climatology_data(p.longitude(), p.latitude(), "standard deviations")
for i in range(p.n_levels()):
# find best interpolated temperature and standard deviation at this depth
if not isData[i]: continue
interpTemp = interp_helper.temperature_interpolation_process(p.longitude(), p.latitude(), p.z()[i], depthColumns1, latLonsList1, lonlatWithTempsList1, False, "climaInterpTemperature")
if interpTemp == 99999.99:
continue
interpTempSD = interp_helper.temperature_interpolation_process(p.longitude(), p.latitude(), p.z()[i], depthColumns2, latLonsList2, lonlatWithTempsList2, False, "climaInterpStandardDev")
if interpTempSD == 99999.99:
continue
        # flag this level if the temperature is too far from the climatological expectation (return code 4)
        qc[i] = climatology_check(p.t()[i], interpTemp, interpTempSD) >= 4
return qc
def climatology_check(temperature, interpMNTemp, interpSDTemp, sigmaFactor=5.0):
"""
temperature: Float for temperature
interpMNTemp: interpolated temperature from climatology file
interpSDTemp: interpolated standard deviation from climatology file
sigmaFactor: tolerated deviation from climatological temperature, in standard deviations.
"""
if interpMNTemp == 99999.99 or interpSDTemp == 99999.99 or interpSDTemp <= 0.0:
return 0
if abs(temperature-interpMNTemp)/interpSDTemp <= sigmaFactor:
return 1
else:
return 4
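# A quick illustration of the return codes above (hypothetical values;
# with the default sigmaFactor=5.0):
#   climatology_check(10.0, 10.5, 0.5)      # -> 1 (|diff|/sd = 1.0 <= 5.0, passes)
#   climatology_check(10.0, 13.0, 0.5)      # -> 4 (|diff|/sd = 6.0 > 5.0, flagged)
#   climatology_check(10.0, 99999.99, 0.5)  # -> 0 (missing climatology value)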
def subset_climatology_data(longitude, latitude, statType, coordRange=1, filePathName='data/woa13_00_025.nc'):
"""
longitude: float
latitude: float
statType: either 'analyzed mean' or 'standard deviations'
coordRange: degrees plus / minus around longitude and latitude to consider.
filePathName: relative path from root of climatology file
    Return three parallel structures: a list of lists of temperatures that
    maps one-to-one onto a list of lists of (latitude, longitude) tuples,
    plus a list of depth levels.
    Return three empty lists if statType is not recognized.
    """
if statType == "analyzed mean":
fieldType = "t_an"
elif statType == "standard deviations":
fieldType = "t_sd"
else:
sys.stderr.write("Cannot process climatology file with a statistical "
"field as " + statType + "\n")
return [], [], []
latLonDepthTempList, depthColumns, latLonList, time = read_netcdf.subset_data(longitude, latitude, filePathName, coordRange, True, fieldType)
return latLonDepthTempList, depthColumns, latLonList
|
{
"content_hash": "3b3770ef688b931ece68b0f1120237bc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 193,
"avg_line_length": 40.5875,
"alnum_prop": 0.7182014166923314,
"repo_name": "s-good/AutoQC",
"id": "e72aa77c89e96e76455e206ccd6af165d63e8fcc",
"size": "3397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qctests/AOML_climatology_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "732404"
},
{
"name": "Shell",
"bytes": "2489"
}
],
"symlink_target": ""
}
|
import optparse
import os, sys
from biokbase.probabilistic_annotation.DataProcessor import *
usage="%prog [options] -f filedir organismid"
description="""Main driver to run probabilistic annotation.
First generates a list of Query-independent (i.e. same for all queries)
data from the KBase ER model. This includes lists of OTUs (pre-computed
as part of the KBase) and the members of subsystems that are in those OTUs;
also included are their roles.
Then, it uses the provided organism ID to look for a JSON file and, if
it does not exist already, generates one for you from the central store (note
that the function to do this for now actually generates a new base genome ID for you
- this is ignored)...
Finally, it does a BLAST against the pre-computed data, and uses that result
to come up with annotation probabilities.
"""
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option("-r", "--regenerate", help="Regenerate database if it already exists (NOTE - takes a long time)",
action="store_true", dest="regenerate", default=False)
parser.add_option("-f", "--folder", help="Name of directory (folder) in which to store organism-independent data files (from KBase) - REQUIRED",
action="store", type="str", dest="folder", default=None)
(options, args) = parser.parse_args()
if options.folder is None:
sys.stderr.write("ERROR: Folder -f (in which organism-independent data is stored or will be generated if doesnt exist) is mandatory\n")
exit(2)
if len(args) < 2:
    sys.stderr.write("ERROR: Organism ID and probanno ID are required arguments\n")
    exit(2)
# If the output folder doesn't already exist, create it.
try:
os.mkdir(options.folder)
except OSError:
    pass
#try:
# os.mkdir(os.path.join("data", "OTU"))
#except OSError:
# pass;
# Run the extractor driver to get the data (this requires
# wrapping with wrap-python)
cmd = "probanno-ExtractorDriver -f %s" %(options.folder)
if options.regenerate:
    cmd += " -r"
os.system(cmd)
# Now we run all the organism-specific stuff.
# All of the results are saved to [organismid]/[organismid].*, where different extensions
# denote different calculated data.
organismid = args[0]
probannoid = args[1]
fasta_file, json_file = setUpQueryData(options.folder, organismid)
blast_result_file = runBlast(options.folder, organismid, fasta_file, options.folder)
roleset_probability_file = RolesetProbabilitiesMarble(options.folder, organismid, blast_result_file, options.folder)
role_probability_file = RolesetProbabilitiesToRoleProbabilities(options.folder, organismid, roleset_probability_file)
total_role_probability_file = TotalRoleProbabilities(options.folder, organismid, role_probability_file)
complex_probability_file = ComplexProbabilities(options.folder, organismid, total_role_probability_file, options.folder)
reaction_probability_file = ReactionProbabilities(options.folder, organismid, complex_probability_file, options.folder)
outfile = os.path.join(options.folder, organismid, "%s_prob.json" %(organismid))
MakeProbabilisticJsonFile(json_file, blast_result_file, roleset_probability_file, outfile, options.folder, organismid, probannoid)
|
{
"content_hash": "b9025279cd5054c6e6a4ed1467bc993c",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 144,
"avg_line_length": 46.86764705882353,
"alnum_prop": 0.7587072481957954,
"repo_name": "kbase/probabilistic_annotation",
"id": "e48c82d95dc33c79bfe160fd12070d9d97c28d8c",
"size": "3256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "internalScripts/Probability_calculation_frontend.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9055"
},
{
"name": "Perl",
"bytes": "9967"
},
{
"name": "Python",
"bytes": "375970"
},
{
"name": "R",
"bytes": "2597"
},
{
"name": "Shell",
"bytes": "8245"
}
],
"symlink_target": ""
}
|
"""
This module provides several specific functions to save figures nicely. `fancy` means latex
interpreter and generates *.eps and *.pdf
"""
import numpy as np
import matplotlib
def get_colors():
return ['k', 'orange', 'skyblue', 'lightgrey', "royalblue", 'r', "#a65628", "#f781bf", "#4daf4a"]
#return ['k', 'orange', 'skyblue', 'seagreen', "#984ea3", 'r', "#a65628", "#f781bf", "#4daf4a"]
def get_markers():
return ['D', 's', 'p', '^', "v", "o"]
def savefig(fname,fig,fancy=False,pdf_transparence=True):
import os
import subprocess
directory=os.path.dirname(os.path.abspath(fname))
if not os.path.exists(directory):
os.makedirs(directory)
fig.savefig(fname+'.png',dpi=300)
if fancy:
fig.savefig(fname+'.pdf',transparent=pdf_transparence)
#fig.savefig(fname+'.eps',transparent=True)
#os.system("epstopdf "+fname+".eps")
command = 'pdfcrop %s.pdf' % fname
subprocess.check_output(command, shell=True)
os.system('mv '+fname+'-crop.pdf '+fname+'.pdf')
def set_fancy(txtsize=16):
from matplotlib import rc
#rc('font',**{'family':'serif','serif':['Palatino'],'size':16})
rc('font',**{'size':txtsize})
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
def cmap_map(function,cmap):
""" Applies function (which should operate on vectors of shape 3:
[r, g, b], on colormap cmap. This routine will break any discontinuous points in a colormap.
"""
cdict = cmap._segmentdata
step_dict = {}
    # First get the list of points where the segments start or end
    for key in ('red', 'green', 'blue'):
        step_dict[key] = [x[0] for x in cdict[key]]
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
    old_LUT = np.array([reduced_cmap(step) for step in step_list])
    new_LUT = np.array([function(lut) for lut in old_LUT])
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i,key in enumerate(('red','green','blue')):
this_cdict = {}
for j,step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j,i]
elif new_LUT[j,i]!=old_LUT[j,i]:
this_cdict[step] = new_LUT[j,i]
        colorvector = sorted(x + (x[1],) for x in this_cdict.items())
cdict[key] = colorvector
return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)
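# Usage sketch for cmap_map (assumes matplotlib is available; the choice of
# colormap and of the brightening function is illustrative):
#   import matplotlib.pyplot as plt
#   light_jet = cmap_map(lambda x: x / 2 + 0.5, plt.cm.jet)  # brightened jet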
|
{
"content_hash": "56115acc8721429df6d761688baf3e9b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 98,
"avg_line_length": 34.04225352112676,
"alnum_prop": 0.6681836988001655,
"repo_name": "kuntzer/binfind",
"id": "a6bb27d77c79bd4598e617e92b1fbf778ace1312",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plots/figures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105889"
}
],
"symlink_target": ""
}
|
class Solution:
    def maximum69Number(self, num: int) -> int:
        # Replace the first 6 with a 9 (if one exists) and return an int,
        # as the annotated return type requires
        return int(str(num).replace('6', '9', 1))
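    # Worked example: maximum69Number(9669) -> 9969. Only the first 6 flips,
    # which maximizes the value since it is the most significant changeable digit.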
|
{
"content_hash": "dac5b1aaac346cba151e3722d1d131e3",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 48,
"avg_line_length": 38.5,
"alnum_prop": 0.5974025974025974,
"repo_name": "qiyuangong/leetcode",
"id": "8caccebd87be206df06935954a32eead47328585",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/1323_Maximum_69_Number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "12220"
},
{
"name": "Java",
"bytes": "128259"
},
{
"name": "Python",
"bytes": "439314"
}
],
"symlink_target": ""
}
|
"""
kombu.pidbox
===============
Generic process mailbox.
"""
from __future__ import absolute_import
import socket
import warnings
from collections import defaultdict, deque
from copy import copy
from itertools import count
from threading import local
from time import time
from . import Exchange, Queue, Consumer, Producer
from .clocks import LamportClock
from .common import maybe_declare, oid_from
from .exceptions import InconsistencyError
from .five import range
from .log import get_logger
from .utils import cached_property, kwdict, uuid, reprcall
REPLY_QUEUE_EXPIRES = 10
W_PIDBOX_IN_USE = """\
A node named %(hostname)r is already using this process mailbox!
Maybe you forgot to shut down the other node or did not do so properly?
Or if you meant to start multiple nodes on the same host please make sure
you give each node a unique node name!
"""
__all__ = ['Node', 'Mailbox']
logger = get_logger(__name__)
debug, error = logger.debug, logger.error
class Node(object):
#: hostname of the node.
hostname = None
#: the :class:`Mailbox` this is a node for.
mailbox = None
#: map of method name/handlers.
handlers = None
#: current context (passed on to handlers)
state = None
#: current channel.
channel = None
def __init__(self, hostname, state=None, channel=None,
handlers=None, mailbox=None):
self.channel = channel
self.mailbox = mailbox
self.hostname = hostname
self.state = state
self.adjust_clock = self.mailbox.clock.adjust
if handlers is None:
handlers = {}
self.handlers = handlers
def Consumer(self, channel=None, **options):
options.setdefault('no_ack', True)
options.setdefault('accept', self.mailbox.accept)
queue = self.mailbox.get_queue(self.hostname)
def verify_exclusive(name, messages, consumers):
if consumers:
warnings.warn(W_PIDBOX_IN_USE % {'hostname': self.hostname})
queue.on_declared = verify_exclusive
return Consumer(channel or self.channel, [queue], **options)
def handler(self, fun):
self.handlers[fun.__name__] = fun
return fun
def listen(self, channel=None, callback=None):
callback = callback or self.handle_message
        consumer = self.Consumer(channel=channel, callbacks=[callback])
consumer.consume()
return consumer
def dispatch(self, method, arguments=None,
reply_to=None, ticket=None, **kwargs):
arguments = arguments or {}
debug('pidbox received method %s [reply_to:%s ticket:%s]',
reprcall(method, (), kwargs=arguments), reply_to, ticket)
handle = reply_to and self.handle_call or self.handle_cast
try:
reply = handle(method, kwdict(arguments))
except SystemExit:
raise
except Exception as exc:
error('pidbox command error: %r', exc, exc_info=1)
reply = {'error': repr(exc)}
if reply_to:
self.reply({self.hostname: reply},
exchange=reply_to['exchange'],
routing_key=reply_to['routing_key'],
ticket=ticket)
return reply
def handle(self, method, arguments={}):
return self.handlers[method](self.state, **arguments)
def handle_call(self, method, arguments):
return self.handle(method, arguments)
def handle_cast(self, method, arguments):
return self.handle(method, arguments)
def handle_message(self, body, message=None):
destination = body.get('destination')
if message:
self.adjust_clock(message.headers.get('clock') or 0)
if not destination or self.hostname in destination:
return self.dispatch(**kwdict(body))
dispatch_from_message = handle_message
def reply(self, data, exchange, routing_key, ticket, **kwargs):
self.mailbox._publish_reply(data, exchange, routing_key, ticket,
channel=self.channel)
class Mailbox(object):
node_cls = Node
exchange_fmt = '%s.pidbox'
reply_exchange_fmt = 'reply.%s.pidbox'
#: Name of application.
namespace = None
#: Connection (if bound).
connection = None
#: Exchange type (usually direct, or fanout for broadcast).
type = 'direct'
#: mailbox exchange (init by constructor).
exchange = None
#: exchange to send replies to.
reply_exchange = None
def __init__(self, namespace,
type='direct', connection=None, clock=None, accept=None):
self.namespace = namespace
self.connection = connection
self.type = type
self.clock = LamportClock() if clock is None else clock
self.exchange = self._get_exchange(self.namespace, self.type)
self.reply_exchange = self._get_reply_exchange(self.namespace)
self._tls = local()
self.unclaimed = defaultdict(deque)
self.accept = accept
def __call__(self, connection):
bound = copy(self)
bound.connection = connection
return bound
def Node(self, hostname=None, state=None, channel=None, handlers=None):
hostname = hostname or socket.gethostname()
return self.node_cls(hostname, state, channel, handlers, mailbox=self)
def call(self, destination, command, kwargs={},
timeout=None, callback=None, channel=None):
return self._broadcast(command, kwargs, destination,
reply=True, timeout=timeout,
callback=callback,
channel=channel)
def cast(self, destination, command, kwargs={}):
return self._broadcast(command, kwargs, destination, reply=False)
def abcast(self, command, kwargs={}):
return self._broadcast(command, kwargs, reply=False)
def multi_call(self, command, kwargs={}, timeout=1,
limit=None, callback=None, channel=None):
return self._broadcast(command, kwargs, reply=True,
timeout=timeout, limit=limit,
callback=callback,
channel=channel)
def get_reply_queue(self):
oid = self.oid
return Queue('%s.%s' % (oid, self.reply_exchange.name),
exchange=self.reply_exchange,
routing_key=oid,
durable=False,
auto_delete=True,
queue_arguments={
'x-expires': int(REPLY_QUEUE_EXPIRES * 1000),
})
@cached_property
def reply_queue(self):
return self.get_reply_queue()
def get_queue(self, hostname):
return Queue('%s.%s.pidbox' % (hostname, self.namespace),
exchange=self.exchange,
durable=False,
auto_delete=True)
def _publish_reply(self, reply, exchange, routing_key, ticket,
channel=None):
chan = channel or self.connection.default_channel
exchange = Exchange(exchange, exchange_type='direct',
delivery_mode='transient',
durable=False)
producer = Producer(chan, auto_declare=False)
try:
producer.publish(
reply, exchange=exchange, routing_key=routing_key,
declare=[exchange], headers={
'ticket': ticket, 'clock': self.clock.forward(),
},
)
except InconsistencyError:
pass # queue probably deleted and no one is expecting a reply.
def _publish(self, type, arguments, destination=None,
reply_ticket=None, channel=None, timeout=None):
message = {'method': type,
'arguments': arguments,
'destination': destination}
chan = channel or self.connection.default_channel
exchange = self.exchange
if reply_ticket:
maybe_declare(self.reply_queue(channel))
message.update(ticket=reply_ticket,
reply_to={'exchange': self.reply_exchange.name,
'routing_key': self.oid})
producer = Producer(chan, auto_declare=False)
producer.publish(
message, exchange=exchange.name, declare=[exchange],
headers={'clock': self.clock.forward(),
'expires': time() + timeout if timeout else None},
)
def _broadcast(self, command, arguments=None, destination=None,
reply=False, timeout=1, limit=None,
callback=None, channel=None):
if destination is not None and \
not isinstance(destination, (list, tuple)):
raise ValueError(
'destination must be a list/tuple not {0}'.format(
type(destination)))
arguments = arguments or {}
reply_ticket = reply and uuid() or None
chan = channel or self.connection.default_channel
# Set reply limit to number of destinations (if specified)
if limit is None and destination:
limit = destination and len(destination) or None
self._publish(command, arguments, destination=destination,
reply_ticket=reply_ticket,
channel=chan,
timeout=timeout)
if reply_ticket:
return self._collect(reply_ticket, limit=limit,
timeout=timeout,
callback=callback,
channel=chan)
def _collect(self, ticket,
limit=None, timeout=1, callback=None,
channel=None, accept=None):
if accept is None:
accept = self.accept
chan = channel or self.connection.default_channel
queue = self.reply_queue
        consumer = Consumer(chan, [queue], accept=accept, no_ack=True)
responses = []
unclaimed = self.unclaimed
adjust_clock = self.clock.adjust
try:
return unclaimed.pop(ticket)
except KeyError:
pass
def on_message(body, message):
# ticket header added in kombu 2.5
header = message.headers.get
adjust_clock(header('clock') or 0)
expires = header('expires')
if expires and time() > expires:
return
this_id = header('ticket', ticket)
if this_id == ticket:
if callback:
callback(body)
responses.append(body)
else:
unclaimed[this_id].append(body)
consumer.register_callback(on_message)
try:
with consumer:
for i in limit and range(limit) or count():
try:
self.connection.drain_events(timeout=timeout)
except socket.timeout:
break
return responses
finally:
chan.after_reply_message_received(queue.name)
def _get_exchange(self, namespace, type):
return Exchange(self.exchange_fmt % namespace,
type=type,
durable=False,
delivery_mode='transient')
def _get_reply_exchange(self, namespace):
return Exchange(self.reply_exchange_fmt % namespace,
type='direct',
durable=False,
delivery_mode='transient')
@cached_property
def oid(self):
try:
return self._tls.OID
except AttributeError:
oid = self._tls.OID = oid_from(self)
return oid
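# Minimal usage sketch (illustrative only, not part of this module; assumes a
# broker reachable at the given URL):
#
#   from kombu import Connection
#   mailbox = Mailbox('myapp', type='fanout')
#   with Connection('amqp://guest:guest@localhost//') as conn:
#       node = mailbox(conn).Node('worker1', channel=conn.default_channel,
#                                 handlers={'ping': lambda state: 'pong'})
#       node.listen()
#       conn.drain_events(timeout=1)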
|
{
"content_hash": "29f8c9d0966c03a2cd03f639900ef16f",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 78,
"avg_line_length": 34.924418604651166,
"alnum_prop": 0.5700016647244881,
"repo_name": "romank0/kombu",
"id": "a619ce28a749b34a42e911892b6ebd9d760f6367",
"size": "12014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kombu/pidbox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "629548"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
TOKENIZERS = {
'bert': BertTokenizer,
'xlm': XLMTokenizer,
'xlmr': XLMRobertaTokenizer,
}
PANX_LANGUAGES = 'ar he vi id jv ms tl eu ml ta te af nl en de el bn hi mr ur fa fr it pt es bg ru ja ka ko th sw yo my zh kk tr et fi hu qu pl uk az lt pa gu ro'.split(' ')
UDPOS_LANGUAGES = 'af ar bg de el en es et eu fa fi fr he hi hu id it ja kk ko mr nl pt ru ta te th tl tr ur vi yo zh lt pl uk wo ro'.split(' ')
def panx_tokenize_preprocess(args):
def _preprocess_one_file(infile, outfile, idxfile, tokenizer, max_len):
if not os.path.exists(infile):
            print(f'{infile} does not exist')
return 0
special_tokens_count = 3 if isinstance(tokenizer, XLMRobertaTokenizer) else 2
max_seq_len = max_len - special_tokens_count
subword_len_counter = idx = 0
with open(infile, "rt") as fin, open(outfile, "w") as fout, open(idxfile, "w") as fidx:
for line in fin:
line = line.strip()
if not line:
fout.write('\n')
fidx.write('\n')
idx += 1
subword_len_counter = 0
continue
items = line.split()
token = items[0].strip()
if len(items) == 2:
label = items[1].strip()
else:
label = 'O'
current_subwords_len = len(tokenizer.tokenize(token))
if (current_subwords_len == 0 or current_subwords_len > max_seq_len) and len(token) != 0:
token = tokenizer.unk_token
current_subwords_len = 1
if (subword_len_counter + current_subwords_len) > max_seq_len:
fout.write(f"\n{token}\t{label}\n")
fidx.write(f"\n{idx}\n")
subword_len_counter = current_subwords_len
else:
fout.write(f"{token}\t{label}\n")
fidx.write(f"{idx}\n")
subword_len_counter += current_subwords_len
return 1
model_type = args.model_type
tokenizer = TOKENIZERS[model_type].from_pretrained(args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
for lang in args.languages.split(','):
out_dir = os.path.join(args.output_dir, lang)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if lang == 'en':
files = ['dev', 'test', 'train']
else:
files = ['dev', 'test']
for file in files:
infile = os.path.join(args.data_dir, f'{file}-{lang}.tsv')
outfile = os.path.join(out_dir, "{}.{}".format(file, args.model_name_or_path))
idxfile = os.path.join(out_dir, "{}.{}.idx".format(file, args.model_name_or_path))
if os.path.exists(outfile) and os.path.exists(idxfile):
print(f'{outfile} and {idxfile} exist')
else:
code = _preprocess_one_file(infile, outfile, idxfile, tokenizer, args.max_len)
if code > 0:
print(f'finish preprocessing {outfile}')
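# Worked example of the splitting logic in _preprocess_one_file above
# (hypothetical, with max_seq_len=4): tokens whose subword lengths are
# [2, 2, 1] fit as 2, then 2+2=4; the third token would overflow (4+1 > 4),
# so it is written after a blank line together with a repeated sentence
# index, and the subword counter restarts at 1.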
def panx_preprocess(args):
def _process_one_file(infile, outfile):
lines = open(infile, 'r').readlines()
if lines[-1].strip() == '':
lines = lines[:-1]
with open(outfile, 'w') as fout:
for l in lines:
items = l.strip().split('\t')
if len(items) == 2:
label = items[1].strip()
idx = items[0].find(':')
if idx != -1:
token = items[0][idx+1:].strip()
if 'test' in infile:
fout.write(f'{token}\n')
else:
fout.write(f'{token}\t{label}\n')
else:
fout.write('\n')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for lg in PANX_LANGUAGES:
for split in ['train', 'test', 'dev']:
infile = os.path.join(args.data_dir, f'{lg}-{split}')
outfile = os.path.join(args.output_dir, f'{split}-{lg}.tsv')
_process_one_file(infile, outfile)
def udpos_tokenize_preprocess(args):
def _preprocess_one_file(infile, outfile, idxfile, tokenizer, max_len):
if not os.path.exists(infile):
print(f'{infile} does not exist')
return
subword_len_counter = idx = 0
special_tokens_count = 3 if isinstance(tokenizer, XLMRobertaTokenizer) else 2
max_seq_len = max_len - special_tokens_count
with open(infile, "rt") as fin, open(outfile, "w") as fout, open(idxfile, "w") as fidx:
for line in fin:
line = line.strip()
if len(line) == 0 or line == '':
fout.write('\n')
fidx.write('\n')
idx += 1
subword_len_counter = 0
continue
items = line.split()
if len(items) == 2:
label = items[1].strip()
else:
label = "X"
token = items[0].strip()
current_subwords_len = len(tokenizer.tokenize(token))
if (current_subwords_len == 0 or current_subwords_len > max_seq_len) and len(token) != 0:
token = tokenizer.unk_token
current_subwords_len = 1
if (subword_len_counter + current_subwords_len) > max_seq_len:
fout.write(f"\n{token}\t{label}\n")
fidx.write(f"\n{idx}\n")
subword_len_counter = current_subwords_len
else:
fout.write(f"{token}\t{label}\n")
fidx.write(f"{idx}\n")
subword_len_counter += current_subwords_len
model_type = args.model_type
tokenizer = TOKENIZERS[model_type].from_pretrained(args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
for lang in args.languages.split(','):
out_dir = os.path.join(args.output_dir, lang)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if lang == 'en':
files = ['dev', 'test', 'train']
else:
files = ['dev', 'test']
for file in files:
infile = os.path.join(args.data_dir, "{}-{}.tsv".format(file, lang))
outfile = os.path.join(out_dir, "{}.{}".format(file, args.model_name_or_path))
idxfile = os.path.join(out_dir, "{}.{}.idx".format(file, args.model_name_or_path))
if os.path.exists(outfile) and os.path.exists(idxfile):
print(f'{outfile} and {idxfile} exist')
else:
_preprocess_one_file(infile, outfile, idxfile, tokenizer, args.max_len)
print(f'finish preprocessing {outfile}')
def udpos_preprocess(args):
def _read_one_file(file):
data = []
sent, tag, lines = [], [], []
for line in open(file, 'r'):
items = line.strip().split('\t')
if len(items) != 10:
empty = all(w == '_' for w in sent)
num_empty = sum([int(w == '_') for w in sent])
if num_empty == 0 or num_empty < len(sent) - 1:
data.append((sent, tag, lines))
sent, tag, lines = [], [], []
else:
sent.append(items[1].strip())
tag.append(items[3].strip())
lines.append(line.strip())
assert len(sent) == int(items[0]), 'line={}, sent={}, tag={}'.format(line, sent, tag)
return data
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def remove_empty_space(data):
new_data = {}
for split in data:
new_data[split] = []
for sent, tag, lines in data[split]:
new_sent = [''.join(w.replace('\u200c', '').split(' ')) for w in sent]
lines = [line.replace('\u200c', '') for line in lines]
assert len(" ".join(new_sent).split(' ')) == len(tag)
new_data[split].append((new_sent, tag, lines))
return new_data
def check_file(file):
for i, l in enumerate(open(file)):
items = l.strip().split('\t')
assert len(items[0].split(' ')) == len(items[1].split(' ')), 'idx={}, line={}'.format(i, l)
def _write_files(data, output_dir, lang, suffix):
for split in data:
if len(data[split]) > 0:
prefix = os.path.join(output_dir, f'{split}-{lang}')
if suffix == 'mt':
with open(prefix + '.mt.tsv', 'w') as fout:
for idx, (sent, tag, _) in enumerate(data[split]):
newline = '\n' if idx != len(data[split]) - 1 else ''
if split == 'test':
                                fout.write('{}{}'.format(' '.join(sent), newline))
else:
fout.write('{}\t{}{}'.format(' '.join(sent), ' '.join(tag), newline))
check_file(prefix + '.mt.tsv')
print(' - finish checking ' + prefix + '.mt.tsv')
elif suffix == 'tsv':
with open(prefix + '.tsv', 'w') as fout:
for sidx, (sent, tag, _) in enumerate(data[split]):
for widx, (w, t) in enumerate(zip(sent, tag)):
newline = '' if (sidx == len(data[split]) - 1) and (widx == len(sent) - 1) else '\n'
if split == 'test':
fout.write('{}{}'.format(w, newline))
else:
fout.write('{}\t{}{}'.format(w, t, newline))
fout.write('\n')
elif suffix == 'conll':
with open(prefix + '.conll', 'w') as fout:
for _, _, lines in data[split]:
for l in lines:
fout.write(l.strip() + '\n')
fout.write('\n')
print(f'finish writing file to {prefix}.{suffix}')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for root, dirs, files in os.walk(args.data_dir):
lg = root.strip().split('/')[-1]
if root == args.data_dir or lg not in UDPOS_LANGUAGES:
continue
data = {k: [] for k in ['train', 'dev', 'test']}
for f in sorted(files):
if f.endswith('conll'):
file = os.path.join(root, f)
examples = _read_one_file(file)
if 'train' in f:
data['train'].extend(examples)
elif 'dev' in f:
data['dev'].extend(examples)
elif 'test' in f:
data['test'].extend(examples)
else:
print('split not found: ', file)
print(' - finish reading {}, {}'.format(file, [(k, len(v)) for k,v in data.items()]))
data = remove_empty_space(data)
for sub in ['tsv']:
_write_files(data, args.output_dir, lg, sub)
def pawsx_preprocess(args):
def _preprocess_one_file(infile, outfile, remove_label=False):
data = []
for i, line in enumerate(open(infile, 'r')):
if i == 0:
continue
items = line.strip().split('\t')
sent1 = ' '.join(items[1].strip().split(' '))
sent2 = ' '.join(items[2].strip().split(' '))
label = items[3]
data.append([sent1, sent2, label])
with open(outfile, 'w') as fout:
writer = csv.writer(fout, delimiter='\t')
for sent1, sent2, label in data:
if remove_label:
writer.writerow([sent1, sent2])
else:
writer.writerow([sent1, sent2, label])
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
split2file = {'train': 'train', 'test': 'test_2k', 'dev': 'dev_2k'}
for lang in ['en', 'de', 'es', 'fr', 'ja', 'ko', 'zh']:
for split in ['train', 'test', 'dev']:
if split == 'train' and lang != 'en':
continue
file = split2file[split]
infile = os.path.join(args.data_dir, lang, "{}.tsv".format(file))
outfile = os.path.join(args.output_dir, "{}-{}.tsv".format(split, lang))
_preprocess_one_file(infile, outfile, remove_label=(split == 'test'))
print(f'finish preprocessing {outfile}')
def xnli_preprocess(args):
def _preprocess_file(infile, output_dir, split):
all_langs = defaultdict(list)
for i, line in enumerate(open(infile, 'r')):
if i == 0:
continue
items = line.strip().split('\t')
lang = items[0].strip()
label = "contradiction" if items[1].strip() == "contradictory" else items[1].strip()
sent1 = ' '.join(items[6].strip().split(' '))
sent2 = ' '.join(items[7].strip().split(' '))
all_langs[lang].append((sent1, sent2, label))
print(f'# langs={len(all_langs)}')
for lang, pairs in all_langs.items():
outfile = os.path.join(output_dir, '{}-{}.tsv'.format(split, lang))
with open(outfile, 'w') as fout:
writer = csv.writer(fout, delimiter='\t')
for (sent1, sent2, label) in pairs:
if split == 'test':
writer.writerow([sent1, sent2])
else:
writer.writerow([sent1, sent2, label])
print(f'finish preprocess {outfile}')
def _preprocess_train_file(infile, outfile):
with open(outfile, 'w') as fout:
writer = csv.writer(fout, delimiter='\t')
for i, line in enumerate(open(infile, 'r')):
if i == 0:
continue
items = line.strip().split('\t')
sent1 = ' '.join(items[0].strip().split(' '))
sent2 = ' '.join(items[1].strip().split(' '))
label = "contradiction" if items[2].strip() == "contradictory" else items[2].strip()
writer.writerow([sent1, sent2, label])
print(f'finish preprocess {outfile}')
infile = os.path.join(args.data_dir, 'XNLI-MT-1.0/multinli/multinli.train.en.tsv')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
outfile = os.path.join(args.output_dir, 'train-en.tsv')
_preprocess_train_file(infile, outfile)
for split in ['test', 'dev']:
infile = os.path.join(args.data_dir, 'XNLI-1.0/xnli.{}.tsv'.format(split))
print(f'reading file {infile}')
_preprocess_file(infile, args.output_dir, split)
def tatoeba_preprocess(args):
lang3_dict = {
'afr':'af', 'ara':'ar', 'bul':'bg', 'ben':'bn',
'deu':'de', 'ell':'el', 'spa':'es', 'est':'et',
'eus':'eu', 'pes':'fa', 'fin':'fi', 'fra':'fr',
'heb':'he', 'hin':'hi', 'hun':'hu', 'ind':'id',
'ita':'it', 'jpn':'ja', 'jav':'jv', 'kat':'ka',
'kaz':'kk', 'kor':'ko', 'mal':'ml', 'mar':'mr',
'nld':'nl', 'por':'pt', 'rus':'ru', 'swh':'sw',
'tam':'ta', 'tel':'te', 'tha':'th', 'tgl':'tl',
'tur':'tr', 'urd':'ur', 'vie':'vi', 'cmn':'zh',
'eng':'en', 'aze': 'az', 'lit': 'lt', 'pol': 'pl',
'ukr': 'uk', 'ron': 'ro'
}
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for sl3, sl2 in lang3_dict.items():
if sl3 != 'eng':
src_file = f'{args.data_dir}/tatoeba.{sl3}-eng.{sl3}'
tgt_file = f'{args.data_dir}/tatoeba.{sl3}-eng.eng'
src_out = f'{args.output_dir}/{sl2}-en.{sl2}'
tgt_out = f'{args.output_dir}/{sl2}-en.en'
shutil.copy(src_file, src_out)
tgts = [l.strip() for l in open(tgt_file)]
idx = range(len(tgts))
data = zip(tgts, idx)
with open(tgt_out, 'w') as ftgt:
for t, i in sorted(data, key=lambda x: x[0]):
ftgt.write(f'{t}\n')
def xquad_preprocess(args):
# Remove the test annotations to prevent accidental cheating
remove_qa_test_annotations(args.data_dir)
def mlqa_preprocess(args):
# Remove the test annotations to prevent accidental cheating
remove_qa_test_annotations(args.data_dir)
def tydiqa_preprocess(args):
LANG2ISO = {'arabic': 'ar', 'bengali': 'bn', 'english': 'en', 'finnish': 'fi',
'indonesian': 'id', 'korean': 'ko', 'russian': 'ru',
'swahili': 'sw', 'telugu': 'te'}
assert os.path.exists(args.data_dir)
train_file = os.path.join(args.data_dir, 'tydiqa-goldp-v1.1-train.json')
os.makedirs(args.output_dir, exist_ok=True)
# Split the training file into language-specific files
lang2data = defaultdict(list)
with open(train_file, 'r') as f_in:
data = json.load(f_in)
version = data['version']
for doc in data['data']:
for par in doc['paragraphs']:
context = par['context']
for qa in par['qas']:
question = qa['question']
question_id = qa['id']
example_lang = question_id.split('-')[0]
q_id = question_id.split('-')[-1]
for answer in qa['answers']:
a_start, a_text = answer['answer_start'], answer['text']
a_end = a_start + len(a_text)
assert context[a_start:a_end] == a_text
lang2data[example_lang].append({'paragraphs': [{
'context': context,
'qas': [{'answers': qa['answers'],
'question': question,
'id': q_id}]}]})
for lang, data in lang2data.items():
out_file = os.path.join(
args.output_dir, 'tydiqa.goldp.%s.train.json' % LANG2ISO[lang])
with open(out_file, 'w') as f:
json.dump({'data': data, 'version': version}, f)
# Rename the dev files
dev_dir = os.path.join(args.data_dir, 'tydiqa-goldp-v1.1-dev')
assert os.path.exists(dev_dir)
for lang, iso in LANG2ISO.items():
src_file = os.path.join(dev_dir, 'tydiqa-goldp-dev-%s.json' % lang)
dst_file = os.path.join(dev_dir, 'tydiqa.goldp.%s.dev.json' % iso)
os.rename(src_file, dst_file)
# Remove the test annotations to prevent accidental cheating
remove_qa_test_annotations(dev_dir)
def remove_qa_test_annotations(test_dir):
assert os.path.exists(test_dir)
for file_name in os.listdir(test_dir):
new_data = []
test_file = os.path.join(test_dir, file_name)
with open(test_file, 'r') as f:
data = json.load(f)
version = data['version']
for doc in data['data']:
for par in doc['paragraphs']:
context = par['context']
for qa in par['qas']:
question = qa['question']
question_id = qa['id']
for answer in qa['answers']:
a_start, a_text = answer['answer_start'], answer['text']
a_end = a_start + len(a_text)
assert context[a_start:a_end] == a_text
new_data.append({'paragraphs': [{
'context': context,
'qas': [{'answers': [{'answer_start': 0, 'text': ''}],
'question': question,
'id': question_id}]}]})
with open(test_file, 'w') as f:
json.dump({'data': new_data, 'version': version}, f)
def xcopa_preprocess(args):
assert os.path.exists(args.data_dir)
# Remove the test annotations to prevent accidental cheating
for file_name in os.listdir(args.data_dir):
if not file_name.startswith('test'):
continue
test_file = os.path.join(args.data_dir, file_name)
new_examples = []
with open(test_file, 'r') as f:
for row in f:
example = json.loads(row)
# A label is still required by the dataset format, so we set it to 0
# for all examples
example['label'] = 0
new_examples.append(example)
with open(test_file, 'w') as f:
for example in new_examples:
json_ex = json.dumps(example)
f.write(json_ex + '\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output data dir where any processed files will be written to.")
parser.add_argument("--task", default="panx", type=str, required=True,
help="The task name")
parser.add_argument("--model_name_or_path", default="bert-base-multilingual-cased", type=str,
help="The pre-trained model")
parser.add_argument("--model_type", default="bert", type=str,
help="model type")
parser.add_argument("--max_len", default=512, type=int,
help="the maximum length of sentences")
parser.add_argument("--do_lower_case", action='store_true',
help="whether to do lower case")
parser.add_argument("--cache_dir", default=None, type=str,
help="cache directory")
parser.add_argument("--languages", default="en", type=str,
help="process language")
parser.add_argument("--remove_last_token", action='store_true',
help="whether to remove the last token")
parser.add_argument("--remove_test_label", action='store_true',
help="whether to remove test set label")
args = parser.parse_args()
    if args.task == 'panx_tokenize':
        panx_tokenize_preprocess(args)
    elif args.task == 'panx':
        panx_preprocess(args)
    elif args.task == 'udpos_tokenize':
        udpos_tokenize_preprocess(args)
    elif args.task == 'udpos':
        udpos_preprocess(args)
    elif args.task == 'pawsx':
        pawsx_preprocess(args)
    elif args.task == 'xnli':
        xnli_preprocess(args)
    elif args.task == 'tatoeba':
        tatoeba_preprocess(args)
    elif args.task == 'xquad':
        xquad_preprocess(args)
    elif args.task == 'mlqa':
        mlqa_preprocess(args)
    elif args.task == 'tydiqa':
        tydiqa_preprocess(args)
    elif args.task == 'xcopa':
        xcopa_preprocess(args)
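    # Example invocation (hypothetical paths):
    #   python utils_preprocess.py --task panx --data_dir download/panx --output_dir data/panx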
|
{
"content_hash": "5c60c0f066573791820f083d4a7dbad2",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 173,
"avg_line_length": 38.45255474452555,
"alnum_prop": 0.5668659832953683,
"repo_name": "google-research/xtreme",
"id": "1ef79a48f7ecb7bccc0672f4322bab3e8e4708c3",
"size": "21671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils_preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "84598"
},
{
"name": "Shell",
"bytes": "51220"
}
],
"symlink_target": ""
}
|
import requests
import json
import re
import sys, getopt
DEBUG = False
def get_json_data_from_request(subreddit, custom_headers, limit):
url = "http://www.reddit.com/r/" + subreddit + ".json?limit=" + `limit`
r = requests.get(url, headers=custom_headers)
    if r.status_code != 200:
        raise RuntimeError("Error when calling " + url + " : status_code: " + str(r.status_code))
else:
json_music = r.json()
if DEBUG:
print("Data got from " + url)
with open(subreddit + '.json', 'w') as fp:
json.dump(json_music, fp)
print("JSON data written in file " + subreddit + ".json")
return json_music
def json_data_to_youtube_list(json_music):
children = json_music["data"]["children"]
playlist = []
for child in children:
data_child = child["data"]
url = data_child["url"]
try:
id = get_youtube_id(url)
playlist.append({"id":id})
except ValueError:
pass
return playlist
def get_youtube_id(youtube_link):
m = re.compile("https?://www.youtube.com/.*v=(.{11,11})").match(youtube_link)
if m:
return m.group(1)
m = re.compile("https?://youtu.be/(.{11,11})").match(youtube_link)
if m:
return m.group(1)
else:
raise ValueError("Error : can't find youtube video id for link " + youtube_link)
def create_youtube_playlist(subreddit, limit):
custom_headers = {'user-agent': 'reddit music player v0.1, by /u/rboyboy'}
json_music = get_json_data_from_request(subreddit, custom_headers, limit)
return json_data_to_youtube_list(json_music)
# MAIN
def main(argv):
actual_reddit = "music"
limit = 25
helper="Usage : \"python " + sys.argv[0] + " [-h] [--subreddit ...] [-l ...]\nOptions and arguments :\n-h : Display helper\n--subreddit : The name of the subreddit (Default : \"music\")\n-l : Maximum number of posts parsed by the script (default : 25, min 0, max 100)"
# Get parameters
try:
opts, args = getopt.getopt(argv, "hl:", ["subreddit="])
except getopt.GetoptError:
if DEBUG:
print "GetoptError"
print helper
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print(helper)
sys.exit()
if opt == '--subreddit':
actual_reddit = arg
if opt == '-l':
try:
limit=int(arg)
except ValueError:
print "Invalid parameter for -l : " + arg + " ! We need an integer between 0 and 100"
sys.exit(2)
if (limit<0) or (limit>100):
print "Invalid parameter for -l : " + arg + " ! We need an integer between 0 and 100"
sys.exit(2)
    playlist = create_youtube_playlist(actual_reddit, limit)
output_filename = "playlist_" + actual_reddit + ".json"
with open(output_filename, 'w') as out:
json.dump(playlist, out)
if DEBUG:
print "Playlist written in " + output_filename
if __name__ == "__main__":
main(sys.argv[1:])
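# Example invocation (writes playlist_listentothis.json in the working directory):
#   python reddit_youtube.py --subreddit listentothis -l 50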
|
{
"content_hash": "1928eb430e604461f86449265071abfa",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 288,
"avg_line_length": 28.470588235294116,
"alnum_prop": 0.6167355371900827,
"repo_name": "rboyboy/reddit-youtube-playlist",
"id": "0c04c265f6bb1ed5aaf33750e497643f7b260089",
"size": "2904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit_youtube.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4313"
}
],
"symlink_target": ""
}
|
from sahara.plugins import utils as plugin_utils
from sahara.service.edp.spark import engine as shell_engine
class ShellEngine(shell_engine.SparkShellJobEngine):
def __init__(self, cluster):
super(ShellEngine, self).__init__(cluster)
self.master = plugin_utils.get_instance(cluster, "master")
@staticmethod
def job_type_supported(job_type):
return (job_type in shell_engine.SparkShellJobEngine.
get_supported_job_types())
|
{
"content_hash": "861559253a2b6f928fc8eb6b4a98f481",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.70020964360587,
"repo_name": "tellesnobrega/sahara",
"id": "77484271ad1a8daa00ab8f3ec5ead929e452dd27",
"size": "1067",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sahara/plugins/spark/shell_engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "3354711"
},
{
"name": "Shell",
"bytes": "56856"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
from jinja2_gtm import __version__
setup(
name = "django-jinja2-gtm",
version = __version__,
author = "Damien Barchowsky",
author_email = "dbarchowsky@gmail.com",
description = ("Jinja2 macro for injecting Google Tag Manager code in templates."),
license = "MIT",
keywords = "django jinja2 generic-views",
url = "https://github.com/dbarchowsky/django-jinja2-gtm",
packages=find_packages(),
include_package_data=True,
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
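# Typical local build/install workflow for this setup script (standard
# setuptools commands; the dist filename depends on __version__):
#   python setup.py sdist
#   pip install dist/django-jinja2-gtm-<version>.tar.gz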
|
{
"content_hash": "3979ca8a139c137b11bc1a79a89cd853",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 33.86666666666667,
"alnum_prop": 0.6653543307086615,
"repo_name": "dbarchowsky/django-jinja2-gtm",
"id": "a2f8c0e5932fed4a26d9ac21473203a8be8cf5a4",
"size": "1016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "631"
},
{
"name": "Python",
"bytes": "1037"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class Config(AppConfig):
name = 'apps.poll'
verbose_name = _(u'Poll')
|
{
"content_hash": "e2eec64f7c18361b080211e77fc3de9f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.7235294117647059,
"repo_name": "dotKom/studlan",
"id": "4ce19c7b060d10d55da7d602bb3a4f77893dc48e",
"size": "195",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/poll/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5086"
},
{
"name": "HTML",
"bytes": "74899"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "Python",
"bytes": "159175"
},
{
"name": "Shell",
"bytes": "3027"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import math
import logging
import spotipy
import spotipy.util
from spotipy.oauth2 import SpotifyClientCredentials
class SpotifyPlayer(object):
"""
    Class for interacting with Spotify. Relies on spotipy. Gets a token for a particular
user, and creates spotipy instance from this token for user operations.
Uses client credential flow for getting and searching for track ids.
"""
def __init__(self, spfy_user_id, spfy_app_id='', spfy_app_secret=''):
"""
Constructor for the spotify player class
:param spfy_app_id: id of the client application registered with spotify
:param spfy_app_secret: secret of the client application registered with spotify
        :param spfy_user_id: user id whose playlists we want to change
"""
playlist_write_scope = 'playlist-modify-public'
client_credentials_manager = None
auth_spotipy = None
self.user_id=spfy_user_id
        # If client ID & secret are specified, initialize with the supplied values. Otherwise, assume
        # that they have been set as environment variables.
if spfy_app_id != '' and spfy_app_secret != '':
client_credentials_manager = SpotifyClientCredentials(spfy_app_id, spfy_app_secret)
auth_spotipy = spotipy.Spotify(auth=spotipy.util.prompt_for_user_token(
spfy_user_id, playlist_write_scope, spfy_app_id, spfy_app_secret))
else:
client_credentials_manager = SpotifyClientCredentials()
auth_spotipy = spotipy.Spotify(auth=spotipy.util.prompt_for_user_token(spfy_user_id, playlist_write_scope))
self.auth_spotipy = auth_spotipy
self.ccm_spotipy = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
def create_playlist(self, playlist_name, description=''):
"""
Creates playlist with the specified name. Description is currently ignored.
"""
logging.debug('creating playlist for user {user}: {playlist} --- {description}'.format(
user=self.user_id, playlist=playlist_name, description=description))
result = None
try:
result = self.auth_spotipy.user_playlist_create(self.user_id, playlist_name)
except:
e = sys.exc_info()[0]
logging.error('error in creating playlist for user {user}: {playlist} --- {description}... error: {error}'.format(
user=self.user_id, playlist=playlist_name, description=description, error=e))
raise
playlist_id = result['id']
logging.debug('created playlist {name} with id {id}'.format(name=playlist_name, id=playlist_id))
return playlist_id
def add_track_ids_to_playlist(self, user_id, playlist_id, track_ids):
"""
Add track ids to specified playlist (spotify can only add 100 at a time)
Adds in batches of batch_size
:param user_id: username of the user who owns the specified playlist
:param playlist_id: playlist to which we're adding tracks
:param track_ids: list of track ids that we're adding to playlist
"""
        batch_size = 75
        deduplicated_track_ids = []
        # Try to pre-populate track ids with whatever's already in the playlist
        if self.check_playlist_exists(user_id, playlist_id):
            deduplicated_track_ids = self.get_tracks_in_playlist(user_id, playlist_id)
        for i in track_ids:
            if i not in deduplicated_track_ids:
                deduplicated_track_ids.append(i)
        for i in range(0, math.ceil(len(deduplicated_track_ids) / batch_size)):
            # get current slice of track ids
            max_ind = min(len(deduplicated_track_ids), (i+1)*batch_size)
            logging.debug('Max_index: {}'.format(max_ind))
            track_id_slice = deduplicated_track_ids[i*batch_size:max_ind]
logging.info('Attempting to add {} tracks'.format(len(track_id_slice)))
try:
self.auth_spotipy.user_playlist_add_tracks(user_id, playlist_id, track_id_slice)
except:
e = sys.exc_info()[0]
logging.error('error in adding tracks {tracks} to playlist {playlist} for user {user} :{e}'.format(
tracks=track_id_slice, playlist=playlist_id, user=user_id, e=e))
raise
def get_tracks_in_playlist(self, user_id, playlist_id):
track_ids = []
playlist_tracks = self.auth_spotipy.user_playlist_tracks(user_id, playlist_id=playlist_id)
track_ids = [playlist_track['track']['id'] for playlist_track in playlist_tracks['items']]
return track_ids
def add_tracks_to_playlist_by_name(self, user_id, playlist_id, track_info):
"""
Adds a track to the specified playlist id using track_info as a best-guess
:param track_info: A list of dictionaries containing either 'artist' and 'track' keys or a 'blob'
:param playlist_id: spotify's numerical representation of a particular playlist
"""
track_ids = self.get_track_ids_from_track_info(track_info)
self.add_track_ids_to_playlist(user_id, playlist_id, track_ids)
def get_playlist_id_from_name(self, user_id, playlist_name):
"""
:param playlist_name: friendly name of the playlist
:param user_id: user_id whose playlist we want
"""
if user_id == '':
user_id = self.user_id
playlists = []
try:
playlists = self.auth_spotipy.user_playlists(user_id)
except:
e = sys.exc_info()[0]
logging.error('error in getting playlist {playlist_name} for user {user}: {e}'.format(
user=user_id, playlist_name=playlist_name, e=e))
raise
for playlist in playlists['items']:
if playlist['owner']['id'] == user_id and playlist['name'] == playlist_name:
return playlist['id']
logging.warning('no playlist with name {name} found in {user}\'s account'.format(name=playlist_name,user=user_id))
return None
def check_playlist_exists(self, user_id, playlist_name):
"""
:param user_id: spotify user id
:param playlist_name: the friendly name of the playlist we're checking for
"""
playlist_id = self.get_playlist_id_from_name(user_id, playlist_name)
playlists = []
try:
playlists = self.auth_spotipy.user_playlists(user_id)
except:
e = sys.exc_info()[0]
logging.error('error in getting playlist {playlist_id} for user {user}: {e}'.format(
user=user_id, playlist_id=playlist_id, e=e))
raise
for playlist in playlists['items']:
if playlist['owner']['id'] == user_id and playlist['id'] == playlist_id:
return True
return False
def get_track_ids_from_track_info(self, track_info):
"""
Returns the best guess track id for the supplied track info
:param track_info: A list of dictionaries containing either 'artist' and 'track' keys or a 'blob'
"""
track_ids = []
for track in track_info:
track_id = self.get_track_id_from_track_info(track)
logging.info("retrieved track id {} for {}".format(track_id, track))
track_ids.append(track_id)
return track_ids
def get_track_id_from_track_info(self, track_info):
returned_tracks = None
try:
returned_tracks = self.search_track(track_info)['tracks']['items']
except:
e = sys.exc_info()[0]
logging.error('error in searching for track {track}: {e}'.format(track=track_info, e=e))
raise
if len(returned_tracks) == 0:
logging.warning('Unable to retrieve id for {track}, skipping'.format(track=track_info))
return 0
return returned_tracks[0]['id']
def search_track(self, track_info, limit=1):
"""
Gets list of possible tracks for the supplied track information
:param track_info: A list of dictionaries containing either 'artist' and 'track' keys or a 'blob'
"""
logging.debug('track info {track_info}'.format(track_info=track_info))
query = None
try:
query = '{track} {artist}'.format(track=track_info['track'], artist=track_info['artist'])
except:
query = '{}'.format(track_info['blob'])
logging.info('searching spotify with query {query}'.format(query=query))
try:
retrieved_tracks = self.ccm_spotipy.search(query, limit=limit)
except:
e = sys.exc_info()[0]
logging.error('error in retrieving tracks for {track_info}... error: {error}'.format(
track_info=track_info, error=e))
raise
logging.debug('retrieved tracks {tracks}'.format(tracks=retrieved_tracks))
return retrieved_tracks
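# Minimal usage sketch (illustrative; assumes SPOTIPY_CLIENT_ID,
# SPOTIPY_CLIENT_SECRET and SPOTIPY_REDIRECT_URI are set in the environment,
# and that 'some_user' authorizes the prompted token):
#   player = SpotifyPlayer('some_user')
#   playlist_id = player.create_playlist('reddit music')
#   player.add_tracks_to_playlist_by_name('some_user', playlist_id,
#       [{'artist': 'Radiohead', 'track': 'Reckoner'}])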
|
{
"content_hash": "c1270878e42519aed21f225a466fe75e",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 126,
"avg_line_length": 39.85470085470085,
"alnum_prop": 0.6013296161269569,
"repo_name": "jcgknudson/spotify-lib",
"id": "89bd32233e7ca65a6fd5c25a9b72322338cefd44",
"size": "9411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/players/spotify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35334"
},
{
"name": "Shell",
"bytes": "464"
}
],
"symlink_target": ""
}
|
"""transformer (attention seq-seq model) with mixtures of experts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from six.moves import xrange # pylint: disable=redefined-builtin
from DLT2T.layers import common_attention
from DLT2T.layers import common_hparams
from DLT2T.layers import common_layers
from DLT2T.models import transformer
from DLT2T.utils import expert_utils
from DLT2T.utils import registry
from DLT2T.utils import t2t_model
import tensorflow as tf
@registry.register_model
class TransformerMoe(t2t_model.T2TModel):
"""Attention net. See file docstring."""
def model_fn_body_sharded(self, sharded_features):
hparams = self._hparams
dp = self._data_parallelism
targets = sharded_features["targets"]
inputs = sharded_features["inputs"]
target_space = sharded_features["target_space_id"]
inputs = dp(common_layers.flatten4d3d, inputs)
targets = dp(common_layers.flatten4d3d, targets)
def preprocess(x):
return dp(common_layers.layer_preprocess, x, hparams)
def postprocess(x, y):
return dp(common_layers.layer_postprocess, x, y, hparams)
(encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias) = dp(
transformer.transformer_prepare_encoder,
inputs, target_space, hparams)
(decoder_input, decoder_self_attention_bias) = dp(
transformer.transformer_prepare_decoder, targets, hparams)
encoder_input = dp(tf.nn.dropout, encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
decoder_input = dp(tf.nn.dropout, decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
extra_loss = 0
moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
expert_fn = expert_utils.ffn_expert_fn(
hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
x = encoder_input
for layer in xrange(hparams.num_hidden_layers):
with tf.variable_scope("encoder_layer_%d" % layer):
with tf.variable_scope("encoder_self_attention"):
y = dp(
common_attention.multihead_attention,
preprocess(x),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout)
x = postprocess(x, y)
with tf.variable_scope("ffn"):
if str(layer) in hparams.moe_layers_encoder.split(","):
y, loss = expert_utils.distributed_moe(
dp,
self._ps_devices,
preprocess(x),
hparams.mode == tf.estimator.ModeKeys.TRAIN,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
extra_loss += loss
else:
y = dp(
common_layers.conv_hidden_relu,
preprocess(x),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
x = postprocess(x, y)
encoder_output = preprocess(x)
x = decoder_input
for layer in xrange(hparams.num_hidden_layers):
with tf.variable_scope("decoder_layer_%d" % layer):
with tf.variable_scope("decoder_self_attention"):
y = dp(
common_attention.multihead_attention,
preprocess(x),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout)
x = postprocess(x, y)
with tf.variable_scope("encoder_decoder_attention"):
y = dp(
common_attention.multihead_attention,
preprocess(x),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout)
x = postprocess(x, y)
with tf.variable_scope("ffn"):
if str(layer) in hparams.moe_layers_decoder.split(","):
y, loss = expert_utils.distributed_moe(
dp,
self._ps_devices,
preprocess(x),
hparams.mode == tf.estimator.ModeKeys.TRAIN,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
extra_loss += loss
else:
y = dp(
common_layers.conv_hidden_relu,
preprocess(x),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
x = postprocess(x, y)
x = preprocess(x)
decoder_output = dp(tf.expand_dims, x, 2)
return decoder_output, extra_loss
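# Illustration, not part of the original model: the encoder and decoder loops
# above choose between a plain conv ffn and a mixture of experts by checking a
# comma-separated hparam string. A minimal sketch of that check (the helper
# name _uses_moe is ours, not DLT2T's):
def _uses_moe(layer_index, moe_layers_spec):
  """Return True when layer_index appears in a spec string such as "1,3"."""
  return str(layer_index) in moe_layers_spec.split(",")
# e.g. _uses_moe(2, "2") is True, while _uses_moe(0, "1,3") is False.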
@registry.register_hparams
def transformer_moe_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 2001
hparams.max_input_seq_length = 2000
hparams.max_target_seq_length = 2000
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 5
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = int(True)
hparams.add_hparam("filter_size", 2048) # Add new ones like this.
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("parameter_attention_key_channels", 0)
hparams.add_hparam("parameter_attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("proximity_bias", int(False))
# FLAGS RELATED TO MIXTURE-OF-EXPERTS
# comma-separated list of layer numbers.
# At each of these layers, we replace the ffn with a mixture of experts.
hparams.add_hparam("moe_layers_encoder", "2")
hparams.add_hparam("moe_layers_decoder", "2")
return hparams
@registry.register_hparams
def transformer_no_moe():
"""Without the mixture of experts (for comparison)."""
hparams = transformer_moe_base()
hparams.moe_layers_encoder = ""
hparams.moe_layers_decoder = ""
return hparams
@registry.register_hparams
def transformer_moe_1b():
"""1-billion parameter model - requires multi-gpu sync training."""
hparams = transformer_moe_base()
hparams.moe_n1 = 128
hparams.moe_layers_encoder = "1,3"
hparams.moe_layers_decoder = "1,3"
return hparams
|
{
"content_hash": "336f06de3369032551c9bfc221a91622",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 76,
"avg_line_length": 37.39906103286385,
"alnum_prop": 0.6351995982927442,
"repo_name": "renqianluo/DLT2T",
"id": "fb90df2f3dfbf97f15fb43b5f0023bf8a6ef1593",
"size": "8564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DLT2T/models/transformer_moe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10699"
},
{
"name": "Jupyter Notebook",
"bytes": "14574"
},
{
"name": "Python",
"bytes": "1155627"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import mock
import pytest
from tornado import gen
from tchannel import (
TChannel, Request, Response,
thrift_request_builder, schemes,
)
from tchannel.response import TransportHeaders
from tchannel.errors import OneWayNotSupportedError
from tchannel.errors import ProtocolError
from tchannel.errors import ValueExpectedError
from tchannel.testing.data.generated.ThriftTest import SecondService
from tchannel.testing.data.generated.ThriftTest import ThriftTest
# TODO - where possible, in req/res style tests, create parameterized tests;
# each test should run both with headers and without,
# and potentially with retry and timeout as well (a sketch follows below).
# Note this won't work with complex scenarios.
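# A hypothetical sketch of that parameterization (the test name and fixture
# values here are ours, not part of this suite):
#
#   @pytest.mark.gen_test
#   @pytest.mark.parametrize('headers', [None, {'req': 'header'}])
#   def test_some_call(headers):
#       kwargs = {'headers': headers} if headers else {}
#       resp = yield tchannel.thrift(service.testVoid(), **kwargs)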
@pytest.mark.gen_test
@pytest.mark.call
def test_void():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testVoid(request):
pass
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(service.testVoid())
assert resp.headers == {}
assert resp.body is None
@pytest.mark.gen_test
@pytest.mark.call
def test_void_with_headers():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testVoid(request):
assert request.headers == {'req': 'header'}
return Response(headers={'resp': 'header'})
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testVoid(),
headers={'req': 'header'},
)
assert resp.headers == {
'resp': 'header'
}
assert resp.body is None
@pytest.mark.gen_test
@pytest.mark.call
def test_string():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testString(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testString('howdy')
)
assert resp.headers == {}
assert resp.body == 'howdy'
@pytest.mark.gen_test
@pytest.mark.call
def test_byte():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testByte(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testByte(63)
)
assert resp.headers == {}
assert resp.body == 63
@pytest.mark.gen_test
@pytest.mark.call
def test_i32():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testI32(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
# case #1
resp = yield tchannel.thrift(
service.testI32(-1)
)
assert resp.headers == {}
assert resp.body == -1
# case #2
resp = yield tchannel.thrift(
service.testI32(1)
)
assert resp.headers == {}
assert resp.body == 1
@pytest.mark.gen_test
@pytest.mark.call
def test_i64():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testI64(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testI64(-34359738368)
)
assert resp.headers == {}
assert resp.body == -34359738368
@pytest.mark.gen_test
@pytest.mark.call
def test_double():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testDouble(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testDouble(-5.235098235)
)
assert resp.headers == {}
assert resp.body == -5.235098235
@pytest.mark.gen_test
@pytest.mark.call
def test_binary():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testBinary(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testBinary(
# this is ThriftTest.Xtruct(string_thing='hi')
'\x0c\x00\x00\x0b\x00\x01\x00\x00\x00\x0bhi\x00\x00'
)
)
assert resp.headers == {}
assert (
resp.body ==
'\x0c\x00\x00\x0b\x00\x01\x00\x00\x00\x0bhi\x00\x00'
)
@pytest.mark.gen_test
@pytest.mark.call
def test_struct():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testStruct(request):
assert request.body.thing.string_thing == 'req string'
return ThriftTest.Xtruct(
string_thing="resp string"
)
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='service',
thrift_module=ThriftTest,
hostport=server.hostport
)
resp = yield tchannel.thrift(
service.testStruct(ThriftTest.Xtruct("req string"))
)
# verify response
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body == ThriftTest.Xtruct("resp string")
@pytest.mark.gen_test
@pytest.mark.call
def test_struct_with_headers():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testStruct(request):
assert isinstance(request, Request)
assert request.headers == {'req': 'header'}
assert request.body.thing.string_thing == 'req string'
return Response(
ThriftTest.Xtruct(
string_thing="resp string"
),
headers={'resp': 'header'},
)
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='service',
thrift_module=ThriftTest,
hostport=server.hostport
)
resp = yield tchannel.thrift(
service.testStruct(ThriftTest.Xtruct("req string")),
headers={'req': 'header'},
)
# verify response
assert isinstance(resp, Response)
assert resp.headers == {'resp': 'header'}
assert resp.body == ThriftTest.Xtruct("resp string")
@pytest.mark.gen_test
@pytest.mark.call
def test_nest():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testNest(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
xstruct = ThriftTest.Xtruct(
string_thing='hi',
byte_thing=1,
i32_thing=-1,
i64_thing=-34359738368,
)
xstruct2 = ThriftTest.Xtruct2(
byte_thing=1,
struct_thing=xstruct,
i32_thing=1,
)
resp = yield tchannel.thrift(
service.testNest(thing=xstruct2)
)
assert resp.headers == {}
assert resp.body == xstruct2
@pytest.mark.gen_test
@pytest.mark.call
def test_map():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testMap(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = {
0: 1,
1: 2,
2: 3,
3: 4,
-1: -2,
}
resp = yield tchannel.thrift(
service.testMap(thing=x)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_string_map():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testStringMap(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = {
'hello': 'there',
'my': 'name',
'is': 'shirly',
}
resp = yield tchannel.thrift(
service.testStringMap(thing=x)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_set():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testSet(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = set([8, 1, 42])
resp = yield tchannel.thrift(
service.testSet(thing=x)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_list():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testList(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = [1, 4, 9, -42]
resp = yield tchannel.thrift(
service.testList(thing=x)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_enum():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testEnum(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = ThriftTest.Numberz.FIVE
resp = yield tchannel.thrift(
service.testEnum(thing=x)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_type_def():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testTypedef(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = 0xffffffffffffff # 7 bytes of 0xff
resp = yield tchannel.thrift(
service.testTypedef(thing=x)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_map_map():
# Given this test server:
server = TChannel(name='server')
map_map = {
-4: {
-4: -4,
-3: -3,
-2: -2,
-1: -1,
},
4: {
1: 1,
2: 2,
3: 3,
4: 4,
},
}
@server.thrift.register(ThriftTest)
def testMapMap(request):
return map_map
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testMapMap(1)
)
assert resp.headers == {}
assert resp.body == map_map
@pytest.mark.gen_test
@pytest.mark.call
def test_insanity():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testInsanity(request):
result = {
1: {
2: request.body.argument,
3: request.body.argument,
},
2: {
6: ThriftTest.Insanity(),
},
}
return result
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = ThriftTest.Insanity(
userMap={
ThriftTest.Numberz.EIGHT: 0xffffffffffffff,
},
xtructs=[
ThriftTest.Xtruct(
string_thing='Hello2',
byte_thing=74,
i32_thing=0xff00ff,
i64_thing=-34359738368,
),
],
)
resp = yield tchannel.thrift(
service.testInsanity(x)
)
assert resp.headers == {}
assert resp.body == {
1: {
2: x,
3: x,
},
2: {
6: ThriftTest.Insanity(),
},
}
@pytest.mark.gen_test
@pytest.mark.call
def test_multi():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testMulti(request):
return ThriftTest.Xtruct(
string_thing='Hello2',
byte_thing=request.body.arg0,
i32_thing=request.body.arg1,
i64_thing=request.body.arg2,
)
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
x = ThriftTest.Xtruct(
string_thing='Hello2',
byte_thing=74,
i32_thing=0xff00ff,
i64_thing=0xffffffffd0d0,
)
resp = yield tchannel.thrift(
service.testMulti(
arg0=x.byte_thing,
arg1=x.i32_thing,
arg2=x.i64_thing,
arg3={0: 'abc'},
arg4=ThriftTest.Numberz.FIVE,
arg5=0xf0f0f0,
)
)
assert resp.headers == {}
assert resp.body == x
@pytest.mark.gen_test
@pytest.mark.call
def test_exception():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testException(request):
if request.body.arg == 'Xception':
raise ThriftTest.Xception(
errorCode=1001,
message=request.body.arg
)
elif request.body.arg == 'TException':
            # TODO - what should be raised here? We don't want a dependency
            # on Thrift, so thrift.TException isn't available to us...
raise Exception()
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='service',
thrift_module=ThriftTest,
hostport=server.hostport
)
# case #1
with pytest.raises(ThriftTest.Xception) as e:
yield tchannel.thrift(
service.testException(arg='Xception')
)
assert e.value.errorCode == 1001
assert e.value.message == 'Xception'
# case #2
with pytest.raises(ProtocolError):
yield tchannel.thrift(
service.testException(arg='TException')
)
# case #3
resp = yield tchannel.thrift(
service.testException(arg='something else')
)
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body is None
@pytest.mark.gen_test
@pytest.mark.call
def test_multi_exception():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testMultiException(request):
if request.body.arg0 == 'Xception':
raise ThriftTest.Xception(
errorCode=1001,
message='This is an Xception',
)
elif request.body.arg0 == 'Xception2':
raise ThriftTest.Xception2(
errorCode=2002
)
return ThriftTest.Xtruct(string_thing=request.body.arg1)
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='service',
thrift_module=ThriftTest,
hostport=server.hostport
)
# case #1
with pytest.raises(ThriftTest.Xception) as e:
yield tchannel.thrift(
service.testMultiException(arg0='Xception', arg1='thingy')
)
assert e.value.errorCode == 1001
assert e.value.message == 'This is an Xception'
# case #2
with pytest.raises(ThriftTest.Xception2) as e:
yield tchannel.thrift(
service.testMultiException(arg0='Xception2', arg1='thingy')
)
assert e.value.errorCode == 2002
# case #3
resp = yield tchannel.thrift(
service.testMultiException(arg0='something else', arg1='thingy')
)
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body == ThriftTest.Xtruct('thingy')
@pytest.mark.gen_test
@pytest.mark.call
def test_oneway():
# Given this test server:
server = TChannel(name='server')
# TODO - server should raise same exception as client
with pytest.raises(AssertionError):
@server.thrift.register(ThriftTest)
def testOneway(request):
pass
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
with pytest.raises(OneWayNotSupportedError):
yield tchannel.thrift(service.testOneway(1))
@pytest.mark.gen_test
@pytest.mark.call
def test_second_service_blah_blah():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testString(request):
return request.body.thing
@server.thrift.register(SecondService)
def blahBlah(request):
pass
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport
)
second_service = thrift_request_builder(
service='server',
thrift_module=SecondService,
hostport=server.hostport,
)
resp = yield tchannel.thrift(service.testString('thing'))
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body == 'thing'
resp = yield tchannel.thrift(second_service.blahBlah())
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body is None
@pytest.mark.gen_test
@pytest.mark.call
def test_second_service_second_test_string():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testString(request):
return request.body.thing
@server.thrift.register(SecondService)
@gen.coroutine
def secondtestString(request):
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(
service.testString(request.body.thing),
)
raise gen.Return(resp)
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport
)
second_service = thrift_request_builder(
service='server',
thrift_module=SecondService,
hostport=server.hostport,
)
resp = yield tchannel.thrift(service.testString('thing'))
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body == 'thing'
resp = yield tchannel.thrift(
second_service.secondtestString('second_string')
)
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body == 'second_string'
@pytest.mark.gen_test
@pytest.mark.call
def test_call_response_should_contain_transport_headers():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testString(request):
return request.body.thing
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
resp = yield tchannel.thrift(service.testString('hi'))
# verify response
assert isinstance(resp, Response)
assert resp.headers == {}
assert resp.body == 'hi'
# verify response transport headers
assert isinstance(resp.transport, TransportHeaders)
assert resp.transport.scheme == schemes.THRIFT
assert resp.transport.failure_domain is None
@pytest.mark.gen_test
@pytest.mark.call
def test_call_unexpected_error_should_result_in_protocol_error():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testMultiException(request):
raise Exception('well, this is unfortunate')
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
with pytest.raises(ProtocolError):
yield tchannel.thrift(
service.testMultiException(arg0='Xception', arg1='thingy')
)
@pytest.mark.gen_test
@pytest.mark.call
def test_value_expected_but_none_returned_should_error():
# Given this test server:
server = TChannel(name='server')
@server.thrift.register(ThriftTest)
def testString(request):
pass
server.listen()
# Make a call:
tchannel = TChannel(name='client')
service = thrift_request_builder(
service='server',
thrift_module=ThriftTest,
hostport=server.hostport,
)
with pytest.raises(ValueExpectedError):
yield tchannel.thrift(
service.testString('no return!?')
)
@pytest.mark.gen_test
@pytest.mark.call
@pytest.mark.parametrize('headers', [
{'key': 1},
{1: 'value'},
{'key': {'key': 'value'}},
100,
-100,
.1,
10 << 6,
True,
Exception(),
])
def test_headers_should_be_a_map_of_strings(headers):
tchannel = TChannel('client')
with pytest.raises(ValueError):
yield tchannel.thrift(
request=mock.MagicMock(),
headers=headers,
)
|
{
"content_hash": "755cfa40be2fd521e302528475f148b7",
"timestamp": "",
"source": "github",
"line_count": 1165,
"max_line_length": 75,
"avg_line_length": 20.885836909871244,
"alnum_prop": 0.6091566661186915,
"repo_name": "Willyham/tchannel-python",
"id": "51225f5a3ea7990e07a5457b4b32aaa6d5faf493",
"size": "25435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/schemes/test_thrift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1902"
},
{
"name": "Python",
"bytes": "719974"
},
{
"name": "Shell",
"bytes": "1473"
},
{
"name": "Thrift",
"bytes": "13859"
}
],
"symlink_target": ""
}
|
"""Basic CRUD operations for the server.
===============================
Imports :
sqlalchemy.orm.with_polymorphic : Load columns for inheriting classes.
Ref : http://docs.sqlalchemy.org/en/latest/orm/query.html
sqlalchemy.exists : A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM … WHERE …).
Ref : http://docs.sqlalchemy.org/en/latest/orm/query.html
sqlalchemy.orm.exc.NoResultFound : A database result was required but none was found.
Ref : http://docs.sqlalchemy.org/en/latest/orm/exceptions.html?highlight=result%20found#sqlalchemy.orm.exc.NoResultFound
sqlalchemy.orm.session.Session : Manages persistence operations for ORM-mapped objects.
Ref : http://docs.sqlalchemy.org/en/latest/orm/session_api.html?highlight=session#module-sqlalchemy.orm.session
hydrus.data.db_models.Graph : Model for a graph that store triples of instance from the other models to map relationships.
hydrus.data.db_models.BaseProperty : Model for Basic Property.
hydrus.data.db_models.RDFClass : Model for Classes specifically RDF-OWL or RDF-HYDRA classes.
hydrus.data.db_models.Instance : Model for Object/Resource. Instances are instances of some kind/classes that are served through the API.
hydrus.data.db_models.Terminal : Model for Terminals which are numbers or string that can be referenced by a Property.
hydrus.data.db_models.GraphIAC : Graph model for Instance >> AbstractProperty >> Class.
hydrus.data.db_models.GraphIIT : Graph model for Instance >> InstanceProperty >> Terminal.
hydrus.data.db_models.GraphIII : Graph model for Instance >> InstanceProperty >> Instance.
Ref : ./db_models.py
hydrus.data.exceptions : Contains all exceptions.
typing : Module which provides support for type hints.
""" # nopep8
import copy
from sqlalchemy.orm.exc import NoResultFound
from hydrus.data.db_models import Modification
from hydrus.data.exceptions import (
InstanceExists,
PageNotFound,
IncompatibleParameters,
OffsetOutOfRange,
)
from hydrus.data.crud_helpers import (
recreate_iri,
attach_hydra_view,
pre_process_pagination_parameters,
parse_search_params,
)
# from sqlalchemy.orm.session import Session
from sqlalchemy.orm.scoping import scoped_session
from hydra_python_core.doc_writer import HydraDoc
from typing import Dict, Optional, Any, List
from hydrus.data.resource_based_classes import (
get_object,
insert_object,
update_object,
delete_object,
get_all_filtered_instances,
get_single_response,
get_database_class,
get_collection_member,
delete_collection_member,
)
from hydrus.conf import get_host_domain
def get(
id_: str,
type_: str,
api_name: str,
session: scoped_session,
path: str = None,
collection: bool = False,
) -> Dict[str, str]:
"""Retrieve an Instance with given ID from the database [GET].
:param id_: id of object to be fetched
:param type_: type of object
:param api_name: name of api specified while starting server
:param session: sqlalchemy scoped session
:param path: endpoint
:param collection: True if the type_ is of a collection, False for any other class
:return: response to the request
Raises:
ClassNotFound: If the `type_` is not a valid/defined RDFClass.
        InstanceNotFound: If no Instance of the `type_` class is found.
"""
query_info = {"@type": type_, "id_": id_}
object_template = get_object(query_info, session, collection)
object_template["@id"] = f"{get_host_domain()}/{api_name}/{path}/{id_}"
return object_template
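# Example call (illustrative values only): get("some-uuid", "Drone", "api",
# session, path="DroneCollection") returns the instance dict with an "@id"
# such as "<host>/api/DroneCollection/some-uuid".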
def insert(
doc_: HydraDoc,
object_: Dict[str, Any],
session: scoped_session,
id_: Optional[str] = None,
collection: bool = False,
) -> str:
"""Insert an object to database [POST] and returns the inserted object.
:param doc_ : Hydra Doc object
:param object_: object to be inserted
:param session: sqlalchemy scoped session
:param id_: id of the object to be inserted (optional param)
:param collection: True if the type_ is of a collection, False for any other class
:return: ID of object inserted
Raises:
        ClassNotFound: If `object_["@type"]` is not a valid/defined RDFClass.
InstanceExists: If an Instance `id_` already exists.
PropertyNotFound: If any property name of `object_` other than `@type` or `@context`
is not a valid/defined property.
        NotInstanceProperty: If any property of `object_` is a dictionary but
            not an Instance property.
        NotAbstractProperty: If any property of `object_` is a
            valid/defined RDFClass but is neither a dictionary nor an Abstract Property.
"""
object_template = copy.deepcopy(object_)
if id_ is not None:
object_template["id"] = id_
inserted_object_id = insert_object(doc_, object_template, session, collection)
return inserted_object_id
def insert_multiple(
doc: HydraDoc,
objects_: List[Dict[str, Any]],
session: scoped_session,
id_: Optional[str] = ""
) -> List[str]:
"""
    Adds a list of objects with given ids to the database
:param doc : Hydra Doc object
:param objects_: List of dict's to be added to the database
:param session: scoped session from getSession in utils
:param id_: optional parameter containing the ids of objects that have to be inserted
:return: Ids that have been inserted
Raises:
ClassNotFound: If any dict of `objects_` is not a valid/defined RDFClass.
        InstanceExists: If an Instance with the same id already exists.
PropertyNotFound: If for any dict in 'objects_' if any property is not
a valid/defined property.
        NotAnInstanceProperty: If any property of a dict in `object_` is a dictionary but
            not an Instance property.
        NotAnAbstractProperty: If any property of a dict in `object_` is a
            valid/defined RDFClass but is neither a dictionary nor an Abstract Property.
"""
if id_:
id_list = id_.split(",")
else:
id_list = None
# list to hold all the ids of inserted objects
instance_id_list = []
for index in range(len(objects_)):
id_of_object_ = None
object_ = objects_[index]
        # check if an id_ exists for the object at that index
try:
id_of_object_ = id_list[index]
except IndexError:
pass
except TypeError:
pass
inserted_object_id = insert(doc, object_, session, id_of_object_)
instance_id_list.append(inserted_object_id)
return instance_id_list
def delete(
id_: str, type_: str, session: scoped_session, collection: bool = False
) -> None:
"""Delete an Instance and all its relations from DB given id [DELETE].
:param id_: id of object to be deleted
:param type_: type of object to be deleted
:param session: sqlalchemy scoped session
Raises:
ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
        InstanceNotFound: If no instance of type `type_` with id `id_` exists.
"""
query_info = {"@type": type_, "id_": id_}
delete_object(query_info, session, collection)
def delete_multiple(id_: str, type_: str, session: scoped_session) -> None:
"""
To delete multiple rows in a single request
    :param id_: comma-separated string of ids of objects to be deleted
:param type_: type of object to be deleted
:param session: sqlalchemy scoped session
Raises:
ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
InstanceNotFound: If any instance with type 'type_' and any id in 'id_' list
does not exist.
"""
id_list = id_.split(",")
for object_id_ in id_list:
delete(object_id_, type_, session)
def update(
doc_: HydraDoc,
id_: str,
type_: str,
object_: Dict[str, str],
session: scoped_session,
api_name: str,
path: str = None,
collection: bool = False,
) -> str:
"""Update an object properties based on the given object [PUT].
:param doc_ : Hydra Doc object
    :param id_: id of object to be updated
:param type_: type of object to be updated
:param object_: object that has to be inserted
:param session: sqlalchemy scoped session
:param api_name: api name specified while starting server
:param path: endpoint
:param collection: True if the type_ is of a collection, False for any other class
:return: id of updated object
"""
query_info = {"@type": type_, "id_": id_}
updated_object_id = update_object(doc_, object_, query_info, session, collection)
return updated_object_id
def get_collection(
API_NAME: str,
type_: str,
session: scoped_session,
paginate: bool,
page_size: int,
    search_params: Dict[str, Any] = None,
path: str = None,
collection: bool = False,
) -> Dict[str, Any]:
"""Retrieve a type of collection from the database.
:param API_NAME: api name specified while starting server
:param type_: type of object to be updated
:param session: sqlalchemy scoped session
:param paginate: Enable/disable pagination
    :param page_size: Maximum number of elements shown on a page
:param search_params: Query parameters
:param path: endpoint
:param collection: True if the type_ is of a collection, False for any other class
:return: response containing a page of the objects of that particular type_
Raises:
ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
"""
database_search_params = copy.deepcopy(search_params)
pagination_parameters = ["page", "pageIndex", "limit", "offset"]
# remove pagination params before filtering in the database
for param in database_search_params.copy():
if param in pagination_parameters:
database_search_params.pop(param)
database_search_params = parse_search_params(database_search_params)
filtered_instances = get_all_filtered_instances(
session, database_search_params, type_, collection
)
collection_template = pagination(
filtered_instances, path, type_, API_NAME, search_params, paginate, page_size
)
return collection_template
def get_single(
type_: str, api_name: str, session: scoped_session, path: str = None
) -> Dict[str, Any]:
"""Get instance of classes with single objects.
:param type_: type of object to be updated
:param api_name: api name specified while starting server
:param session: sqlalchemy scoped session
:param path: endpoint
:return: response containing information about a single object
Raises:
        ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
InstanceNotFound: If no Instance with type `type_` exists.
"""
instance = get_single_response(session, type_)
object_ = get(instance.id, type_, session=session, api_name=api_name, path=path)
if path is not None:
object_["@id"] = f"{get_host_domain()}/{api_name}/{path}"
else:
object_["@id"] = f"{get_host_domain()}/{api_name}/{type_}"
return object_
def insert_single(
    doc_: HydraDoc, object_: Dict[str, Any], session: scoped_session
) -> Any:
    """Insert instance of classes with single objects.
    :param doc_: Hydra Doc object
    :param object_: object to be inserted
    :param session: sqlalchemy scoped session
    :return: ID of the inserted object
    Raises:
        ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
        InstanceExists: If an Instance of type `type_` already exists.
    """
    type_ = object_["@type"]
    database_class = get_database_class(type_)
    try:
        # A single-object class may hold at most one instance; inserting is
        # only allowed when no row exists yet.
        session.query(database_class).all()[-1]
    except (NoResultFound, IndexError, ValueError):
        return insert(doc_, object_, session=session)
    raise InstanceExists(type_)
def update_single(
    doc_: HydraDoc,
    object_: Dict[str, Any],
    session: scoped_session,
    api_name: str,
    path: str = None,
) -> int:
    """Update instance of classes with single objects.
    :param doc_: Hydra Doc object
    :param object_: new object
    :param session: sqlalchemy scoped session
    :param api_name: api name specified while starting server
    :param path: endpoint
    :return: id of the updated object
    Raises:
        ClassNotFound: If `object_['@type']` does not represent a valid/defined RDFClass.
        InstanceNotFound: If no Instance of the class exists.
    """
    type_ = object_["@type"]
    instance = get_single_response(session, type_)
    return update(
        doc_=doc_,
        id_=instance.id,
        type_=type_,
        object_=object_,
        session=session,
        api_name=api_name,
        path=path,
    )
def delete_single(type_: str, session: scoped_session) -> None:
"""Delete instance of classes with single objects.
:param type_: type of object to be deleted
:param session: sqlalchemy scoped session
:return: None
Raises:
        ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
InstanceNotFound: If no Instance of the class exists.
"""
instance = get_single_response(session, type_)
return delete(instance.id, type_, session=session)
def get_member(
collection_id: str,
member_id: str,
type_: str,
api_name: str,
session: scoped_session,
path: str = None,
) -> Dict[str, str]:
"""Retrieve an Instance with given IDs from the database [GET].
:param collection_id: id of the collection to be fetched
:param member_id: id of the member to be fetched
:param type_: type of object
:param api_name: name of api specified while starting server
:param session: sqlalchemy scoped session
:param path: endpoint
:return: response to the request
Raises:
ClassNotFound: If the `type_` is not a valid/defined RDFClass.
        InstanceNotFound: If no Instance of the `type_` class is found.
"""
query_info = {
"@type": type_,
"member_id": member_id,
"collection_id": collection_id,
}
object_template = get_collection_member(query_info, session)
object_template["@id"] = f"{get_host_domain()}/{api_name}/{path}/{collection_id}"
return object_template
def delete_member(
collection_id: str, member_id: str, type_: str, session: scoped_session
) -> None:
"""Delete an Instance and all its relations from DB given id [DELETE].
:param collection_id: id of the collection
:param member_id: id of member to be deleted
:param type_: type of object to be deleted
:param session: sqlalchemy scoped session
Raises:
ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
        MemberInstanceNotFound: If no instance of type `type_` with id `id_` exists.
"""
query_info = {
"@type": type_,
"member_id": member_id,
"collection_id": collection_id,
}
delete_collection_member(query_info, session)
def delete_multiple_members(
collection_id_: str,
    id_: str,
type_: str,
session: scoped_session) -> None:
"""
To delete multiple members in a single request
:param collection_id_: ID of the collection
    :param id_: comma-separated string of ids of members to be deleted
:param type_: type of object to be deleted
:param session: sqlalchemy scoped session
Raises:
ClassNotFound: If `type_` does not represent a valid/defined RDFClass.
MemberInstanceNotFound: If any instance with type 'type_' and any id in 'id_' list
does not exist.
"""
id_list = id_.split(',')
for member_id_ in id_list:
delete_member(collection_id_, member_id_, type_, session)
def insert_modification_record(
method: str, resource_url: str, session: scoped_session
) -> int:
"""
Insert a modification record into the database.
:param method: HTTP method type of related operation.
:param resource_url: URL of resource modified.
:param session: sqlalchemy session.
:return: ID of new modification record.
"""
modification = Modification(method=method, resource_url=resource_url)
session.add(modification)
session.commit()
return modification.job_id
def get_last_modification_job_id(session: scoped_session) -> str:
"""
Get job id of most recent modification record stored in the db.
:param session: sqlalchemy session
:return: job id of recent modification.
"""
last_modification = (
session.query(Modification).order_by(Modification.job_id.desc()).first()
)
if last_modification is None:
last_job_id = ""
else:
last_job_id = last_modification.job_id
return last_job_id
def get_modification_table_diff(
session: scoped_session, agent_job_id: str = None
) -> List[Dict[str, Any]]:
"""
Get modification table difference.
:param session: sqlalchemy session.
:param agent_job_id: Job id from the client.
:return: List of all modifications done after job with job_id = agent_job_id.
"""
# If agent_job_id is not given then return all the elements.
if agent_job_id is None:
modifications = (
session.query(Modification).order_by(Modification.job_id.asc()).all()
)
# If agent_job_id is given then return all records which are older
# than the record with agent_job_id.
else:
try:
record_for_agent_job_id = (
session.query(Modification)
.filter(Modification.job_id == agent_job_id)
.one()
)
except NoResultFound:
return []
modifications = (
session.query(Modification)
.filter(Modification.job_id > record_for_agent_job_id.job_id)
.order_by(Modification.job_id.asc())
.all()
)
# Create response body
list_of_modification_records = []
for modification in modifications:
modification_record = {
"job_id": modification.job_id,
"method": modification.method,
"resource_url": modification.resource_url,
}
list_of_modification_records.append(modification_record)
return list_of_modification_records
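# For example (illustrative values): with stored job_ids [1, 2, 3, 4, 5] and
# agent_job_id=3, the diff above contains only the records with job_ids 4
# and 5, in ascending order.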
def pagination(
filtered_instances, path, type_, API_NAME, search_params, paginate, page_size
):
"""Add pagination to the response and return the response
:param filtered_instances: instances after filtered from the database query
:param path: endpoint
:param type_: type of object to be updated
:param API_NAME: api name specified while starting server
:param search_params: Query parameters
:param paginate: Enable/disable pagination
    :param page_size: Maximum number of elements shown on a page
:return: response containing a page of the objects of that particular type_
"""
collection_template = {
"@id": f"{get_host_domain()}/{API_NAME}/{path}/",
"@context": None,
"@type": f"{path}",
"members": [],
} # type: Dict[str, Any]
result_length = len(filtered_instances)
    # To paginate, calculate offset and page_limit values for pagination of
    # search results; pre_process_pagination_parameters propagates
    # IncompatibleParameters, PageNotFound and OffsetOutOfRange unchanged.
    page, page_size, offset = pre_process_pagination_parameters(
        search_params=search_params,
        paginate=paginate,
        page_size=page_size,
        result_length=result_length,
    )
current_page_size = page_size
if result_length - offset < page_size:
current_page_size = result_length - offset
for i in range(offset, offset + current_page_size):
object_template = {
"@id": f"{get_host_domain()}/{API_NAME}/{type_}/{filtered_instances[i].id}",
"@type": type_,
}
collection_template["members"].append(object_template)
# If pagination is disabled then stop and return the collection template
collection_template["hydra:totalItems"] = result_length
if paginate is False:
return collection_template
# Calculate last page number
if result_length != 0 and result_length % page_size == 0:
last = result_length // page_size
else:
last = result_length // page_size + 1
if page < 1 or page > last:
raise PageNotFound(str(page))
recreated_iri = recreate_iri(API_NAME, path, search_params=search_params)
# Decide which parameter to use to provide navigation
if "offset" in search_params:
paginate_param = "offset"
elif "pageIndex" in search_params:
paginate_param = "pageIndex"
else:
paginate_param = "page"
attach_hydra_view(
collection_template=collection_template,
paginate_param=paginate_param,
result_length=result_length,
iri=recreated_iri,
page_size=page_size,
offset=offset,
page=page,
last=last,
)
return collection_template
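# Illustrative helper, not used above: the last page number exactly as
# pagination() computes it - full pages, plus one partial page if needed.
def _last_page(result_length, page_size):
    if result_length != 0 and result_length % page_size == 0:
        return result_length // page_size
    return result_length // page_size + 1
# e.g. _last_page(20, 10) == 2, while _last_page(25, 10) == 3.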
def item_exists(item_type, item_id, session):
"""Check if the instance with type 'item_type' and id 'item_id'
exists in the database. Returns True if exists else False
:param item_type: The @type of the instance
:type item_type: str
:param item_id: The id of the instance in the database
:type item_id: str
:param session: sqlalchemy scoped session
"""
database_class = get_database_class(item_type)
return session.query(database_class.id).filter_by(id=item_id).scalar() is not None
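# Example (hypothetical identifiers): item_exists("Drone", "abc-123", session)
# queries only the id column and returns True/False without loading the row.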
|
{
"content_hash": "4cece668a7049f8256d77ffa579211ac",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 141,
"avg_line_length": 35.00162601626016,
"alnum_prop": 0.6611074979095047,
"repo_name": "HTTP-APIs/hydrus",
"id": "b8902cabb3fcec83a2b13e01fa313c4cdac2fea2",
"size": "21530",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hydrus/data/crud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "607"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "404729"
}
],
"symlink_target": ""
}
|
import json
import logging
import socket
import time
from telemetry.core.backends.chrome import websocket
class DispatchNotificationsUntilDoneTimeoutException(Exception):
"""Exception that can be thrown from DispatchNotificationsUntilDone to
indicate timeout exception of the function.
"""
def __init__(self, elapsed_time):
super(DispatchNotificationsUntilDoneTimeoutException, self).__init__()
self.elapsed_time = elapsed_time
class InspectorWebsocket(object):
def __init__(self, notification_handler=None, error_handler=None):
"""Create a websocket handler for communicating with Inspectors.
Args:
notification_handler: A callback for notifications received as a result of
calling DispatchNotifications() or DispatchNotificationsUntilDone().
Must accept a single JSON object containing the Inspector's
notification. May return True to indicate the dispatching is done for
DispatchNotificationsUntilDone.
error_handler: A callback for errors in communicating with the Inspector.
        Must accept a single numeric parameter indicating the time elapsed
        before the error.
"""
self._socket = None
self._cur_socket_timeout = 0
self._next_request_id = 0
self._notification_handler = notification_handler
self._error_handler = error_handler
def Connect(self, url, timeout=10):
assert not self._socket
self._socket = websocket.create_connection(url, timeout=timeout)
self._cur_socket_timeout = 0
self._next_request_id = 0
def Disconnect(self):
if self._socket:
self._socket.close()
self._socket = None
def SendAndIgnoreResponse(self, req):
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
self._socket.send(data)
logging.debug('sent [%s]', data)
def SyncRequest(self, req, timeout=10):
self.SendAndIgnoreResponse(req)
while self._socket:
res = self._Receive(timeout)
if 'id' in res and res['id'] == req['id']:
return res
def DispatchNotifications(self, timeout=10):
self._Receive(timeout)
def DispatchNotificationsUntilDone(self, timeout):
"""Dispatch notifications until notification_handler return True.
Args:
timeout: the total timeout value for dispatching multiple notifications
until done.
"""
if timeout < self._cur_socket_timeout:
self._SetTimeout(timeout)
start_time = time.time()
while self._socket:
try:
if not self._Receive(timeout):
break
except websocket.WebSocketTimeoutException:
pass
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
raise DispatchNotificationsUntilDoneTimeoutException(elapsed_time)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _Receive(self, timeout=10):
self._SetTimeout(timeout)
start_time = time.time()
try:
while self._socket:
data = self._socket.recv()
res = json.loads(data)
logging.debug('got [%s]', data)
if 'method' in res and self._notification_handler(res):
return None
return res
except (socket.error, websocket.WebSocketException):
elapsed_time = time.time() - start_time
self._error_handler(elapsed_time)
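if __name__ == '__main__':
  # Minimal usage sketch, not part of the original module; the websocket URL
  # below is a placeholder for a real DevTools endpoint.
  def _log_notification(msg):
    logging.debug('notification: %s', msg.get('method'))
    return False  # keep dispatching; True stops DispatchNotificationsUntilDone

  def _log_error(elapsed_time):
    logging.error('inspector error after %.1fs', elapsed_time)

  inspector = InspectorWebsocket(_log_notification, _log_error)
  # inspector.Connect('ws://localhost:9222/devtools/page/1')
  # inspector.SyncRequest({'method': 'Page.enable'}, timeout=10)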
|
{
"content_hash": "ec66b96efa3c87d5308a5bf8a172a91c",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 31.685185185185187,
"alnum_prop": 0.6858562244301578,
"repo_name": "ondra-novak/chromium.src",
"id": "00c18a49646ac776d7cf000e2b159d76d98be74f",
"size": "3585",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw",
"path": "tools/telemetry/telemetry/core/backends/chrome/inspector_websocket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "35318"
},
{
"name": "Batchfile",
"bytes": "7621"
},
{
"name": "C",
"bytes": "8692951"
},
{
"name": "C++",
"bytes": "206833388"
},
{
"name": "CSS",
"bytes": "871479"
},
{
"name": "HTML",
"bytes": "24541148"
},
{
"name": "Java",
"bytes": "5457985"
},
{
"name": "JavaScript",
"bytes": "17791684"
},
{
"name": "Makefile",
"bytes": "92563"
},
{
"name": "Objective-C",
"bytes": "1312233"
},
{
"name": "Objective-C++",
"bytes": "7105758"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "218379"
},
{
"name": "Perl",
"bytes": "69392"
},
{
"name": "Protocol Buffer",
"bytes": "387183"
},
{
"name": "Python",
"bytes": "6929739"
},
{
"name": "Shell",
"bytes": "473664"
},
{
"name": "Standard ML",
"bytes": "4131"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import shutil
import pytest
from django.conf import settings
from django.core import management
from faker import Faker
from haystack.query import SearchQuerySet
from machina.apps.forum_search.forms import SearchForm
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import PostFactory
from machina.test.factories import UserFactory
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_topic
faker = Faker()
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
@pytest.mark.django_db
class TestSearchForm(object):
@pytest.yield_fixture(autouse=True)
def setup(self):
# Permission handler
self.perm_handler = PermissionHandler()
# Create a basic user
self.user = UserFactory.create(username='foobar')
# Set up the following forum tree:
#
# top_level_cat
# forum_1
# forum_2
# forum_2_child_1
# top_level_forum_1
# top_level_forum_2
# sub_cat
# sub_sub_forum
# top_level_forum_3
# forum_3
# forum_3_child_1
# forum_3_child_1_1
# deep_forum
# last_forum
#
self.top_level_cat = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat)
self.forum_2 = create_forum(parent=self.top_level_cat)
self.forum_2_child_1 = create_forum(parent=self.forum_2)
self.top_level_forum_1 = create_forum()
self.top_level_forum_2 = create_forum()
self.sub_cat = create_category_forum(parent=self.top_level_forum_2)
self.sub_sub_forum = create_forum(parent=self.sub_cat)
self.top_level_forum_3 = create_forum()
self.forum_3 = create_forum(parent=self.top_level_forum_3)
self.forum_3_child_1 = create_forum(parent=self.forum_3)
self.forum_3_child_1_1 = create_forum(parent=self.forum_3_child_1)
self.deep_forum = create_forum(parent=self.forum_3_child_1_1)
self.last_forum = create_forum()
# Set up a topic and some posts
self.topic_1 = create_topic(forum=self.forum_1, poster=self.user)
self.post_1 = PostFactory.create(topic=self.topic_1, poster=self.user)
self.topic_2 = create_topic(forum=self.forum_2, poster=self.user)
self.post_2 = PostFactory.create(topic=self.topic_2, poster=self.user)
self.topic_3 = create_topic(forum=self.forum_2_child_1, poster=self.user)
self.post_3 = PostFactory.create(topic=self.topic_3, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.user, self.top_level_cat)
assign_perm('can_read_forum', self.user, self.forum_1)
assign_perm('can_read_forum', self.user, self.forum_2)
assign_perm('can_read_forum', self.user, self.forum_2_child_1)
assign_perm('can_read_forum', self.user, self.top_level_forum_1)
self.sqs = SearchQuerySet()
management.call_command('clear_index', verbosity=0, interactive=False)
management.call_command('update_index', verbosity=0)
yield
# teardown
# --
management.call_command('clear_index', verbosity=0, interactive=False)
@classmethod
def teardown_class(cls):
shutil.rmtree(settings.HAYSTACK_CONNECTIONS['default']['PATH'])
def test_can_search_forum_posts(self):
# Setup
form = SearchForm(
{'q': self.topic_1.first_post.subject},
user=self.user,
)
# Run
results = form.search()
# Check
assert form.is_valid()
assert results[0].forum == self.topic_1.forum.pk
def test_cannot_search_forum_posts_if_the_user_has_not_the_required_permissions(self):
# Setup
u1 = UserFactory.create()
form = SearchForm(
{'q': self.topic_1.first_post.content},
user=u1,
)
# Run
results = form.search()
# Check
assert form.is_valid()
assert not len(results)
def test_cannot_search_forum_posts_if_the_form_is_not_valid(self):
# Setup
form = SearchForm(
{
'q': self.topic_1.first_post.content,
'search_forums': [1000, ],
},
user=self.user,
)
# Run
results = form.search()
# Check
assert not len(results)
def test_can_search_forum_posts_by_using_only_topic_subjects(self):
# Setup
form = SearchForm(
{
'q': self.topic_1.subject,
'search_topics': True,
},
user=self.user,
)
# Run
results = form.search()
# Check
assert form.is_valid()
assert results[0].forum == self.topic_1.forum.pk
def test_can_search_forum_posts_by_using_the_registered_poster_name(self):
# Setup
self.topic_1.first_post.subject = 'newsubject'
self.topic_1.first_post.save()
self.topic_2.first_post.subject = 'newsubject'
self.topic_2.first_post.save()
self.topic_3.first_post.subject = 'newsubject'
self.topic_3.first_post.save()
management.call_command('clear_index', verbosity=0, interactive=False)
management.call_command('update_index', verbosity=0)
form = SearchForm(
{
'q': 'newsubject',
'search_poster_name': self.user.username,
},
user=self.user,
)
# Run
results = form.search()
# Check
assert form.is_valid()
assert [r.object for r in results] == [self.post_1, self.post_2, self.post_3, ]
def test_can_search_forum_posts_by_using_the_anonymous_poster_name(self):
# Setup
self.topic_1.first_post.subject = 'newsubject'
self.topic_1.first_post.save()
self.topic_2.first_post.subject = 'newsubject'
self.topic_2.first_post.save()
self.topic_3.first_post.subject = 'newsubject'
self.topic_3.first_post.save()
post_4 = PostFactory.create(
subject='newsubject', topic=self.topic_3, poster=None, username='newtest')
management.call_command('clear_index', verbosity=0, interactive=False)
management.call_command('update_index', verbosity=0)
form = SearchForm(
{
'q': 'newsubject',
'search_poster_name': 'newtest',
},
user=self.user,
)
# Run
results = form.search()
# Check
assert form.is_valid()
assert [r.object for r in results] == [post_4, ]
def test_can_search_forum_posts_by_using_a_set_of_forums(self):
# Setup
self.topic_2.first_post.subject = self.topic_1.subject
self.topic_2.first_post.save()
management.call_command('clear_index', verbosity=0, interactive=False)
management.call_command('update_index', verbosity=0)
form = SearchForm(
{
'q': self.topic_1.subject,
'search_forums': [self.forum_1.pk, self.forum_2.pk, ],
},
user=self.user,
)
# Run
results = form.search()
# Check
assert form.is_valid()
assert [r.object for r in results] == [self.post_1, self.post_2, ]
|
{
"content_hash": "b7ea0751e445eaa27920859ad87e489e",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 90,
"avg_line_length": 34.05194805194805,
"alnum_prop": 0.5894991100940757,
"repo_name": "franga2000/django-machina",
"id": "ef96c93e483e1c94145e8cae385dce26a92c6b0b",
"size": "7891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/search/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13665"
},
{
"name": "HTML",
"bytes": "138474"
},
{
"name": "JavaScript",
"bytes": "5866"
},
{
"name": "Makefile",
"bytes": "1599"
},
{
"name": "Python",
"bytes": "696565"
}
],
"symlink_target": ""
}
|
import sys, os, time, atexit
from signal import SIGTERM
import errno
def pid_exists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
"""
if(pid < 0):
return False
if(pid == 0):
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if(err.errno == errno.ESRCH):
# ESRCH == No such process
return False
elif(err.errno == errno.EPERM):
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if(pid > 0):
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, daemonize = True):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if(pid_exists(pid)):
message = "pid %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
if(daemonize):
self.daemonize()
else:
# No redirect of standard file descriptors
pass
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print(str(err))
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
{
"content_hash": "9df32870386816ea99eb67149204b612",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 110,
"avg_line_length": 29.656050955414013,
"alnum_prop": 0.5148195876288659,
"repo_name": "sflis/plms",
"id": "e1bb5abc1a3a4d8b7f43110b1d5ed4e511e60ff9",
"size": "4679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plms/daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84574"
}
],
"symlink_target": ""
}
|
def final_floor(input_string):
return input_string.count("(") - input_string.count(")")
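# Worked example from the puzzle statement: "(()(()(" contains five '(' and
# two ')', so final_floor("(()(()(") == 5 - 2 == 3.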
if __name__=="__main__":
input_string = open("input.txt").read()
print final_floor(input_string)
|
{
"content_hash": "86d921fb501873a8b900ff899193cc25",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 18.818181818181817,
"alnum_prop": 0.6038647342995169,
"repo_name": "mnestis/advent2015",
"id": "881836fcd2558df125098e66b0a69456cd1148e2",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01/part1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61015"
}
],
"symlink_target": ""
}
|
pass
|
{
"content_hash": "bf55909ad163b58236e44b86e8441b26",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 4,
"avg_line_length": 5,
"alnum_prop": 0.8,
"repo_name": "michaelhidalgo/7WCSQ",
"id": "5ea298dc1955b38dfcdfef6e989feea9a6ce709b",
"size": "319",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Tools/SQLMap/sqlmap/thirdparty/wininetpton/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13307"
},
{
"name": "C++",
"bytes": "1641"
},
{
"name": "Objective-C",
"bytes": "516"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "2136"
},
{
"name": "Python",
"bytes": "1630594"
},
{
"name": "Shell",
"bytes": "9683"
}
],
"symlink_target": ""
}
|
""" Uploads a Cloud Function from the local source to a GS bucket. """
import zipfile
import hashlib
import base64
import uuid
from io import BytesIO
from copy import deepcopy
GS_SCHEMA_LENGTH = 5
def extract_source_files(imports, local_upload_path):
""" Returns tuples of the imported sources files. """
imported_files = []
for imported_file in imports:
if imported_file.startswith(local_upload_path):
file_name = imported_file[len(local_upload_path):]
file_content = imports[imported_file]
imported_files.append((file_name.lstrip('/'), file_content))
return imported_files
def archive_files(files):
""" Archives input files and returns the result as a binary array. """
output_file = BytesIO()
sources_zip = zipfile.ZipFile(output_file,
mode='w',
compression=zipfile.ZIP_DEFLATED)
for source_file in files:
sources_zip.writestr(*source_file)
sources_zip.close()
return output_file.getvalue()
def upload_source(context_name, project, function, imports, local_path, source_archive_url):
""" Uploads the Cloud Function source code from the local machine
to a Cloud Storage bucket. If the bucket does not exist, creates it.
"""
# Creates an in-memory archive of the Cloud Function source files.
sources = extract_source_files(imports, local_path)
archive = archive_files(sources)
archive_base64 = base64.b64encode(archive).decode("utf-8")
# The Cloud Function knows it was updated when MD5 changes.
md5 = hashlib.md5()
md5.update(archive)
# Splits the upload path into the bucket and archive names.
bucket_name = source_archive_url[:source_archive_url.index('/', GS_SCHEMA_LENGTH)] # pylint: disable=line-too-long
archive_name = source_archive_url[source_archive_url.rfind('/') + 1:]
# Uses a Docker volume to pass the archive between the build steps.
volume = '/cloud-function'
volume_archive_path = volume + '/' + archive_name
volumes = [
{
'name': 'cloud-function',
'path': volume
}
]
    # Saves the inline base64 ZIP to a file.
    # The payload is split into two parts to work around the Cloud Build argument length limit.
tmpfile = volume_archive_path + ".tmp"
cmd1 = "echo '{}' > {};".format(archive_base64[:3800],
tmpfile)
cmd2 = "echo '{}' >> {};".format(archive_base64[3800:],
tmpfile)
cmd3 = "base64 -d {} > {};".format(tmpfile, volume_archive_path)
build_action = {
'name': '{}-upload-task'.format(context_name),
# https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds/create
'action': 'gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create',
'metadata':
{
'runtimePolicy': ['UPDATE_ON_CHANGE']
},
'properties':
{
'projectId': project,
'steps':
[
{ # Saves part 1 to a file.
'name': 'ubuntu',
'args': ['bash', '-c', cmd1],
'volumes': volumes,
},
{ # Saves part 2 to a file.
'name': 'ubuntu',
'args': ['bash', '-c', cmd2],
'volumes': volumes,
},
{ # Create the zip file.
'name': 'ubuntu',
'args': ['bash', '-c', cmd3],
'volumes': volumes,
},
{ # Creates a bucket if one does not exist.
'name': 'gcr.io/cloud-builders/gsutil',
'env': ['CLOUDSDK_CORE_PROJECT={}'.format(project)],
'args': [
'-c',
'gsutil mb {} || true'.format(bucket_name)
],
'entrypoint': '/bin/bash'
},
{ # Uploads the ZIP to the bucket.
'name': 'gcr.io/cloud-builders/gsutil',
'env': ['CLOUDSDK_CORE_PROJECT={}'.format(project)],
'args': [
'cp',
volume_archive_path, source_archive_url
],
'volumes': deepcopy(volumes)
}
],
'timeout': '120s'
}
}
function['properties']['labels'] = {'content-md5': md5.hexdigest()}
return ([build_action], [])
def generate_bucket_name():
""" Generates a bucket name for the Cloud Function. """
return 'gs://cloud-functions-{}'.format(uuid.uuid4())
def generate_archive_name():
""" Generates the Cloud Function's zip name. """
return 'cloud-function-{}.zip'.format(uuid.uuid4())
def generate_upload_path():
""" Generates the full upload path for the Cloud Function. """
return generate_bucket_name() + '/' + generate_archive_name()
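# A minimal sketch (not part of the deployment template) of how the helpers
# above compose: generate_upload_path() yields a gs:// URL that upload_source()
# later splits into a bucket name and an archive name.
#
#   url = generate_upload_path()
#   # e.g. 'gs://cloud-functions-<uuid>/cloud-function-<uuid>.zip'
#   bucket_name = url[:url.index('/', GS_SCHEMA_LENGTH)]  # 'gs://cloud-functions-<uuid>'
#   archive_name = url[url.rfind('/') + 1:]               # 'cloud-function-<uuid>.zip'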
|
{
"content_hash": "8985dcbb7598937d7cea312c254fcc17",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 118,
"avg_line_length": 37.38194444444444,
"alnum_prop": 0.5036225153260264,
"repo_name": "GoogleCloudPlatform/serverless-log-trigger-demo",
"id": "6d0a76ad398e2fb5bf85f300b2ac21dc961615c0",
"size": "5979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dm/cloudfunction/upload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "20690"
},
{
"name": "Python",
"bytes": "50866"
}
],
"symlink_target": ""
}
|
"""
Reads csv file of streamed bid and ask prices, then ingests into
zipline data.bundle
- Set BID_ASK_STREAM_CSV_FOLDER=path, where path contains the csv data as seen
in _minute_iter docstring
"""
import os
import re
import zipfile
import pandas as pd
import numpy as np
from ..utils import resample
from zipline.utils.cli import maybe_show_progress
def ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
"""
Ingests csv data of bid, ask streamed prices, for example
from TrueFX. The csv should look something like
EUR/USD,20160729 20:50:12.065,1.11759,1.11766
EUR/USD,20160729 20:50:12.238,1.11759,1.11766
EUR/USD,20160729 20:50:13.098,1.11759,1.11766
EUR/USD,20160729 20:50:13.183,1.11759,1.11766
EUR/USD,20160729 20:50:15.758,1.11759,1.11766
This function can be registered for a zipline bundle:
.. code-block:: python
from zipline.data.bundles import register
register('bid_ask_stream', bid_ask_stream.ingest,
start_session= pd.Timestamp(os.environ.get("DATA_START"), tz='utc'),
end_session= pd.Timestamp(os.environ.get("DATA_END"), tz='utc'),
calendar='forex', minutes_per_day=1440)
...
"""
path = environ.get('BID_ASK_STREAM_CSV_FOLDER')
    instruments = os.listdir(path)  # e.g. ["EURUSD", "AUDUSD"]
# init metadata
metadata = pd.DataFrame(np.empty(len(instruments), dtype=[
('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('exchange', 'object'),
('symbol', 'object'),
('asset_name', 'object'),
]))
metadata['start_date'] = metadata.start_date.dt.tz_localize('UTC')
metadata['end_date'] = metadata.end_date.dt.tz_localize('UTC')
metadata['auto_close_date'] = metadata.auto_close_date.dt.tz_localize('UTC')
# Fix calendar
calendar.schedule['market_open'] = calendar.schedule.market_open.dt.tz_localize('UTC')
calendar.schedule['market_close'] = calendar.schedule.market_close.dt.tz_localize('UTC')
def _minute_iter(path):
""" Yields (sid, dataframe) for ingesting, while updating
metadata as a closure
Parameters
----------
path : str
The path to a folder containing sub folder of instruments,
which in turn contain ohlc directory. For example:
/path:
├── EURUSD
│ ├── EURUSD-2016-06.zip
│ └── EURUSD-2016-07.zip
└── GBPUSD
├── GBPUSD-2016-06.zip
└── GBPUSD-2016-07.zip
Returns
-------
Yield (sid, dataframe)
Note
----
        sid is the index of the instrument folder in the path; it has no special meaning.
"""
        instruments = os.listdir(path)  # e.g. ["EURUSD", "AUDUSD"]
for index, name in enumerate(instruments):
metadata.ix[index] = None, None, None, 'NYSE', name, name
current_dir = os.path.join(path, name)
zips = filter(lambda x: ".zip" in x, os.listdir(current_dir))
for z in zips:
zfile = zipfile.ZipFile(os.path.join(current_dir, z), 'r')
zfile.extractall(current_dir)
# ensure data is ingested chronologically
sorted_csv_name = sorted(os.listdir(current_dir), key=lambda x: (int(re.sub('\D', '', x)), x))
csvs = filter(lambda x: ".csv" in x, sorted_csv_name)
with maybe_show_progress(
csvs,
show_progress,
label='Ingesting csv stream for %s: ' % name):
for minute_csv in csvs:
ohlc = resample.bid_ask_to_ohlc(os.path.join(current_dir, minute_csv))
# Keep metadata updated
if metadata.ix[index, "start_date"] is pd.NaT or metadata.start_date.ix[index] > ohlc.index[0]:
metadata.ix[index, "start_date"] = ohlc.index[0]
if metadata.ix[index, "end_date"] is pd.NaT or metadata.end_date.ix[index] < ohlc.index[-1]:
metadata.ix[index, "end_date"] = ohlc.index[-1]
metadata.ix[index, "auto_close_date"] = ohlc.index[-1] + pd.Timedelta(days=1)
yield index, ohlc
minute_bar_writer.write(_minute_iter(path), show_progress)
asset_db_writer.write(equities=metadata)
adjustment_writer.write()
|
{
"content_hash": "0306e108b014b8609593c628f9ba7d3b",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 115,
"avg_line_length": 38.45161290322581,
"alnum_prop": 0.5687919463087249,
"repo_name": "bernoullio/toolbox",
"id": "aa05f50e48f848f6cafd5fb3bce14853bfda06d6",
"size": "4812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forex_toolbox/bid_ask_stream/bid_ask_stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153307"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ApplicationGatewaySku(Model):
"""SKU of an application gateway.
:param name: Name of an application gateway SKU. Possible values include:
'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium',
'WAF_Large'
:type name: str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySkuName
:param tier: Tier of an application gateway. Possible values include:
'Standard', 'WAF'
:type tier: str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewayTier
:param capacity: Capacity (instance count) of an application gateway.
:type capacity: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(self, *, name=None, tier=None, capacity: int=None, **kwargs) -> None:
super(ApplicationGatewaySku, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.capacity = capacity
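# A minimal construction sketch (values are illustrative, chosen from the
# documented choices above): the generated model is keyword-only, so a
# medium WAF gateway with two instances would be built as
#
#   sku = ApplicationGatewaySku(name='WAF_Medium', tier='WAF', capacity=2)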
|
{
"content_hash": "9a560fa88cb86df2378d4635e4407fe7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 86,
"avg_line_length": 36.2,
"alnum_prop": 0.6344383057090239,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "a569cbf9ef5d9a181c09a4bd4566dac791677aa1",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/application_gateway_sku_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import numpy as np
from pyspark.mllib.clustering import KMeans
import matplotlib.pyplot as plt
gpa_data = sc.textFile("../data/student.txt")
gpa_data.cache()
gpa_data.take(5)
# <codecell>
data = gpa_data.map(lambda row: row.split("\t")).map(lambda row: np.array([float(row[1]), float(row[2])]))
data.take(5)
# <codecell>
model = KMeans.train(data, 4)
# <codecell>
model.centers
# <codecell>
model.clusterCenters
# <codecell>
x = data.map(lambda row: row[0]).collect()
y = data.map(lambda row: row[1]).collect()
plt.scatter(x, y)
plt.scatter([x[0] for x in model.centers], [x[1] for x in model.centers], s=90, c="red")
plt.show()
# <codecell>
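# A follow-up sketch (an assumption, not in the original notebook): the
# KMeansModel returned by KMeans.train exposes predict(), which maps a
# feature vector to the index of its nearest cluster center. The sample
# vector below is illustrative only.
# cluster_id = model.predict(np.array([3.2, 85.0]))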
|
{
"content_hash": "d244deb4c8557401fef592a1e612ca66",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 92,
"avg_line_length": 17.833333333333332,
"alnum_prop": 0.677570093457944,
"repo_name": "anantasty/spark-examples",
"id": "52fa2093e8121fca994768eab02400ea13d35aa4",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/kmeans.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15554"
},
{
"name": "Scala",
"bytes": "6142"
}
],
"symlink_target": ""
}
|
import soothsayer.urls
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^soothsayer/', include(soothsayer.urls)),
url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "8f4b69a90ebf8020002faac47a1489a3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 22.083333333333332,
"alnum_prop": 0.7169811320754716,
"repo_name": "shawnhermans/soothsayer",
"id": "10d3d1ff0f7cc746f6e5236ea0dce6df23e9dd6e",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mlserver/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "61523"
},
{
"name": "Shell",
"bytes": "6709"
}
],
"symlink_target": ""
}
|
"""
dyn_pages/user_upgrade.py
Author: Josh Williams
Date Added: Fri Mar 2 14:22:51 CST 2007
Give us yo money.
"""
## STD LIBS
## OUR LIBS
from zoto_base_page import zoto_base_page
## 3RD PARTY LIBS
class user_upgrade(zoto_base_page):
local_js_includes = [
"select_box.lib.js",
"subscribe.lib.js",
"states.js",
"countries.js"
]
page_manager_js = "managers/user_upgrade.js"
def locateChild(self, ctx, segments):
return self, []
|
{
"content_hash": "a79079aab87cf9b4e7c54ee7ada0141d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 45,
"avg_line_length": 15.517241379310345,
"alnum_prop": 0.6755555555555556,
"repo_name": "kordless/zoto-server",
"id": "c7894bc41ed86c8d3a3b18e9ecf32df192f73fe9",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aztk/web/user_upgrade.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1489011"
},
{
"name": "PHP",
"bytes": "15394"
},
{
"name": "Python",
"bytes": "905967"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
}
|
import random
from queue_3_12 import Queue
class Printer:
    def __init__(self, pages_per_minute):
        self.page_rate = pages_per_minute
        self.current_task = None
        self.remain_time = 0
    def printing(self):
        """Advance the current print job by one simulated second.

        remain_time is the remaining time in seconds.
        """
        if self.current_task:
            self.remain_time = self.remain_time - 1
            if self.remain_time <= 0:
                self.current_task = None
def is_busy(self):
return self.current_task != None
    def next(self, next_task):
        self.current_task = next_task
        self.remain_time = next_task.get_pages() * 60 / self.page_rate
        # e.g. 15 pages at 5 pages/minute => remain_time = 15 * 60 / 5 = 180 s = 3 min
class Task:
    def __init__(self, time):
        """Create a task stamped with its arrival time and a random length."""
        self.start_time = time
        self.pages = random.randrange(1, 21) # 1-20 pages length
    def get_start_time(self):
        """Return the second at which the task entered the queue."""
        return self.start_time
def get_pages(self):
return self.pages
def get_wait_time(self, current_time):
return current_time - self.get_start_time()
def simulation(at_seconds, pages_per_minute):
    lab_printer = Printer(pages_per_minute)
print_queue = Queue()
wait_time = []
for current_second in range(at_seconds):
if has_new_task():
task = Task(current_second)
print_queue.enqueue(task)
if not lab_printer.is_busy() and not print_queue.is_empty():
next_task = print_queue.dequeue()
wait_time.append(next_task.get_wait_time(current_second))
lab_printer.next(next_task)
lab_printer.printing()
average_wait_time = sum(wait_time) / len(wait_time)
print("Average Wait %6.2f secs %3d tasks remaining." % (average_wait_time, print_queue.size()))
def has_new_task():
return random.randrange(1, 181) == 180
for i in range(10):
    simulation(3600, 5)
print('rate changed to 10')
for i in range(10):
    simulation(3600, 10)
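# Back-of-envelope check on the parameters (not part of the original
# exercise): has_new_task() fires with probability 1/180 per second, so a
# 3600 s run expects about 3600/180 = 20 tasks. The mean job is 10.5 pages,
# which takes 10.5 * 60 / 5 = 126 s at 5 pages/min, giving a utilisation of
# roughly 20 * 126 / 3600 ~ 0.7; at 10 pages/min it halves to ~0.35, which
# is why the reported average wait times drop.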
|
{
"content_hash": "48e5487f99afb23df5c6becf75f4fd5b",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 99,
"avg_line_length": 26.839506172839506,
"alnum_prop": 0.5827966881324746,
"repo_name": "sharkspeed/dororis",
"id": "d7a84bd53b3d9981621b503dd6bb9d15782a3d7e",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "algorithm/psads/3-chapter/2-queue/queue-3_14.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Agda",
"bytes": "152"
},
{
"name": "AppleScript",
"bytes": "4936"
},
{
"name": "Assembly",
"bytes": "6654"
},
{
"name": "C",
"bytes": "568507"
},
{
"name": "C#",
"bytes": "2446"
},
{
"name": "C++",
"bytes": "15567"
},
{
"name": "CSS",
"bytes": "74090"
},
{
"name": "Clojure",
"bytes": "986"
},
{
"name": "CoffeeScript",
"bytes": "1055"
},
{
"name": "Crystal",
"bytes": "13171"
},
{
"name": "Dart",
"bytes": "22343"
},
{
"name": "Elixir",
"bytes": "27938"
},
{
"name": "Fortran",
"bytes": "400"
},
{
"name": "Go",
"bytes": "117383"
},
{
"name": "HTML",
"bytes": "780346"
},
{
"name": "Haskell",
"bytes": "33977"
},
{
"name": "Idris",
"bytes": "167"
},
{
"name": "Java",
"bytes": "105613"
},
{
"name": "JavaScript",
"bytes": "1453348"
},
{
"name": "Kotlin",
"bytes": "24078"
},
{
"name": "Lex",
"bytes": "1156"
},
{
"name": "Makefile",
"bytes": "22596"
},
{
"name": "Mako",
"bytes": "1976"
},
{
"name": "Objective-C",
"bytes": "1500"
},
{
"name": "PHP",
"bytes": "868941"
},
{
"name": "Python",
"bytes": "553417"
},
{
"name": "Racket",
"bytes": "11698"
},
{
"name": "Roff",
"bytes": "3741"
},
{
"name": "Ruby",
"bytes": "129923"
},
{
"name": "Rust",
"bytes": "27692"
},
{
"name": "Scala",
"bytes": "791"
},
{
"name": "Shell",
"bytes": "17297"
},
{
"name": "Smarty",
"bytes": "421"
},
{
"name": "Swift",
"bytes": "197600"
},
{
"name": "TeX",
"bytes": "3875"
},
{
"name": "TypeScript",
"bytes": "24815"
},
{
"name": "Vim script",
"bytes": "6936"
},
{
"name": "Vue",
"bytes": "32921"
},
{
"name": "Zig",
"bytes": "634"
}
],
"symlink_target": ""
}
|
from test_framework import HivemindTestFramework
from hivemindrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class EstimateFeeTest(HivemindTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir,
["-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
# Node1 mines small-but-not-tiny blocks, and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
# 6 or 7 transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=2000",
"-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
# produces very small blocks (room for only 3 or so transactions)
node2args = [ "-blockprioritysize=0", "-blockmaxsize=1500",
"-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
# Prime the memory pool with pairs of transactions
# (high-priority, random fee and zero-priority, random fee)
min_fee = Decimal("0.001")
        fees_per_kb = []
for i in range(12):
(txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
min_fee, min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
# Mine blocks with node2 until the memory pool clears:
count_start = self.nodes[2].getblockcount()
while len(self.nodes[2].getrawmempool()) > 0:
self.nodes[2].setgenerate(True, 1)
self.sync_all()
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates]))
# Estimates should be within the bounds of what transactions fees actually were:
delta = 1.0e-6 # account for rounding error
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Generate transactions while mining 30 more blocks, this time with node1:
for i in range(30):
for j in range(random.randrange(6-4,6+4)):
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
Decimal("0.0"), min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
self.nodes[1].setgenerate(True, 1)
self.sync_all()
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, more generous miner: "+str([ str(e) for e in all_estimates]))
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Finish by mining a normal-sized block:
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].setgenerate(True, 1)
self.sync_all()
final_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Final fee estimates: "+str([ str(e) for e in final_estimates]))
if __name__ == '__main__':
EstimateFeeTest().main()
|
{
"content_hash": "bb4d8572375ea9c3ccdc10834168e7c3",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 114,
"avg_line_length": 48.93975903614458,
"alnum_prop": 0.5829640571147218,
"repo_name": "brishtiteveja/truthcoin-cpp",
"id": "e5524e5ca7bd5bcec294fef3c30045055a4748e0",
"size": "4304",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/smartfees.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "663859"
},
{
"name": "C++",
"bytes": "4138789"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18103"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "4203"
},
{
"name": "Makefile",
"bytes": "63712"
},
{
"name": "Objective-C",
"bytes": "3161"
},
{
"name": "Objective-C++",
"bytes": "7278"
},
{
"name": "Protocol Buffer",
"bytes": "2312"
},
{
"name": "Python",
"bytes": "212197"
},
{
"name": "QMake",
"bytes": "33546"
},
{
"name": "Shell",
"bytes": "34948"
}
],
"symlink_target": ""
}
|
"""
data_plot.py
SuperClass module for the YProfile and Table Classes
It contains numerous plot function for the YProfile and Table Classes
If one in the future wants their class to inherit this superclasses methods
this is what is required:
A. Place 'from data_table import *' at the top of the module
B. If the class is defined like 'class MyClass:', change that to
'class MyClass(DataTable):'
C. To properly use DataTable's methods properly one will need these methods:
a get(atri) that returns a numpy array of Data, or a
list of numpy arrays of data. The arguments of this function would need to be
atri which is the name of the data one is looking for.
"""
from numpy import *
from math import *
import matplotlib.pylab as pyl
import matplotlib.pyplot as pl
from matplotlib import colors,cm
from matplotlib.patches import Rectangle, Arrow
from matplotlib.collections import PatchCollection
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea
from matplotlib.lines import Line2D
from matplotlib.ticker import *
import os
import threading
import time
import sys
def padding_model_number(number,max_num):
    ''' Return a zero-front-padded string representation of number.

    For example str(45) becomes '0045' when 999 < max_num < 10000. This
    is meant to work for reasonable integers (roughly less than 10^6).

    number   the integer that the string should represent
    max_num  the maximum number in the cycle list; determines how many
             zeros are padded
    '''
cnum = str(number)
clen = len(cnum)
cmax = int(log10(max_num)) + 1
return (cmax - clen)*'0' + cnum
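# Worked example (illustrative): with max_num=1000, cmax = int(log10(1000)) + 1
# = 4, so padding_model_number(45, 1000) -> '0045' and
# padding_model_number(7, 1000) -> '0007'.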
class DataPlot():
def classTest(self):
'''
Determines what the type of class instance the subclass is, so
we can dynamically determine the behaviour of methods.
This method NEEDS to be modified if any names of files or classes
are changed
'''
        c=str(self.__class__)
        tmp=''
if 'ppm.yprofile' == c:
tmp='YProfile'
elif 'ascii_table.ascii_table' == c:
tmp='AsciiTable'
elif 'nugridse.se' == c:
tmp='se'
elif 'mesa.mesa_profile' == c:
tmp='mesa_profile'
elif 'mesa.star_log' == c or 'mesa.history_data' == c:
tmp='mesa.star_log'
elif 'ppn.xtime' == c:
tmp='xtime'
elif 'ppn.abu_vector' == c:
tmp='PPN'
return tmp
def which(self, program):
'''
Mimics which in the unix shell
'''
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
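    # Usage sketch (illustrative): self.which('dvipng') returns the full path
    # of the executable if it is found on $PATH, and None otherwise -- the
    # same check abu_chartMulti performs before plotting.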
def logarithm(self,tmpX,tmpY,logX,logY,base):
logXER=False
logYER=False
for i in range(len(tmpX)):
if tmpX[i]<=0. and logX:
#print 'We can not log a number less than or equal to zero'
#print 'Attempting to remove incompatible values from X'
logXER=True
if tmpY[i]<=0. and logY:
#print 'We can not log a number less than or equal to zero'
#print 'Attempting to remove incompatible values from Y'
logYER=True
tmX=[]
tmY=[]
if logXER:
for i in range(len(tmpX)):
if tmpX[i]>0.:
tmX.append( tmpX[i])
tmY.append(tmpY[i])
tmpX=tmX
tmpY=tmY
elif logYER:
for i in range(len(tmpY)):
if tmpY[i]>0.:
tmX.append( tmpX[i])
tmY.append(tmpY[i])
tmpX=tmX
tmpY=tmY
tmX=tmpX
tmY=tmpY
if logX:
tmX=tmpX
try:
for i in range(len(tmpX)):
tmX[i]=log(tmpX[i],base)
except ValueError:
#print 'We can not log a number less than or equal to zero'
#print 'Attempting to remove incompatible values from X'
logXER=True
if logY:
tmY=tmpY
try:
for i in range(len(tmpY)):
tmY[i]=log(tmpY[i],base)
except ValueError:
#print 'We can not log a number less than or equal to zero'
#print 'Attempting to remove incompatible values from Y'
logYER=True
if logX:
tmpX=tmX
if logY:
tmpY=tmY
return tmpX,tmpY
def sparse(self,x,y,sparse):
"""
Method that removes every non sparse th element. For example
if this argument was 5, This method would plot the 0th, 5th, 10th
... elements.
Input:
x: list of x values, of lenthe j
y: list of y values, of lenthe j
sparse: Argument that skips every so many data points
"""
tmpX=[]
tmpY=[]
for i in range(len(x)):
if sparse == 1:
return x,y
if (i%sparse)==0:
tmpX.append(x[i])
tmpY.append(y[i])
return tmpX, tmpY
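    # Worked example (illustrative): self.sparse(range(10), range(10), 3)
    # keeps the elements at indices 0, 3, 6 and 9, returning
    # ([0, 3, 6, 9], [0, 3, 6, 9]).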
def plotMulti(self,atrix,atriy, cyclist,title,path='/',legend=None,labelx=None, labely=None,logx=False, logy=False, \
base=10,sparse=1,pdf=False,limits=None,):
        '''
        Method for plotting multiple plots and saving them to multiple
        PNGs or PDFs

        Input:
        atrix: The name of the attribute you want on the x axis
        atriy: The name of the attribute you want on the y axis
        cyclist: List of cycles that you would like plotted
        title: The title of the graph and the name of the file
        path: directory in which the images are saved. Default = '/'
        legend: A list of legends, one for each of your cycles, or one
                legend for all of the cycles
        pdf: A boolean of whether the image should be saved to a PDF file
        logx: A boolean of whether the user wants the x axis logarithmic
        logy: A boolean of whether the user wants the y axis logarithmic
        base: The base of the logarithm. Default = 10
        sparse: Argument that skips every so many data points. For
                example if this argument was 5, this method would plot
                the 0th, 5th, 10th ... elements.
        limits: The length-four list of the x and y limits. The order of
                the list is xmin,xmax,ymin,ymax
        '''
        if str(legend.__class__)!="<type 'list'>": # Determines whether the legend is a list
legendList=False
else:
legendList=True
if legendList and len(cyclist) !=len(legend): #if it is a list, make sure there is an entry for each cycle
print 'Please input a proper legend, with correct length, aborting plot'
return None
for i in xrange(len(cyclist)):
if legendList:
self.plot(atrix,atriy,cyclist[i],'ndump',legend[i],labelx,labely,base=base,sparse=sparse, \
logx=logx,logy=logy,show=False,limits=limits)
else:
self.plot(atrix,atriy,cyclist[i],'ndump',legend,labelx,labely,base=base,sparse=sparse, \
logx=logx,logy=logy,show=False,limits=limits)
pl.title(title)
if not pdf:
currentDir = os.getcwd()
os.chdir(path)
pl.savefig(title+str(cyclist[i])+'.png', dpi=400)
os.chdir(currentDir)
else:
currentDir = os.getcwd()
os.chdir(path)
                pl.savefig(title+str(cyclist[i])+'.pdf', dpi=400)
os.chdir(currentDir)
pl.clf()
return None
def plot(self,atrix,atriy, fname=None,numtype='ndump',legend=None,labelx=None, labely=None ,\
indexx=None, indexy=None, title=None, shape='.',logx=False, logy=False, base=10,sparse=1, \
show=True,limits=None,markevery=None,linewidth=1):
"""
Simple function that plots atriy as a function of atrix
This method will automatically find and plot the requested data.
Input:
atrix, The name of the attribute you want on the x axis
atriy, The name of the attribute you want on the Y axis
        Fname: The filename, Ndump, time, or cycle. If fname is a
               list, this method will save a png for each cycle in
               the list. Warning: this must be a list of cycles and not
               a list of filenames
        numtype: designates how this function acts and how it interprets
                 fname. Defaults to file.
                 If numtype is 'file', this function will get the desired
                 attribute from that file.
                 If numtype is 'NDump', the function will look at the cycle
                 with that nDump.
                 If numtype is 't' or 'time', the function will find the
                 cycle with the closest time stamp.
labelx: The label on the X axis
labely: The label on the Y axis
        indexx: Deprecated: If the get method returns a list of lists,
                the list at index indexx is used for the x data.
        indexy: Deprecated: If the get method returns a list of lists,
                the list at index indexy is used for the y data.
shape: What shape and colour the user would like their plot in.
Please see
http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot
for all possible choices
title: The Title of the Graph
        logx: A boolean of whether the user wants the x axis logarithmic
        logy: A boolean of whether the user wants the y axis logarithmic
base: The base of the logarithm. Default = 10
sparse: Argument that skips every so many data points. For
example if this argument was 5, This method would plot
the 0th, 5th, 10th ... elements.
        show: A boolean of whether the plot should be displayed; useful
              with the multiPlot method
WARNING: Unstable if get returns a list with only one element (x=[0])
limits: The length four list of the x and y limits. The order of
the list is xmin,xmax,ymin,ymax
        markevery: Set the markevery property to subsample the plot when
                   using markers. E.g. if markevery=5, every 5th marker
                   will be plotted. It can be:
                   None          every point will be plotted
                   an integer N  every N-th marker will be plotted,
                                 starting with marker 0
                   (start, N)    every N-th marker, starting at point
                                 start, will be plotted
        linewidth: set linewidth, default=1
"""
t1=time.time()
#Setting the axis labels
if labelx== None :
labelx=atrix
if labely== None :
labely=atriy
if title!=None:
title=title
else:
title=labely+' vs '+labelx
        if str(fname.__class__)=="<type 'list'>":
            self.plotMulti(atrix,atriy,fname,title,legend=legend,labelx=labelx,labely=labely,\
                           logx=logx,logy=logy,base=base,sparse=sparse,limits=limits)
            return
tmpX=[]
tmpY=[]
singleX=False
singleY=False
#Getting data
plotType=self.classTest()
if plotType=='YProfile':
if fname==None:
fname=self.cycles[-1]
listY=self.get(atriy,fname, numtype,resolution='a')
listX=self.get(atrix,fname, numtype,resolution='a')
elif plotType=='se':
if fname==None:
listY=self.get( atriy,sparse=sparse)
listX=self.get(atrix,sparse=sparse)
else:
listY=self.get(fname, atriy,sparse=sparse)
listX=self.get(fname, atrix,sparse=sparse)
t2= time.time()
print t2 -t1
elif plotType=='PPN' :
if fname==None and atrix not in self.cattrs and atriy not in self.cattrs:
fname=len(self.files)-1
if numtype=='ndump':
numtype='cycNum'
listY=self.get(atriy,fname,numtype)
listX=self.get(atrix,fname,numtype)
elif plotType=='xtime' or plotType=='mesa_profile' or plotType=='AsciiTable' or plotType=='mesa.star_log':
listY=self.get(atriy)
listX=self.get(atrix)
else:
listY=self.get(atriy)
listX=self.get(atrix)
tmpX=[]
tmpY=[]
if isinstance(listX[0], basestring) or isinstance(listY[0], basestring):
for i in range(len(listX)):
if '*****' == listX[i] or '*****' == listY[i]:
print 'There seems to be a string of * in the lists'
print 'Cutting out elements in both the lists that have an index equal to or greater than the index of the location of the string of *'
break
tmpX.append(float(listX[i]))
tmpY.append(float(listY[i]))
listX=tmpX
listY=tmpY
#Determining if listX is a list or a list of lists
try:
j=listX[0][0]
except:
singleX = True
if len(listX) == 1: # If it is a list of lists with one element.
tmpX=listX[0]
elif singleX == True:# If it is a plain list of values.
tmpX=listX
elif indexx==None and len(listX)>1: # If it is a list of lists of values.
# take the largest
tmpX=listX[0]
for i in range(len(listX)):
if len(tmpX)<len(listX[i]):
tmpX=listX[i]
elif indexx<len(listX): # If an index is specified, use that index
tmpX=listX[indexx]
else:
print 'Sorry that indexx does not exist, returning None'
return None
#Determining if listY is a list or a list of lists
try:
j=listY[0][0]
except:
singleY = True
if len(listY) == 1: # If it is a list of lists with one element.
#print 'hello'
tmpY=listY[0]
elif singleY == True: # If it is a plain list of values.
#print 'world'
tmpY=listY
elif indexy==None and len(listY)>1:# If it is a list of lists of values.
# take the largest
#print 'fourth'
tmpY=listY[0]
for i in range(len(listY)):
if len(tmpY)<len(listY[i]):
tmpY=listY[i]
elif indexy<len(listY): # If an index is specified, use that index
#print 'sixth'
tmpY=listY[indexy]
else:
print 'Sorry that indexy does not exist, returning None'
return None
#Here, if we end up with different sized lists to plot, it
#searches for a list that is of an equal length
if len(tmpY)!=len(tmpX):
found=False
print "It seems like the lists are not of equal length"
print "Now attempting to find a compatible list for ListX"
for i in range(len(listY)):
if not singleY and len(tmpX)==len(listY[i]):
tmpY=listY[i]
found=True
if not found:
print "Now attempting to find a compatible list for ListY"
for i in range(len(listX)):
if not singleX and len(tmpY)==len(listX[i]):
tmpX=listX[i]
found=True
if found:
print "Suitable list found"
else:
print "There is no suitalble list, returning None"
return None
        if len(tmpY)!=len(tmpX) and (singleX or singleY):
print 'It seems that the selected lists are of different\nsize, now returning none'
return None
# Sparse stuff
if plotType!='se':
tmpX,tmpY=self.sparse(tmpX,tmpY, sparse)
# Logarithm stuff
if logy or logx:
tmpX,tmpY=self.logarithm(tmpX,tmpY,logx,logy,base)
        # Here we ensure that, if we are plotting ncycle, no '*' values get plotted
tmX=[]
tmY=[]
for i in range(len(tmpX)):
tmX.append(str(tmpX[i]))
tmY.append(str(tmpY[i]))
tmpX=[]
tmpY=[]
for i in range(len(tmX)):
if '*' in tmX[i] or '*' in tmY[i]:
print 'There seems to be a string of * in the lists'
print 'Cutting out elements in both the lists that have an index equal to or greater than the index of the location of the string of *'
break
tmpX.append(float(tmX[i]))
tmpY.append(float(tmY[i]))
listX=tmpX
listY=tmpY
#Setting the axis labels
if logx:
labelx='log '+labelx
if logy:
labely='log '+labely
if legend!=None:
legend=legend
else:
legend=labely+' vs '+labelx
pl.plot(listX,listY,shape,label=legend,markevery=markevery,linewidth=linewidth)
pl.legend()
pl.title(title)
pl.xlabel(labelx)
pl.ylabel(labely)
if show:
pl.show()
if limits != None and len(limits)==4:
pl.xlim(limits[0],limits[1])
pl.ylim(limits[2],limits[3])
def plot_ratios(self,misosx,misosy,solsysx=None,solsysy=None,graindata=None,m_co=None,C_star_only=False,misosxname=None,misosyname=None,deltax=True,deltay=True,logx=False,logy=False,title=None,legend=True,extra_legend='',errbar=True,iniabufile='../../frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn',plt_show=True,modlegend=None,plt_symb='o',plt_col='b',plt_modms=10.,plt_grms=7.,plt_modlw=3.,plt_sparse=0,plt_mrng=False,calling_routine='all',private_legend=None):
'''
Method for plotting ratio data from model output as well as grain data.
Important: You have to give some input to the routine!
RT, October 2012
graindata: presolar grain data -> private is for a private.txt database file, same structure as other files required!
misosx: model x data, set None for grain plot only
misosy: model y data
        solsysx: solar system ratio of the x-axis - only necessary if deltax=True; the two solsysx,y variables exist to avoid importing utils into the DataPlot class (if you import it, mesa.py no longer works)
solsysy: solar system ratio of y-axis - only necessary if deltay=True
m_co: model C/O ratio
deltax: Delta values on x-axis
deltay: Delta values on y-axis
logx: logarithmic x-axis
logy: logarithmic y-axis
title: Title of plot
legend: True or False. Use legend(loc=?) command to move legend around after plotting.
iniabufile: initial abundance file - necessary
        plt_show: Show the plot or not; defaults to True
        modlegend: legend for model data
        plt_symb: symbol for plotting model data
        plt_modms: markersize for models
        plt_grms: markersize for grains
        plt_modlw: linewidth for models
        plt_col: color for plotting model data
plt_sparse: sparse function for model data
plt_mrng: Plot mass range for massive stars (used by plot4iso_exp routine in nugridse)
calling_routine: to identify where it comes from for some special treatment
private_legend: If you want your own legend for the private file, then you're right here
'''
# compatibility
if misosxname != None and len(misosxname) == 2:
dumb = []
dumb = misosxname[0].split('-')
dumb.append(misosxname[1].split('-')[0])
dumb.append(misosxname[1].split('-')[1])
misosxname = dumb
if misosyname != None and len(misosyname) == 2:
dumb = []
dumb = misosyname[0].split('-')
dumb.append(misosyname[1].split('-')[0])
dumb.append(misosyname[1].split('-')[1])
misosyname = dumb
# style
# Size of font etc.
params = {'axes.labelsize': 20,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
pl.rcParams.update(params)
# prepare model data (if necessary) - here PPN / Nugridse difference if included at some point!
if m_co != None:
# find co_ratio, where it gets > 1
index = -1
for i in range(len(m_co)):
if m_co[i] >= 1.:
index = i
break
if index == -1: # star doesn't get C rich in TPs
mxdata_orich = misosx
mydata_orich = misosy
mxdata_crich = []
mydata_crich = []
else:
mxdata_orich = misosx[0:index+1]
mydata_orich = misosy[0:index+1]
mxdata_crich = misosx[index:len(misosx)]
mydata_crich = misosy[index:len(misosy)]
# plotting ifs, depending on available data
if graindata==None:
if m_co==None:
#markersize, and style stuff
# plot
if modlegend == None:
pl.plot(misosx,misosy,'o--')
else:
if calling_routine == 'general':
pl.plot(misosx,misosy,'o--',label=modlegend)
elif calling_routine == '4iso_exp':
plt_symb = plt_symb + '-'
for it in range(len(misosx)):
if it == 0:
pl.plot(misosx[it],misosy[it],plt_symb,color=plt_col,markevery=plt_sparse,markersize=plt_modms,lw=plt_modlw,label=modlegend)
else:
pl.plot(misosx[it],misosy[it],plt_symb,color=plt_col,markevery=plt_sparse,markersize=plt_modms,lw=plt_modlw)
# annotate mass range
if plt_mrng != False:
for mrng_ind in range(len(plt_mrng)):
pl.text(plt_mrng[mrng_ind][0],plt_mrng[mrng_ind][1],str(round(plt_mrng[mrng_ind][2],2)),ha='right',va='bottom',color=plt_col)
# axis
if logx and logy:
pl.loglog()
elif logx:
pl.semilogx()
elif logy:
pl.semilogy()
# labels
if misosxname != None:
if deltax:
pl.xlabel('$\delta$($^{' + str(misosxname[1]) + '}$' + misosxname[0] + '/$^{' + str(misosxname[3]) + '}$' + misosxname[2] + ')')
else:
pl.xlabel('$^{' + str(misosxname[1]) + '}$' + misosxname[0] + '/$^{' + str(misosxname[3]) + '}$' + misosxname[2])
if misosyname != None:
if deltay:
pl.ylabel('$\delta$($^{' + str(misosyname[1]) + '}$' + misosyname[0] + '/$^{' + str(misosyname[3]) + '}$' + misosyname[2] + ')')
else:
pl.ylabel('$^{' + str(misosyname[1]) + '}$' + misosyname[0] + '/$^{' + str(misosyname[3]) + '}$' + misosyname[2])
if title != None:
pl.title(title)
if legend != None or modlegend != None:
pl.legend(loc=5)
else:
# plot
if C_star_only ==False:
pl.plot(mxdata_orich,mydata_orich,linestyle='--',marker=plt_symb,c=plt_col,label='C/O$<$1'+extra_legend)
pl.plot(mxdata_crich,mydata_crich,'o-',marker=plt_symb,c=plt_col,label='C/O$>$1'+extra_legend,lw=plt_modlw,markersize=plt_modms,markeredgecolor='k',markerfacecolor=plt_col)
# axis
if logx and logy:
pl.loglog()
elif logx:
pl.semilogx()
elif logy:
pl.semilogy()
# labels
if misosxname != None:
if deltax:
pl.xlabel('$\delta$($^{' + str(misosxname[1]) + '}$' + misosxname[0] + '/$^{' + str(misosxname[3]) + '}$' + misosxname[2] + ')')
else:
pl.xlabel('$^{' + str(misosxname[1]) + '}$' + misosxname[0] + '/$^{' + str(misosxname[3]) + '}$' + misosxname[2])
if misosyname != None:
if deltay:
pl.ylabel('$\delta$($^{' + str(misosyname[1]) + '}$' + misosyname[0] + '/$^{' + str(misosyname[3]) + '}$' + misosyname[2] + ')')
else:
pl.ylabel('$^{' + str(misosyname[1]) + '}$' + misosyname[0] + '/$^{' + str(misosyname[3]) + '}$' + misosyname[2])
if title != None:
pl.title(title)
if legend:
pl.legend(loc=5)
else:
# transform data
gtypelist = graindata[0]
gdatax = graindata[1]
gdataxerr = graindata[2]
gdatay = graindata[3]
gdatayerr = graindata[4]
### PLOTS ###
# grains
for i in range(len(gtypelist)):
# determine plot symbols and color
if gtypelist[i][0].lower() == 'sic':
msymb = 'o'
if gtypelist[i][1] == 'M':
mcol = '0.4'
elif gtypelist[i][1] == 'X':
mcol = 'b'
elif gtypelist[i][1] == 'Y':
mcol = 'g'
elif gtypelist[i][1] == 'Z':
mcol = 'r'
elif gtypelist[i][1] == 'AB':
mcol = 'c'
elif gtypelist[i][1] == 'C' or gtypelist[i][1] == 'U/C':
mcol = 'y'
elif gtypelist[i][1] == 'N':
mcol = 'm'
else:
mcol = '0.7'
elif gtypelist[i][0].lower() == 'oxides':
msymb = '^'
if gtypelist[i][1] == '1':
mcol = '0.4'
elif gtypelist[i][1] == '2':
mcol = 'b'
elif gtypelist[i][1] == '3':
mcol = 'g'
elif gtypelist[i][1] == '4':
mcol = 'r'
else:
mcol = '0.7'
elif gtypelist[i][0].lower() == 'silicates':
msymb = 'v'
if gtypelist[i][1] == '1':
mcol = '0.4'
elif gtypelist[i][1] == '2':
mcol = 'b'
elif gtypelist[i][1] == '3':
mcol = 'g'
elif gtypelist[i][1] == '4':
mcol = 'r'
else:
mcol = '0.7'
elif gtypelist[i][0].lower() == 'graphites':
msymb = 's'
if gtypelist[i][1] == 'HD':
mcol = '0.4'
elif gtypelist[i][1] == 'LD':
mcol = 'b'
else:
mcol = '0.7'
elif gtypelist[i][0].lower() == 'idp':
msymb = '>'
mcol = '0.4'
elif gtypelist[i][0].lower() == 'private':
msymb = 'o'
if gtypelist[i][1] == 'M':
mcol = '0.4'
elif gtypelist[i][1] == 'X':
mcol = 'b'
elif gtypelist[i][1] == 'Y':
mcol = 'g'
elif gtypelist[i][1] == 'Z':
mcol = 'r'
elif gtypelist[i][1] == 'AB':
mcol = 'c'
elif gtypelist[i][1] == 'C' or gtypelist[i][1] == 'U/C':
mcol = 'y'
elif gtypelist[i][1] == 'N':
mcol = 'm'
else:
mcol = '0.7'
else:
msymb = '+'
mcol = '0.4'
# make nice labels in gtypelist now
for jt in range(len(gtypelist[i])):
if gtypelist[i][jt] == 'sic':
gtypelist[i][jt] = 'SiC'
elif gtypelist[i][jt] == 'oxides':
gtypelist[i][jt] = 'Oxides'
                    elif gtypelist[i][jt] == 'silicates':
                        gtypelist[i][jt] = 'Silicates'
elif gtypelist[i][jt] == 'graphites':
gtypelist[i][jt] = 'Graphites'
elif gtypelist[i][jt] == 'N':
gtypelist[i][jt] = 'Nova'
elif gtypelist[i][jt] == 'M':
gtypelist[i][jt] = 'Mainstream'
elif gtypelist[i][jt] == 'U':
gtypelist[i][jt] = 'Unclassified'
elif gtypelist[i][jt] == 'LD':
gtypelist[i][jt] = 'low density'
elif gtypelist[i][jt] == 'HD':
gtypelist[i][jt] = 'high density'
elif gtypelist[i][jt] == 'idp':
gtypelist[i][jt] = 'IDP'
elif gtypelist[i][jt] == 'private':
if private_legend==None:
gtypelist[i][jt] = 'Private'
else:
gtypelist[i][jt]=private_legend
# now plot the grain data!
                # label IDP grains properly
if gtypelist[i][0] == 'IDP':
grainlabelclean = 'IDP'
else:
grainlabelclean = gtypelist[i][0] + ' ' + gtypelist[i][1]
if errbar:
pl.errorbar(gdatax[i],gdatay[i],xerr=gdataxerr[i],yerr=gdatayerr[i],marker=msymb,c=mcol,linestyle='')
pl.plot(gdatax[i],gdatay[i],msymb,c=mcol,label=grainlabelclean,markersize=plt_grms)
else:
pl.plot(gdatax[i],gdatay[i],msymb,c=mcol,label=grainlabelclean,markersize=plt_grms)
# plot model:
if m_co != None and misosx!=None:
# actual plotting
if C_star_only==False:
pl.plot(mxdata_orich,mydata_orich,'--',marker=plt_symb,c=plt_col,label='C/O$<$1'+extra_legend,lw=plt_modlw)
pl.plot(mxdata_crich,mydata_crich,'*-',marker=plt_symb,c=plt_col,label='C/O$>$1'+extra_legend,lw=plt_modlw,markersize=plt_modms,markeredgecolor='k',markerfacecolor=plt_col)
elif misosx != None:
if modlegend != None:
if calling_routine == 'general':
pl.plot(misosx,misosy,'o--',label=modlegend,markersize=plt_modms,lw=plt_modlw)
elif calling_routine == '4iso_exp':
plt_symb = plt_symb + '-'
for it in range(len(misosx)):
if it == 0:
pl.plot(misosx[it],misosy[it],plt_symb,color=plt_col,markevery=plt_sparse,markersize=plt_modms,lw=plt_modlw,label=modlegend)
else:
pl.plot(misosx[it],misosy[it],plt_symb,color=plt_col,markevery=plt_sparse,markersize=plt_modms,lw=plt_modlw)
# annotate mass range
if plt_mrng != False:
for mrng_ind in range(len(plt_mrng)):
pl.text(plt_mrng[mrng_ind][0],plt_mrng[mrng_ind][1],str(round(plt_mrng[mrng_ind][2],2)),ha='right',va='bottom',color=plt_col)
else:
pl.plot(misosx,misosy,'o--',markersize=plt_modms,lw=plt_modlw)
# axis
if logx and logy:
pl.loglog()
elif logx:
pl.semilogx()
elif logy:
pl.semilogy()
# labels
if misosxname != None:
if deltax:
pl.xlabel('$\delta$($^{' + str(misosxname[1]) + '}$' + misosxname[0] + '/$^{' + str(misosxname[3]) + '}$' + misosxname[2] + ')')
else:
pl.xlabel('$^{' + str(misosxname[1]) + '}$' + misosxname[0] + '/$^{' + str(misosxname[3]) + '}$' + misosxname[2])
if misosyname != None:
if deltay:
pl.ylabel('$\delta$($^{' + str(misosyname[1]) + '}$' + misosyname[0] + '/$^{' + str(misosyname[3]) + '}$' + misosyname[2] + ')')
else:
pl.ylabel('$^{' + str(misosyname[1]) + '}$' + misosyname[0] + '/$^{' + str(misosyname[3]) + '}$' + misosyname[2])
if title != None:
pl.title(title)
if legend:
pl.legend(loc=5)
# plot horizontal and vertical lines
if deltax:
pl.axhline(0,color='k')
else:
pl.axhline(solsysy,color='k')
if deltay:
pl.axvline(0,color='k')
else:
pl.axvline(solsysx,color='k')
if plt_show:
pl.show()
def clear(self, title=True, xlabel=True, ylabel=True):
        '''
        Method for removing the title and/or xlabel and/or ylabel

        input:
        title  - boolean of whether the title will be cleared
        xlabel - boolean of whether the xlabel will be cleared
        ylabel - boolean of whether the ylabel will be cleared
        '''
if title:
pyl.title('')
if xlabel:
pyl.xlabel('')
if ylabel:
pyl.ylabel('')
# From mesa.py
def xlimrev(self):
''' reverse xrange'''
xmax,xmin=pyl.xlim()
pyl.xlim(xmin,xmax)
def abu_chartMulti(self,cyclist, mass_range=None ,ilabel = True,imlabel = True,\
imagic = False,boxstable=True,lbound=20,plotaxis=[0,0,0,0],\
color_map='jet',pdf=False,title=None):
        '''
        Method that plots abundance charts and saves those
        figures to a .png file (by default). Plots a figure
        for each cycle in the argument cyclist.

        Input:
        cyclist: The list of cycles we are plotting
        ilabel: elemental labels off/on [False/True]
        imlabel: label for isotopic masses off/on [False/True]
        imagic: turn lines for magic numbers off/on [False/True]
        plotaxis: Set axis limit: If default [0,0,0,0] the complete
                  range in (N,Z) will be plotted
        pdf: Whether the figures are saved as PDF instead of PNG
        title: The title of the plots and the saved images
        '''
if self.which('dvipng')==None:
print "This method may need the third party program dvipng to operate"
print 'It is located at http://sourceforge.net/projects/dvipng/'
max_num = max(cyclist)
for i in xrange(len(cyclist)):
self.abu_chart( cyclist[i], mass_range ,ilabel,imlabel,imagic,\
boxstable,lbound,plotaxis,False,color_map)
            if title !=None:
                pl.title(title)
                name=title
            else:
                name='AbuChart'
number_str=padding_model_number(cyclist[i],max_num)
if not pdf:
pl.savefig(name+number_str+'.png', dpi=100)
else:
pl.savefig(name+number_str+'.pdf', dpi=200)
pl.close()
return None
#from mppnp.se
def abu_chart(self, cycle, mass_range=None ,ilabel = True,imlabel = True,imagic = False,
boxstable=True,lbound=20,plotaxis=[0,0,0,0],show=True,color_map='jet',ifig=None):
'''
Plots an abundance chart
input:
        cycle: The cycle we are looking in. If it is a list of cycles,
               this method will do a plot for each of these cycles
               and save them all to a file
ilabel: elemental labels off/on [False/True] defaults to True
imlabel: label for isotopic masses off/on [False/True], defaults to True
imagic: turn lines for magic numbers off/on [False/True] defaults to False
boxstable: plot the black boxes around the stable elements,
defaults to true
lbound: The lower bound of the colour spectrum plotted. Defaults
to 20
plotaxis: Set axis limit: If default [0,0,0,0] the complete
range in (N,Z) will be plotted. It equates to
[xMin,xMax,Ymin,Ymax]
        mass_range - a 1x2 array containing the lower and upper mass range.
                     If this is an instance of abu_vector, only isotopes
                     with an atomic mass within this range are plotted.
                     This will throw an error if the range does not make
                     sense, i.e. [45,2].
                     If None, it will plot over the entire range.
                     Defaults to None
        show: boolean of whether the plot should be displayed; useful when
              saving multiple plots using abu_chartMulti
color_map color map according to choices in matplotlib
(e.g. www.scipy.org/Cookbook/Matplotlib/Show_colormaps)
ifig figure number, defaults to cycle number
'''
if ifig == None:
ifig=cycle
if str(cycle.__class__)=="<type 'list'>":
self.abu_chartMulti(cycle, mass_range,ilabel,imlabel,imagic,boxstable,\
lbound,plotaxis,color_map)
return
plotType=self.classTest()
if mass_range!=None and mass_range[0]>mass_range[1]:
print 'Please input a proper mass range'
print 'Returning None'
return None
if plotType=='se':
cycle=self.se.findCycle(cycle)
nin=zeros(len(self.se.A))
zin=zeros(len(self.se.Z))
for i in xrange(len(nin)):
nin[i]=self.se.A[i]
zin[i]=self.se.Z[i]
for i in xrange(len(nin)):
nin[i]=nin[i]-zin[i]
yin=self.get(cycle, 'iso_massf')
isom=self.se.isomeric_states
masses = self.se.get(cycle,'mass')
            if mass_range != None:
                tmpyps=[]
                masses = self.se.get(cycle,'mass')
                masses.sort()
for i in xrange(len(masses)):
if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\
(masses[i]==mass_range[0] or masses[i]==mass_range[1]):
tmpyps.append(yin[i])
yin=tmpyps
tmp=zeros(len(yin[0]))
for i in xrange(len(yin)):
for j in xrange(len(yin[i])):
tmp[j]+=yin[i][j]
tmp=tmp/len(yin)
yin=tmp
elif plotType=='PPN':
ain=self.get('A',cycle)
zin=self.get('Z',cycle)
nin=ain-zin
yin=self.get('ABUNDANCE_MF',cycle)
isom=self.get('ISOM',cycle)
if mass_range != None:
tmpA=[]
tmpZ=[]
tmpIsom=[]
tmpyps=[]
for i in xrange(len(nin)):
if (ain[i] >mass_range[0] and ain[i]<mass_range[1])\
or (ain[i]==mass_range[0] or ain[i]==mass_range[1]):
tmpA.append(nin[i])
tmpZ.append(zin[i])
tmpIsom.append(isom[i])
tmpyps.append(yin[i])
zin=tmpZ
nin=tmpA
yin=tmpyps
isom=tmpIsom
else:
print 'This method, abu_chart, is not supported by this class'
print 'Returning None'
return None
# in case we call from ipython -pylab, turn interactive on at end again
turnoff=False
if not show:
try:
ioff()
turnoff=True
except NameError:
turnoff=False
nnmax = int(max(nin))+1
nzmax = int(max(zin))+1
nzycheck = zeros([nnmax,nzmax,3])
for i in range(len(nin)):
if isom[i]==1:
ni = int(nin[i])
zi = int(zin[i])
nzycheck[ni,zi,0] = 1
nzycheck[ni,zi,1] = yin[i]
#######################################################################
# elemental names: elname(i) is the name of element with Z=i
elname=self.elements_names
#### create plot
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
params = {'axes.labelsize': 15,
'text.fontsize': 12,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': True}
#pl.rcParams.update(params) #May cause Error, someting to do with tex
fig=pl.figure(ifig,figsize=(xdim,ydim),dpi=100)
axx = 0.10
axy = 0.10
axw = 0.85
axh = 0.8
ax=pl.axes([axx,axy,axw,axh])
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax.xaxis.set_major_locator(xmajorlocator)
ax.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax.yaxis.set_major_locator(ymajorlocator)
ax.yaxis.set_minor_locator(yminorlocator)
# color map choice for abundances
cmapa = cm.get_cmap(name=color_map)
# color map choice for arrows
cmapr = cm.autumn
# if a value is below the lower limit its set to white
cmapa.set_under(color='w')
cmapr.set_under(color='w')
# set value range for abundance colors (log10(Y))
norma = colors.Normalize(vmin=-lbound,vmax=0)
# set x- and y-axis scale aspect ratio to 1
ax.set_aspect('equal')
#print time,temp and density on top
temp = ' '#'%8.3e' %ff['temp']
time = ' '#'%8.3e' %ff['time']
dens = ' '#'%8.3e' %ff['dens']
#May cause Error, someting to do with tex
'''
#box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \
# + dens + ' g/cm$^{3}$', textprops=dict(color="k"))
anchored_box = AnchoredOffsetbox(loc=3,
child=box1, pad=0.,
frameon=False,
bbox_to_anchor=(0., 1.02),
bbox_transform=ax.transAxes,
borderpad=0.,
)
ax.add_artist(anchored_box)
'''
## Colour bar plotted
patches = []
color = []
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
# abundance
yab = nzycheck[j,i,1]
if yab == 0:
yab=1e-99
col =log10(yab)
patches.append(rect)
color.append(col)
p = PatchCollection(patches, cmap=cmapa, norm=norma)
p.set_array(array(color))
p.set_zorder(1)
ax.add_collection(p)
cb = pl.colorbar(p)
# colorbar label
cb.set_label('log$_{10}$(X)',fontsize='x-large')
# plot file name
graphname = 'abundance-chart'+str(cycle)
# Add black frames for stable isotopes
if boxstable:
for i in xrange(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
except:
continue
for j in xrange(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
                rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=3.)
rect.set_zorder(2)
ax.add_patch(rect)
# decide which array to take for label positions
iarr = 0
# plot element labels
if ilabel:
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck[:,z,iarr]))[0]-1
ax.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='x-small',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax):
a = z+n
if nzycheck[n,z,iarr]==1:
ax.text(n,z,a,horizontalalignment='center',verticalalignment='center',\
fontsize='xx-small',clip_on=True)
# plot lines at magic numbers
if imagic:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr]))[0]
line = ax.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr]))[0]
line = ax.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
xmax=max(nin)
ymax=max(zin)
ax.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax.axis(plotaxis)
# set x- and y-axis label
ax.set_xlabel('neutron number (A-Z)',fontsize=14)
ax.set_ylabel('proton number Z',fontsize=14)
pl.title('Isotopic Chart for cycle '+str(int(cycle)))
#fig.savefig(graphname)
print graphname,'is done'
if show:
pl.show()
if turnoff:
ion()
return
def abu_flux_chart(self, cycle,ilabel = True,imlabel = True,imagic = False,
boxstable=True,lbound=15,plotaxis=[0,0,0,0],which_flux=None,prange=None,profile='charged',show=True):
'''
        Plots an abundance and flux chart
        input:
        cycle: The cycle we are looking at. If it is a list of cycles,
               this method will do a plot for each of these cycles
               and save them all to a file
        ilabel: elemental labels off/on [False/True], defaults to True
        imlabel: label for isotopic masses off/on [False/True], defaults to True
        imagic: turn lines for magic numbers off/on [False/True], defaults to False
        boxstable: plot the black boxes around the stable isotopes,
                   defaults to True
        lbound: the lower bound of the colour spectrum plotted.
                Defaults to 15
        plotaxis: set axis limits; if the default [0,0,0,0] is kept, the
                  complete range in (N,Z) will be plotted. It equates to
                  [xmin,xmax,ymin,ymax]
        which_flux: 0 is for the nucleosynthesis flux plot, 1 is for the
                    energy flux plot; None is the default, that is option 0.
        prange: the range of fluxes (in dex) to be considered.
        profile: 'charged' is the ideal setting to show charged-particle
                 reaction flows; 'neutron' is the ideal setting for
                 neutron-capture flows.
        show: boolean for whether the plot should be displayed; useful when
              saving multiple plots with abu_chartMulti
'''
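        # Usage sketch (hypothetical cycle number and axis window; assumes a
        # ppn-style instance `p` and a matching flux_<cycle>.DAT file in the
        # working directory):
        #   p.abu_flux_chart(100, plotaxis=[0, 25, 0, 25], which_flux=0,
        #                    prange=6., profile='charged')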
#######################################################################
#### plot options
# Set axis limit: If default [0,0,0,0] the complete range in (N,Z) will
# be plotted, i.e. all isotopes, else specify the limits in
# plotaxis = [xmin,xmax,ymin,ymax]
#######################################################################
# read data file
#inpfile = cycle
#ff = fdic.ff(inpfile)
# with the flux implementation I am not using mass range for now.
# It may be introduced eventually.
mass_range = None
        if isinstance(cycle, list):
self.abu_chartMulti(cycle, mass_range,ilabel,imlabel,imagic,boxstable,\
lbound,plotaxis)
return
plotType=self.classTest()
#if mass_range!=None and mass_range[0]>mass_range[1]:
#print 'Please input a proper mass range'
#print 'Returning None'
#return None
if plotType=='se':
cycle=self.se.findCycle(cycle)
nin=zeros(len(self.se.A))
zin=zeros(len(self.se.Z))
for i in xrange(len(nin)):
nin[i]=self.se.A[i]
zin[i]=self.se.Z[i]
for i in xrange(len(nin)):
nin[i]=nin[i]-zin[i]
yin=self.get(cycle, 'iso_massf')
isom=self.se.isomeric_states
            masses = self.se.get(cycle,'mass')
            if mass_range != None:
                tmpyps=[]
                masses.sort()
for i in xrange(len(masses)):
if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\
(masses[i]==mass_range[0] or masses[i]==mass_range[1]):
tmpyps.append(yin[i])
yin=tmpyps
tmp=zeros(len(yin[0]))
for i in xrange(len(yin)):
for j in xrange(len(yin[i])):
tmp[j]+=yin[i][j]
tmp=tmp/len(yin)
yin=tmp
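                # sketch of the averaging above: for two selected shells with
                # X = [1e-3, 3e-3] for some species, tmp gives the plain mean
                # (1e-3 + 3e-3)/2 = 2e-3 for that species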
elif plotType=='PPN':
ain=self.get('A',cycle)
zin=self.get('Z',cycle)
nin=ain-zin
yin=self.get('ABUNDANCE_MF',cycle)
isom=self.get('ISOM',cycle)
if mass_range != None:
tmpA=[]
tmpZ=[]
tmpIsom=[]
tmpyps=[]
for i in xrange(len(nin)):
if (ain[i] >mass_range[0] and ain[i]<mass_range[1])\
or (ain[i]==mass_range[0] or ain[i]==mass_range[1]):
tmpA.append(nin[i])
tmpZ.append(zin[i])
tmpIsom.append(isom[i])
tmpyps.append(yin[i])
zin=tmpZ
nin=tmpA
yin=tmpyps
isom=tmpIsom
else:
            print 'This method, abu_flux_chart, is not supported by this class'
print 'Returning None'
return None
# in case we call from ipython -pylab, turn interactive on at end again
turnoff=False
if not show:
try:
ioff()
turnoff=True
except NameError:
turnoff=False
nnmax = int(max(nin))+1
nzmax = int(max(zin))+1
nnmax_plot = nnmax
nzmax_plot = nzmax
nzycheck = zeros([nnmax,nzmax,3])
nzycheck_plot = zeros([nnmax,nzmax,3])
for i in range(len(nin)):
if isom[i]==1:
ni = int(nin[i])
zi = int(zin[i])
nzycheck[ni,zi,0] = 1
nzycheck[ni,zi,1] = yin[i]
nzycheck_plot[ni,zi,0] = 1
#######################################################################
# elemental names: elname(i) is the name of element with Z=i
elname=self.elements_names
#### create plot
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
params = {'axes.labelsize': 15,
'text.fontsize': 12,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': True}
        #pl.rcParams.update(params) # may cause an error, something to do with TeX
#fig=pl.figure(figsize=(xdim,ydim),dpi=100)
fig=pl.figure()
if profile == 'charged':
ax1 = fig.add_subplot(1, 2, 1)
elif profile == 'neutron':
ax1 = fig.add_subplot(2, 1, 1)
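        # layout: 'charged' puts the abundance and flux panels side by side,
        # while 'neutron' stacks them vertically, which suits the long
        # horizontal chains traced out by neutron-capture flows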
#axx = 0.10
#axy = 0.10
#axw = 0.85
#axh = 0.8
#ax1=pl.axes([axx,axy,axw,axh])
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax1.xaxis.set_major_locator(xmajorlocator)
ax1.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax1.yaxis.set_major_locator(ymajorlocator)
ax1.yaxis.set_minor_locator(yminorlocator)
# color map choice for abundances
#cmapa = cm.jet
cmapa = cm.summer
# color map choice for arrows
cmapr = cm.summer
# if a value is below the lower limit its set to white
cmapa.set_under(color='w')
cmapr.set_under(color='w')
# set value range for abundance colors (log10(Y))
norma = colors.Normalize(vmin=-lbound,vmax=0)
# set x- and y-axis scale aspect ratio to 1
#ax1.set_aspect('equal')
#print time,temp and density on top
temp = ' '#'%8.3e' %ff['temp']
time = ' '#'%8.3e' %ff['time']
dens = ' '#'%8.3e' %ff['dens']
        # may cause an error, something to do with TeX
'''
#box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \
# + dens + ' g/cm$^{3}$', textprops=dict(color="k"))
anchored_box = AnchoredOffsetbox(loc=3,
child=box1, pad=0.,
frameon=False,
bbox_to_anchor=(0., 1.02),
bbox_transform=ax.transAxes,
borderpad=0.,
)
ax.add_artist(anchored_box)
'''
## Colour bar plotted
patches = []
color = []
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
# abundance
yab = nzycheck[j,i,1]
if yab == 0:
yab=1e-99
col =log10(yab)
patches.append(rect)
color.append(col)
p = PatchCollection(patches, cmap=cmapa, norm=norma)
p.set_array(array(color))
p.set_zorder(1)
ax1.add_collection(p)
cb = pl.colorbar(p)
# colorbar label
if profile == 'neutron':
cb.set_label('log$_{10}$(X)',fontsize='x-large')
# plot file name
graphname = 'abundance-flux-chart'+str(cycle)
# Add black frames for stable isotopes
if boxstable:
for i in xrange(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
                except ValueError:
continue
for j in xrange(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
                    rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=4.)
rect.set_zorder(2)
ax1.add_patch(rect)
# decide which array to take for label positions
iarr = 0
# plot element labels
if ilabel:
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck[:,z,iarr]))[0]-1
nmax = max(argwhere(nzycheck[:,z,iarr]))[0]+1
ax1.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='medium',clip_on=True)
ax1.text(nmax,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='medium',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax):
a = z+n
if nzycheck[n,z,iarr]==1:
ax1.text(n,z,a,horizontalalignment='center',verticalalignment='center',\
fontsize='xx-small',clip_on=True)
# plot lines at magic numbers
if imagic:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr]))[0]
line = ax1.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr]))[0]
line = ax1.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
xmax=max(nin)
ymax=max(zin)
ax1.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax1.axis(plotaxis)
# set x- and y-axis label
ax1.set_ylabel('Proton number',fontsize='xx-large')
if profile == 'charged':
ax1.set_xlabel('Neutron number',fontsize='xx-large')
#pl.title('Isotopic Chart for cycle '+str(int(cycle)))
#
# here below I read data from the flux_*****.DAT file.
#
file_name = 'flux_'+str(cycle).zfill(5)+'.DAT'
print file_name
f = open(file_name)
        f.readline()   # skip the header line; keep only the data lines
        lines = f.readlines()
f.close()
print_max_flux_in_plot = False
# color map choice for fluxes
#cmapa = cm.jet
cmapa = cm.autumn
# color map choice for arrows
cmapr = cm.autumn
# starting point of arrow
coord_x_1 = []
coord_y_1 = []
# ending point of arrow (option 1)
coord_x_2 = []
coord_y_2 = []
# ending point of arrow (option 2)
coord_x_3 = []
coord_y_3 = []
# fluxes
flux_read = []
flux_log10 = []
        if which_flux == None or which_flux == 0:
            print 'chart for nucleosynthesis fluxes [dYi/dt]'
            line_to_read = 9
        elif which_flux == 1:
            print 'chart for energy fluxes'
            line_to_read = 10
        else:
            # bail out here, otherwise line_to_read is undefined further down
            print "you have only option 0 or 1, not larger than 1"
            return
single_line = []
for i in range(len(lines)):
single_line.append(lines[i].split())
coord_y_1.append(float(single_line[i][1]))
coord_x_1.append(float(single_line[i][2])-coord_y_1[i])
coord_y_2.append(float(single_line[i][5]))
coord_x_2.append(float(single_line[i][6])-coord_y_2[i])
coord_y_3.append(float(single_line[i][7]))
coord_x_3.append(float(single_line[i][8])-coord_y_3[i])
try:
flux_read.append(float(single_line[i][line_to_read]))
except ValueError: # this is done to avoid format issues like 3.13725-181...
flux_read.append(1.0E-99)
flux_log10.append(log10(flux_read[i]+1.0e-99))
print file_name,' read!'
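        # Assumed flux_*.DAT layout, inferred from the 0-based column indices
        # used above (the header line is skipped when the file is read):
        #   col 1: Z of source isotope     col 2: A of source isotope
        #   col 5: Z of target, option 1   col 6: A of target, option 1
        #   col 7: Z of target, option 2   col 8: A of target, option 2
        #   col 9: nucleosynthesis flux [dYi/dt]    col 10: energy flux
        # (N is reconstructed as A - Z before plotting)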
# I need to select smaller sample, with only fluxes inside plotaxis.
if plotaxis!=[0,0,0,0]:
coord_y_1_small=[]
coord_x_1_small=[]
coord_y_2_small=[]
coord_x_2_small=[]
coord_y_3_small=[]
coord_x_3_small=[]
flux_log10_small = []
for i in range(len(flux_log10)):
I_am_in = 0
if coord_y_1[i] > plotaxis[2] and coord_y_1[i] < plotaxis[3] and coord_x_1[i] > plotaxis[0] and coord_x_1[i] < plotaxis[1]:
I_am_in = 1
coord_y_1_small.append(coord_y_1[i])
coord_x_1_small.append(coord_x_1[i])
coord_y_2_small.append(coord_y_2[i])
coord_x_2_small.append(coord_x_2[i])
coord_y_3_small.append(coord_y_3[i])
coord_x_3_small.append(coord_x_3[i])
flux_log10_small.append(flux_log10[i])
if coord_y_3[i] > plotaxis[2] and coord_y_3[i] < plotaxis[3] and coord_x_3[i] > plotaxis[0] and coord_x_3[i] < plotaxis[1] and I_am_in == 0:
I_am_in = 1
coord_y_1_small.append(coord_y_1[i])
coord_x_1_small.append(coord_x_1[i])
coord_y_2_small.append(coord_y_2[i])
coord_x_2_small.append(coord_x_2[i])
coord_y_3_small.append(coord_y_3[i])
coord_x_3_small.append(coord_x_3[i])
flux_log10_small.append(flux_log10[i])
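        # NOTE: the three assignments below override the ilabel/imlabel/imagic
        # arguments that were passed to this method; kept as in the original
        # flow, but be aware of it when calling with non-default values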
# elemental labels off/on [0/1]
ilabel = 1
# label for isotopic masses off/on [0/1]
imlabel = 1
# turn lines for magic numbers off/on [0/1]
imagic = 0
# flow is plotted over "prange" dex. If flow < maxflow-prange it is not plotted
if prange == None:
print 'plot range given by default'
prange = 8.
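        # e.g. with prange = 8. only fluxes within 8 dex of the strongest
        # flux in the window are drawn as arrows; everything weaker is omitted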
#############################################
#print flux_log10_small
# we should scale prange on plot_axis range, not on max_flux!
max_flux = max(flux_log10)
ind_max_flux = flux_log10.index(max_flux)
if plotaxis!=[0,0,0,0]:
max_flux_small = max(flux_log10_small)
if plotaxis==[0,0,0,0]:
nzmax = int(max(max(coord_y_1),max(coord_y_2),max(coord_y_3)))+1
nnmax = int(max(max(coord_x_1),max(coord_x_2),max(coord_x_3)))+1
coord_x_1_small = coord_x_1
coord_x_2_small = coord_x_2
coord_x_3_small = coord_x_3
coord_y_1_small = coord_y_1
coord_y_2_small = coord_y_2
coord_y_3_small = coord_y_3
flux_log10_small= flux_log10
max_flux_small = max_flux
else:
nzmax = int(max(max(coord_y_1_small),max(coord_y_2_small),max(coord_y_3_small)))+1
nnmax = int(max(max(coord_x_1_small),max(coord_x_2_small),max(coord_x_3_small)))+1
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
patches.append(rect)
nzycheck = zeros([nnmax_plot,nzmax,3])
coord_x_out = zeros(len(coord_x_2_small))
coord_y_out = zeros(len(coord_y_2_small))
        for i in range(len(flux_log10_small)):
            # numpy requires integer indices; the coordinates are read as floats
            nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),0] = 1
            nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),1] = flux_log10_small[i]
            if coord_x_2_small[i] >= coord_x_3_small[i]:
                coord_x_out[i] = coord_x_2_small[i]
                coord_y_out[i] = coord_y_2_small[i]
                nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),0] = 1
                nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),1] = flux_log10_small[i]
            elif coord_x_2_small[i] < coord_x_3_small[i]:
                coord_x_out[i] = coord_x_3_small[i]
                coord_y_out[i] = coord_y_3_small[i]
                nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),0] = 1
                nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),1] = flux_log10_small[i]
            if flux_log10_small[i]>max_flux_small-prange:
                nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),2] = 1
                nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),2] = 1
#### create plot
if profile == 'charged':
ax2 = fig.add_subplot(1, 2, 2)
elif profile == 'neutron':
ax2 = fig.add_subplot(2, 1, 2)
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax2.xaxis.set_major_locator(xmajorlocator)
ax2.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax2.yaxis.set_major_locator(ymajorlocator)
ax2.yaxis.set_minor_locator(yminorlocator)
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
format = 'pdf'
# set x- and y-axis scale aspect ratio to 1
#ax2.set_aspect('equal')
# Add black frames for stable isotopes
# Add black frames for stable isotopes
if boxstable:
for i in xrange(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
                except ValueError:
continue
for j in xrange(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
                    rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=4.)
rect.set_zorder(2)
ax2.add_patch(rect)
apatches = []
acolor = []
m = 0.8/prange
vmax=ceil(max(flux_log10_small))
vmin=max(flux_log10_small)-prange
b=-vmin*m+0.1
normr = colors.Normalize(vmin=vmin,vmax=vmax)
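        # the linear map w = m*log10(f) + b sends log10(f) = vmin to a width
        # of 0.1 and log10(f) = vmin + prange to 0.9, so stronger fluxes get
        # proportionally wider arrows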
ymax=0.
xmax=0.
for i in range(len(flux_log10_small)):
x = coord_x_1_small[i]
y = coord_y_1_small[i]
dx = coord_x_out[i]-coord_x_1_small[i]
dy = coord_y_out[i]-coord_y_1_small[i]
if flux_log10_small[i]>=vmin:
arrowwidth = flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
xy = x-0.5,y-0.5
                rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=1.)
patches.append(rect)
xy = x+dx-0.5,y+dy-0.5
                rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=1.)
patches.append(rect)
p = PatchCollection(patches,norm=0,facecolor='w')
p.set_zorder(1)
ax2.add_collection(p)
a = PatchCollection(apatches, cmap=cmapr, norm=normr)
a.set_array(array(acolor))
a.set_zorder(3)
ax2.add_collection(a)
        cb = pl.colorbar(a)
        # colorbar label (the arrows are coloured by log10 of the flux)
        cb.set_label('log$_{10}$(f)',fontsize='x-large')
# decide which array to take for label positions
iarr = 2
# plot element labels
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck_plot[:,z,iarr-2]))[0]-1
nmax = max(argwhere(nzycheck_plot[:,z,iarr-2]))[0]+1
ax2.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='medium',clip_on=True)
ax2.text(nmax,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='medium',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax_plot):
a = z+n
if nzycheck_plot[n,z,iarr-2]==1:
ax2.text(n,z,a,horizontalalignment='center',verticalalignment='center',fontsize='xx-small',clip_on=True)
# plot lines at magic numbers
if imagic==1:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr-2]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr-2]))[0]
line = ax2.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr-2]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr-2]))[0]
line = ax2.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
ax2.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax2.axis(plotaxis)
# set x- and y-axis label
ax2.set_xlabel('Neutron number',fontsize='xx-large')
if profile == 'neutron':
ax2.set_ylabel('Proton number',fontsize='xx-large')
if which_flux == None or which_flux == 0:
max_flux_label="max flux = "+str('{0:.4f}'.format(max_flux))
elif which_flux == 1:
max_flux_label="max energy flux = "+str('{0:.4f}'.format(max_flux))
if print_max_flux_in_plot:
ax2.text(plotaxis[1]-1.8,plotaxis[2]+0.1,max_flux_label,fontsize=10.)
#fig.savefig(graphname)
print graphname,'is done'
if show:
pl.show()
if turnoff:
ion()
return
def iso_abundMulti(self,cyclist, stable=False,amass_range=None,mass_range=None,
ylim=[0,0],ref=-1,decayed=False,include_title=False,title=None,pdf=False,\
color_plot=True,grid=False,point_set=1):
'''
Method that plots figures and saves those figures to a .png file
        (by default). Plots a figure for each cycle in the argument cyclist.
        Can be called via the iso_abund method by passing a list as cycle.
Documentation there.
'''
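        # Usage sketch (hypothetical cycle list; filenames follow the
        # 'IsoAbund<cycle>.png' pattern built below):
        #   p.iso_abundMulti([100, 200, 300], stable=True, amass_range=[50, 90])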
max_num = max(cyclist)
for i in xrange(len(cyclist)):
self.iso_abund(cyclist[i],stable,amass_range,mass_range,ylim,ref,\
decayed=decayed,show=False,color_plot=color_plot,grid=False,\
point_set=1,include_title=include_title)
if title !=None:
pl.title(title)
else:
name='IsoAbund'
number_str=padding_model_number(cyclist[i],max_num)
if not pdf:
pl.savefig(name+number_str+'.png', dpi=200)
else:
pl.savefig(name+number_str+'.pdf', dpi=200)
pl.clf()
return None
def iso_abund(self,cycle, stable=False,amass_range=None,mass_range=None,ylim=[0,0],
ref=-1,show=True,log_logic=True,decayed=False,color_plot=True,grid=False,\
point_set=1,include_title=False):
        ''' plot the abundance of all the chemical species
        cycle - a string/integer of the cycle of interest.
                If it is a list of cycles, this method will do a plot
                for each cycle and save them to a file
        stable - a boolean of whether to filter out the unstable isotopes.
                 Defaults to False
        amass_range - a 1x2 array containing the lower and upper atomic
                      mass range. Optional; if None, plot the entire
                      available atomic mass range
        mass_range - a 1x2 array containing the lower and upper mass range.
                     If this is an instance of abu_vector this will
                     only plot isotopes that have an atomic mass
                     within this range. This will throw an error if
                     the range does not make sense, i.e. [45,2].
                     If None, it will plot over the entire range.
                     Defaults to None
        ref - reference cycle. If it is not -1 (default), this method will
              plot the abundances of cycle divided by those of the cycle
              of the same instance given in the ref variable. If ref is a
              list it will be interpreted to have two elements:
              ref=['dir/of/ref/run',cycle], which uses a reference cycle
              from another run. If any abundance in the reference cycle
              is zero, it will be replaced with 1e-99. The default, -1,
              does nothing.
        ylim - a 1x2 array containing the lower and upper y limits.
               Defaults to [0,0], in which case ylim will be determined
               automatically
        show - display the plot on screen, True (default) or False
        log_logic - True[/False] to plot abundances in log scale or linear
        decayed - [True/]False to plot the decayed distribution (True) or
                  the live, undecayed distribution
        color_plot - color dots and lines, True or False
        grid - add grid, True or default False
        point_set - 0 (default), 1 or 2 to select one of three point sets,
                    useful for multiple abundances or ratios in one plot
        include_title - add a title to the plot, True or False (default)
'''
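        # Usage sketch (hypothetical cycle and ranges):
        #   p.iso_abund(100, stable=True, amass_range=[50, 90], ylim=[-9, 0])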
plotType=self.classTest()
        if isinstance(cycle, list):
self.iso_abundMulti(cycle, stable,amass_range,mass_range,ylim,ref,\
decayed,include_title,color_plot=color_plot,grid=False,point_set=point_set)
return
if mass_range!=None and mass_range[0]>mass_range[1]:
print 'Please input a proper mass range'
print 'Returning None'
return None
if amass_range!=None and amass_range[0]>amass_range[1]:
print 'Please input a proper Atomic mass range'
print 'Returning None'
return None
if plotType=='se':
if decayed:
                print 'Decay option not yet implemented for mppnp - but it is easy to do! Consider investing the time!'
return None
# get things as arrays
cycle=self.se.findCycle(cycle)
a_iso_to_plot = array(self.se.A)
abunds = self.get(cycle,'iso_massf')
isotope_to_plot = array(self.se.isotopes)
z_iso_to_plot = array(self.se.Z)
isomers_to_plot = array(self.se.isomeric_states)
if ref >-1:
ref=self.se.findCycle(ref)
abundsRef=self.se.get(ref,'iso_massf')
masses = self.se.get(cycle,'mass')
if mass_range == None:
print 'Using default mass range'
mass_range = [min(masses),max(masses)]
masses.sort()
mass_range.sort()
if amass_range == None:
amass_range=[min(a_iso_to_plot),max(a_iso_to_plot)]
# remove neutrons - this could move in the non- se/PPN specific part below
if 0 in z_iso_to_plot:
ind_neut = where(z_iso_to_plot==0)[0][0]
a_iso_to_plot = delete(a_iso_to_plot,ind_neut)
z_iso_to_plot = delete(z_iso_to_plot,ind_neut)
isomers_to_plot = delete(isomers_to_plot,ind_neut)
isotope_to_plot = delete(isotope_to_plot,ind_neut)
abunds = delete(abunds,ind_neut,1)
if ref >-1:
abundsRef = delete(abundsRef,ind_neut,1)
# extract amass_range
acon=(a_iso_to_plot>=amass_range[0]) & (a_iso_to_plot<=amass_range[1])
isomers_to_plot = isomers_to_plot[acon]
isotope_to_plot = isotope_to_plot[acon]
z_iso_to_plot = z_iso_to_plot[acon]
abunds = abunds.T[acon].T
if ref >-1:
abundsRef = abundsRef.T[acon].T
a_iso_to_plot = a_iso_to_plot[acon]
el_iso_to_plot = array([x.split('-')[0] for x in isotope_to_plot.tolist()])
# apply mass range
if mass_range == None:
print 'Using default mass range'
mass_range = [min(masses),max(masses)]
mass_range.sort()
aabs = []
if ref >-1:
cyc = [cycle,ref]
abus = [abunds,abundsRef]
else:
cyc = [cycle]
abus = [abunds]
for cc,aa in zip(cyc,abus):
masses = self.se.get(cc,'mass')
masses.sort()
dmass = masses[1:] - masses[:-1] # I should check the grid definition
dmass = append(dmass,0.)
mcon = (masses>=mass_range[0]) & (masses<=mass_range[1])
dmass = dmass[mcon]
aa = aa[mcon]
# average over mass range:
aa = (aa.T*dmass).T.sum(0)
aa = aa / (mass_range[1] - mass_range[0])
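                # worked sketch of the averaging above: with shells of width
                # dmass = [0.5, 0.5], abundances X = [1e-3, 3e-3] and a mass
                # range of width 1, this yields (0.5*1e-3 + 0.5*3e-3)/1 = 2e-3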
# abunds has now length of isotope_to_plot
aabs.append(aa)
if ref >-1:
abunds = aabs[0]/(aabs[1]+1.e-99)
else:
abunds = aabs[0]
self.a_iso_to_plot=a_iso_to_plot
self.isotope_to_plot=isotope_to_plot
self.z_iso_to_plot=z_iso_to_plot
self.el_iso_to_plot=el_iso_to_plot
self.abunds=abunds
self.isomers_to_plot=isomers_to_plot
# self.isotopes = self.se.isotopes
elif plotType=='PPN':
print "This method adds the following variables to the instance:"
print "a_iso_to_plot mass number of plotted range of species"
print "isotope_to_plot corresponding list of isotopes"
print "z_iso_to_plot corresponding charge numbers"
print "el_iso_to_plot corresponding element names"
print "abunds corresponding abundances"
print "isom isomers and their abundance"
self.get(cycle,decayed=decayed)
            if ref != -1:
if type(ref) is list: # reference cycle from other run
import ppn
pp=ppn.abu_vector(ref[0])
abunds_pp=pp.get(ref[1],decayed=decayed)
self.abunds=self.abunds/pp.abunds
else:
abunds=self.abunds
self.get(ref,decayed=decayed)
self.abunds=abunds/(self.abunds+1.e-99)
if amass_range == None:
amass_range=[min(self.a_iso_to_plot),max(self.a_iso_to_plot)]
aa=ma.masked_outside(self.a_iso_to_plot,amass_range[0],amass_range[1])
isotope_to_plot=ma.array(self.isotope_to_plot,mask=aa.mask).compressed()
z_iso_to_plot=ma.array(self.z_iso_to_plot,mask=aa.mask).compressed()
el_iso_to_plot=ma.array(self.el_iso_to_plot,mask=aa.mask).compressed()
abunds=ma.array(self.abunds,mask=aa.mask).compressed()
a_iso_to_plot=aa.compressed()
isomers_to_plot=[]
for i in xrange(len(self.isom)):
if int(self.isom[i][0].split('-')[1])>100:
isomers_to_plot.append(self.isom[i])
self.a_iso_to_plot=a_iso_to_plot
self.isotope_to_plot=isotope_to_plot
self.z_iso_to_plot=z_iso_to_plot
self.el_iso_to_plot=el_iso_to_plot
self.abunds=abunds
self.isomers_to_plot=isomers_to_plot
else:
print 'This method, iso_abund, is not supported by this class'
print 'Returning None'
return None
print 'Using the following conditions:'
if plotType=='se':
print '\tmass_range:', mass_range[0], mass_range[1]
print '\tAtomic mass_range:', amass_range[0], amass_range[1]
print '\tcycle: ',cycle
print '\tplot only stable:',stable
print '\tplot decayed: ',decayed
if stable: # remove unstables:
# For the element that belongs to the isotope at index 5 in isotope_to_plot
# (C-12) the following gives the mass numbers of stable elements:
# self.stable_el[self.stable_names.index(el_iso_to_plot[5])][1:]
ind_delete=[]
for i in range(len(isotope_to_plot)):
if a_iso_to_plot[i] not in self.stable_el[self.stable_names.index(el_iso_to_plot[i])][1:]:
ind_delete.append(i)
a_iso_to_plot = delete(a_iso_to_plot, ind_delete)
z_iso_to_plot = delete(z_iso_to_plot, ind_delete)
isomers_to_plot = delete(isomers_to_plot,ind_delete)
isotope_to_plot = delete(isotope_to_plot,ind_delete)
el_iso_to_plot = delete(el_iso_to_plot, ind_delete)
abunds = delete(abunds, ind_delete)
el_list=[] # list of elements in el_iso_to_plot
for el in self.elements_names:
if el in el_iso_to_plot:
el_list.append(el)
abund_plot = [] # extract for each element an abundance and associated
mass_num = [] # mass number array, sorted by mass number
for el in el_list:
numbers = a_iso_to_plot[(el_iso_to_plot==el)]
abund_plot.append(abunds[(el_iso_to_plot==el)][argsort(numbers)])
mass_num.append(sort(numbers))
# now plot:
plot_type = ['-','--','-.',':','-']
pl_index = 0
points = [['o','^','p','h','*'],['x','+','D','>','s'],['H','v','<','*','3']]
if color_plot:
colors = ['g','r','c','m','k']
else:
colors = ['k','k','k','k','k']
ylim1 = 1.e99
ylim2 = -1.e99
for j in xrange(len(abund_plot)): #Loop through the elements of interest
# Process the line
#print 'processing line'
for l in xrange(len(abund_plot[j])):
# print mass_num[j][l]
# print abund_plot[j][l]
if abund_plot[j][l] == 0:
abund_plot[j][l] = 1e-99
a_dum=zeros(len(abund_plot[j])) # this I (FH) have to do because for some
if log_logic == False: # reason log10(abu_abund[j]) does not work
a_dum = abund_plot[j] # although abu_abund[j] is a numpy array?!?
else:
for ii in range(len(abund_plot[j])):
a_dum[ii]=log10(abund_plot[j][ii])
this_label=str(colors[pl_index]+points[point_set][pl_index]+\
plot_type[pl_index])
pl.plot(mass_num[j],a_dum,this_label,markersize=10)
abu_max = max(a_dum)
max_index=where(a_dum==abu_max)[0][0]
coordinates=[mass_num[j][max_index],abu_max]
pl.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j])
pl_index+=1
if pl_index > 4:
pl_index = 0
ylim1=min(ylim1,min(a_dum))
ylim2=max(ylim2,max(a_dum))
# now trimming the ylims
if log_logic:
dylim=0.05*(ylim2-ylim1)
ylim1 = ylim1 -dylim
ylim2 = ylim2 +dylim
            if ref != -1:
ylim2 = min(ylim2,4)
ylim1 = max(ylim1,-4)
else:
ylim2 = min(ylim2,0.2)
ylim1 = max(ylim1,-13)
else:
ylim1 = ylim1 *0.8
ylim2 = ylim2 *1.1
if include_title:
if plotType=='se':
if ref == -1:
title = str('Range %4.2f' %mass_range[0]) + str('-%4.2f' %mass_range[1]) +\
str(' for cycle %d' %int(cycle))
else:
title = str('Range %4.2f' %mass_range[0]) + \
str('-%4.2f' %mass_range[1]) + str(' for cycle %d' %int(cycle))+\
str(' relative to cycle %d' %int(ref))
else:
if ref == -1:
title = str('Cycle %d' %int(cycle))
else:
title = str('Cycle %d' %int(cycle))+\
str(' relative to cycle %d' %int(ref))
print "including title: ..."
pl.title(title)
if ylim[0] == 0 and ylim[1] == 0:
pl.ylim(ylim1,ylim2)
else:
pl.ylim(ylim[0],ylim[1])
pl.xlim([amass_range[0]-.5,amass_range[1]+.5])
pl.xlabel('mass number (A)',fontsize=14)
        if ref != -1:
if log_logic:
pl.ylabel(r'log abundance ratio',fontsize=14)
else:
pl.ylabel(r'abundance ratio',fontsize=14)
else:
if log_logic:
pl.ylabel(r'log mass fraction ',fontsize=14)
else:
pl.ylabel(r'mass fraction',fontsize=14)
if grid:
pl.grid()
if show:
pl.show()
if amass_range != None:
minimum_mass = amass_range[0]
maximum_mass = amass_range[1]
elif mass_range != None:
minimum_mass = mass_range[0]
maximum_mass = mass_range[1]
else:
minimum_mass = 0
maximum_mass = 200
if log_logic == False:
pl.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-')
else:
pl.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-')
ax=pl.axes()
labelsx=[]
if (maximum_mass-minimum_mass) > 100:
delta_labelsx = 10
else:
delta_labelsx = 5
iii = amass_range[0]%delta_labelsx
if iii == 0:
labelsx.append(str(amass_range[0]))
else:
labelsx.append(' ')
iii = iii+1
kkk = 0
for label1 in range(amass_range[1]-amass_range[0]):
if iii == 5:
kkk = kkk+1
labelsx.append(str((iii*kkk)+amass_range[0]-(amass_range[0]%5)))
iii = 0
iii = iii+1
else:
labelsx.append(' ')
iii = iii+1
if delta_labelsx == 5:
xticks = arange(amass_range[0],amass_range[1],1)
pl.xticks(xticks,labelsx)
else:
pl.xticks()
        # debugging aid:
        # print 'LEN LABELS= ', len(labelsx)
        # for bbb in range(len(labelsx)):
        #     print labelsx[bbb]
return
def plotprofMulti(self,ini,end,delta,what_specie,xlim1,xlim2,ylim1,ylim2,symbol=None):
''' create a movie with mass fractions vs mass coordinate
between xlim1 and xlim2, ylim1 and ylim2. Only works with instances of se
ini - initial model
end - final model
delta - sparsity factor of the frames
what_specie - array with species in the plot
xlim1, xlim2 - mass coordinate range
ylim1, ylim2 - mass fraction coordinate range
        symbol - array indicating which symbol you want to use. Must be of the same length as the what_specie array.
'''
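        # Usage sketch (hypothetical models and species; writes frames named
        # '<model>_test.png' as in the loop below):
        #   p.plotprofMulti(100, 500, 100, ['H-1', 'He-4'], 0., 2., 1e-10, 1.)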
plotType=self.classTest()
if plotType=='se':
for i in range(ini,end+1,delta):
step = int(i)
#print step
if symbol==None:
symbol_dummy = '-'
for j in range(len(what_specie)):
self.plot_prof_1(step,what_specie[j],xlim1,xlim2,ylim1,ylim2,symbol_dummy)
else:
for j in range(len(what_specie)):
symbol_dummy = symbol[j]
self.plot_prof_1(step,what_specie[j],xlim1,xlim2,ylim1,ylim2,symbol_dummy)
#
filename = str('%03d' % step)+'_test.png'
pl.savefig(filename, dpi=400)
print 'wrote file ', filename
#
pl.clf()
else:
print 'This method is not supported for '+str(self.__class__)
return
# From mesa_profile
def plot_prof_1(self,species,keystring,xlim1,xlim2,ylim1,ylim2,symbol=None, show=False):
''' plot one species for cycle between xlim1 and xlim2
        Only works with instances of se and mesa_profile
species - which species to plot
        keystring - label that appears in the plot or, in the case of se,
        a cycle or list of cycles
xlim1, xlim2 - mass coordinate range
ylim1, ylim2 - mass fraction coordinate range
symbol - indicate which symbol you want to use, if required. '''
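        # Usage sketch (hypothetical cycle and species name; assumes an se
        # instance `p`):
        #   p.plot_prof_1('H-1', 100, 0., 2., 1e-10, 1., show=True)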
plotType=self.classTest()
if plotType=='se':
#tot_mass=self.se.get(keystring,'total_mass')
tot_mass=self.se.get('mini')
age=self.se.get(keystring,'age')
mass=self.se.get(keystring,'mass')
Xspecies=self.se.get(keystring,'iso_massf',species)
mod=keystring
elif plotType=='mesa_profile':
tot_mass=self.header_attr['star_mass']
age=self.header_attr['star_age']
mass=self.get('mass')
mod=self.header_attr['model_number']
Xspecies=self.get(species)
else:
print 'This method is not supported for '+str(self.__class__)
return
if symbol == None:
symbol = '-'
x,y=self.logarithm(Xspecies,mass,True,False,10)
#print x
pl.plot(y,x,symbol,label=str(species))
pl.xlim(xlim1,xlim2)
pl.ylim(ylim1,ylim2)
pl.legend()
pl.xlabel('$Mass$ $coordinate$', fontsize=20)
pl.ylabel('$X_{i}$', fontsize=20)
pl.title('Mass='+str(tot_mass)+', Time='+str(age)+' years, cycle='+str(mod))
if show:
pl.show()
# From mesa.star_log
def flux_chart(file_name,plotaxis,plot_type,which_flux=None,I_am_the_target=None,prange=None):
'''
    Plots a chart with fluxes
    input:
    file_name: name of the flux file we are looking at.
    plotaxis: [xmin,xmax,ymin,ymax], where the x axis is neutron number and the y axis is Z.
    plot_type: 0 for the standard flux plot, 1 if fluxes are focused on one species.
    which_flux: 0 is for the nucleosynthesis flux plot, 1 is for the energy flux plot,
    2 is for the timescale plot; None is the default, that is option 0.
    I_am_the_target: a 1x2 array used only if plot_type=1, given by [neutron number, proton number].
    prange: the range of fluxes (in dex) to be considered.
    Note: the script is terribly slow and needs to be improved. For now I put it here in data_plot:
    [1]: import data_plot
    [2]: data_plot.flux_chart('file_name',[xmin,xmax,ymin,ymax],int,which_flux,I_am_the_target,prange)
    The pdf is created, but an error popped up and the gui was empty. To avoid this, I had to set 'text.usetex': False. See below.
    Also, for the same reason no label on the x axis is written using 'text.usetex': True.
    Note also that the GUI works really slowly with this plot, so we need to optimize it from the graphic point of view.
    This needs to be included in ppn.py I think, and set in the multi option too, in case we want to read more flux files at the same time.
    Finally, you need to have stable.dat available to read in to make it work....
'''
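    # Concrete sketch of the call above (hypothetical file name and window;
    # requires stable.dat in the working directory):
    #   data_plot.flux_chart('flux_00100.DAT', [0, 25, 0, 25], 0,
    #                        which_flux=0, prange=6.)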
import numpy as np
import matplotlib.pyplot as plt
    from matplotlib import colors, cm   # matplotlib.mpl was removed; import directly
from matplotlib.patches import Rectangle, Arrow
from matplotlib.collections import PatchCollection
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea
import sys
print_max_flux_in_plot = True
f = open(file_name)
    f.readline()   # skip the header line; keep only the data lines
    lines = f.readlines()
f.close()
# starting point of arrow
coord_x_1 = []
coord_y_1 = []
# ending point of arrow (option 1)
coord_x_2 = []
coord_y_2 = []
# ending point of arrow (option 2)
coord_x_3 = []
coord_y_3 = []
# fluxes
flux_read = []
flux_log10 = []
    if which_flux == None or which_flux == 0:
        print 'chart for nucleosynthesis fluxes [dYi/dt]'
        line_to_read = 9
    elif which_flux == 1:
        print 'chart for energy fluxes'
        line_to_read = 10
    elif which_flux == 2:
        print 'chart for timescales'
        line_to_read = 11
    else:
        # bail out here, otherwise line_to_read is undefined further down
        print "you have only option 0, 1 or 2, not larger than 2"
        return
single_line = []
for i in range(len(lines)):
single_line.append(lines[i].split())
coord_y_1.append(float(single_line[i][1]))
coord_x_1.append(float(single_line[i][2])-coord_y_1[i])
coord_y_2.append(float(single_line[i][5]))
coord_x_2.append(float(single_line[i][6])-coord_y_2[i])
coord_y_3.append(float(single_line[i][7]))
coord_x_3.append(float(single_line[i][8])-coord_y_3[i])
try:
flux_read.append(float(single_line[i][line_to_read]))
except ValueError: # this is done to avoid format issues like 3.13725-181...
flux_read.append(1.0E-99)
flux_log10.append(np.log10(flux_read[i]+1.0e-99))
print 'file read!'
# I need to select smaller sample, with only fluxes inside plotaxis.
coord_y_1_small=[]
coord_x_1_small=[]
coord_y_2_small=[]
coord_x_2_small=[]
coord_y_3_small=[]
coord_x_3_small=[]
flux_log10_small = []
for i in range(len(flux_log10)):
I_am_in = 0
if coord_y_1[i] > plotaxis[2] and coord_y_1[i] < plotaxis[3] and coord_x_1[i] > plotaxis[0] and coord_x_1[i] < plotaxis[1]:
I_am_in = 1
coord_y_1_small.append(coord_y_1[i])
coord_x_1_small.append(coord_x_1[i])
coord_y_2_small.append(coord_y_2[i])
coord_x_2_small.append(coord_x_2[i])
coord_y_3_small.append(coord_y_3[i])
coord_x_3_small.append(coord_x_3[i])
flux_log10_small.append(flux_log10[i])
if coord_y_3[i] > plotaxis[2] and coord_y_3[i] < plotaxis[3] and coord_x_3[i] > plotaxis[0] and coord_x_3[i] < plotaxis[1] and I_am_in == 0:
I_am_in = 1
coord_y_1_small.append(coord_y_1[i])
coord_x_1_small.append(coord_x_1[i])
coord_y_2_small.append(coord_y_2[i])
coord_x_2_small.append(coord_x_2[i])
coord_y_3_small.append(coord_y_3[i])
coord_x_3_small.append(coord_x_3[i])
flux_log10_small.append(flux_log10[i])
if plot_type == 1:
print 'I_am_the_target=',I_am_the_target
#I_am_the_target = [56.-26.,26.]
# here below need for plotting
# plotaxis = [xmin,xmax,ymin,ymax]
#plotaxis=[1,20,1,20]
#plotaxis=[0,0,0,0]
# elemental labels off/on [0/1]
ilabel = 1
# label for isotopic masses off/on [0/1]
imlabel = 1
# turn lines for magic numbers off/on [0/1]
imagic = 0
# flow is plotted over "prange" dex. If flow < maxflow-prange it is not plotted
if prange == None:
print 'plot range given by default'
prange = 8.
#############################################
# we should scale prange on plot_axis range, not on max_flux!
max_flux = max(flux_log10)
ind_max_flux = flux_log10.index(max_flux)
max_flux_small = max(flux_log10_small)
min_flux = min(flux_log10)
ind_min_flux = flux_log10.index(min_flux)
min_flux_small = min(flux_log10_small)
#nzmax = int(max(max(coord_y_1),max(coord_y_2),max(coord_y_3)))+1
#nnmax = int(max(max(coord_x_1),max(coord_x_2),max(coord_x_3)))+1
nzmax = int(max(max(coord_y_1_small),max(coord_y_2_small),max(coord_y_3_small)))+1
nnmax = int(max(max(coord_x_1_small),max(coord_x_2_small),max(coord_x_3_small)))+1
nzycheck = np.zeros([nnmax,nzmax,3])
coord_x_out = np.zeros(len(coord_x_2_small))
coord_y_out = np.zeros(len(coord_y_2_small))
    for i in range(len(flux_log10_small)):
        # numpy requires integer indices; the coordinates are read as floats
        nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),0] = 1
        nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),1] = flux_log10_small[i]
        if coord_x_2_small[i] >= coord_x_3_small[i]:
            coord_x_out[i] = coord_x_2_small[i]
            coord_y_out[i] = coord_y_2_small[i]
            nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),0] = 1
            nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),1] = flux_log10_small[i]
        elif coord_x_2_small[i] < coord_x_3_small[i]:
            coord_x_out[i] = coord_x_3_small[i]
            coord_y_out[i] = coord_y_3_small[i]
            nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),0] = 1
            nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),1] = flux_log10_small[i]
        # parentheses added: without them the 'or' short-circuited the flux cut
        if (which_flux == None or which_flux < 2) and flux_log10_small[i]>max_flux_small-prange:
            nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),2] = 1
            nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),2] = 1
        elif which_flux == 2 and flux_log10_small[i]<min_flux_small+prange:
            nzycheck[int(coord_x_1_small[i]),int(coord_y_1_small[i]),2] = 1
            nzycheck[int(coord_x_out[i]),int(coord_y_out[i]),2] = 1
#######################################################################
# elemental names: elname(i) is the name of element with Z=i
elname= ('none','H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe',
'Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb',
'Te', 'I','Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os',
'Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th','Pa','U','Np','Pu')
#### create plot
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
format = 'pdf'
    # note that I had to set 'text.usetex': False to avoid an Exception in the
    # Tkinter callback and to make the GUI work properly. Why? Some missing package?
params = {'axes.labelsize': 15,
'text.fontsize': 15,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': False}
plt.rcParams.update(params)
fig=plt.figure(figsize=(xdim,ydim),dpi=100)
axx = 0.10
axy = 0.10
axw = 0.85
axh = 0.8
ax=plt.axes([axx,axy,axw,axh])
# color map choice for abundances
cmapa = cm.jet
# color map choice for arrows
if which_flux == None or which_flux < 2:
cmapr = cm.autumn
elif which_flux == 2:
cmapr = cm.autumn_r
# if a value is below the lower limit its set to white
cmapa.set_under(color='w')
cmapr.set_under(color='w')
# set value range for abundance colors (log10(Y))
norma = colors.Normalize(vmin=-20,vmax=0)
# set x- and y-axis scale aspect ratio to 1
ax.set_aspect('equal')
#print time,temp and density on top
#temp = '%8.3e' %ff['temp']
#time = '%8.3e' %ff['time']
#dens = '%8.3e' %ff['dens']
#box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \
# + dens + ' g/cm$^{3}$', textprops=dict(color="k"))
#anchored_box = AnchoredOffsetbox(loc=3,
# child=box1, pad=0.,
# frameon=False,
# bbox_to_anchor=(0., 1.02),
# bbox_transform=ax.transAxes,
# borderpad=0.,
# )
#ax.add_artist(anchored_box)
# Add black frames for stable isotopes
f = open('stable.dat')
head = f.readline()
stable = []
for line in f.readlines():
tmp = line.split()
zz = int(tmp[2])
nn = int(tmp[3])
xy = nn-0.5,zz-0.5
        rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=3.)
rect.set_zorder(2)
ax.add_patch(rect)
apatches = []
acolor = []
    m = 0.8/prange
if which_flux == None or which_flux < 2:
vmax=np.ceil(max(flux_log10_small))
vmin=max(flux_log10_small)-prange
b=-vmin*m+0.1
elif which_flux == 2:
vmin=min(flux_log10_small)
vmax=np.ceil(min(flux_log10_small)+prange)
b=vmax*m+0.1
if which_flux == None or which_flux < 3:
normr = colors.Normalize(vmin=vmin,vmax=vmax)
print 'vmin and vmax =',vmin,vmax
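    # for timescales (which_flux == 2) the width map is inverted,
    # w = -m*log10(t) + b with b = vmax*m + 0.1, so log10(t) = vmax gives a
    # width of 0.1 and log10(t) = vmax - prange gives 0.9: shorter
    # timescales are drawn with wider arrows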
ymax=0.
xmax=0.
for i in range(len(flux_log10_small)):
x = coord_x_1_small[i]
y = coord_y_1_small[i]
dx = coord_x_out[i]-coord_x_1_small[i]
dy = coord_y_out[i]-coord_y_1_small[i]
if plot_type == 0:
if which_flux == None or which_flux < 2:
if flux_log10_small[i]>=vmin:
arrowwidth = flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
elif which_flux == 2:
if flux_log10_small[i]<=vmax:
arrowwidth = -flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
elif plot_type == 1 and which_flux != 2:
if x==I_am_the_target[0] and y==I_am_the_target[1] and flux_log10_small[i]>=vmin:
arrowwidth = flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
if x+dx==I_am_the_target[0] and y+dy==I_am_the_target[1] and flux_log10_small[i]>=vmin:
arrowwidth = flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
elif plot_type == 1 and which_flux == 2:
if x==I_am_the_target[0] and y==I_am_the_target[1] and flux_log10_small[i]<=vmax:
arrowwidth = -flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
if x+dx==I_am_the_target[0] and y+dy==I_am_the_target[1] and flux_log10_small[i]<=vmax:
arrowwidth = -flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
xy = x-0.5,y-0.5
        rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=1.)
rect.set_zorder(2)
ax.add_patch(rect)
xy = x+dx-0.5,y+dy-0.5
        rect = Rectangle(xy,1,1,ec='k',fc='None',fill=False,lw=1.)
rect.set_zorder(2)
ax.add_patch(rect)
a = PatchCollection(apatches, cmap=cmapr, norm=normr)
a.set_array(np.array(acolor))
a.set_zorder(3)
ax.add_collection(a)
cb = plt.colorbar(a)
# colorbar label
if which_flux == None or which_flux == 0:
cb.set_label('log$_{10}$(f)')
elif which_flux ==1:
cb.set_label('log$_{10}$(E)')
elif which_flux ==2:
cb.set_label('log$_{10}$(timescale)')
# plot file name
graphname = 'flow-chart.'+format
# decide which array to take for label positions
iarr = 2
# plot element labels
for z in range(nzmax):
try:
nmin = min(np.argwhere(nzycheck[:,z,iarr-2]))[0]-1
ax.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='medium',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel==1:
for z in range(nzmax):
for n in range(nnmax):
a = z+n
if nzycheck[n,z,iarr-2]==1:
ax.text(n,z,a,horizontalalignment='center',verticalalignment='center',fontsize='small',clip_on=True)
# plot lines at magic numbers
if imagic==1:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(np.argwhere(nzycheck[:,magic,iarr-2]))[0]
xnmax = max(np.argwhere(nzycheck[:,magic,iarr-2]))[0]
line = ax.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(np.argwhere(nzycheck[magic,:,iarr-2]))[0]
yzmax = max(np.argwhere(nzycheck[magic,:,iarr-2]))[0]
line = ax.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
ax.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax.axis(plotaxis)
# set x- and y-axis label
ax.set_xlabel('neutron number')
ax.set_ylabel('proton number')
if which_flux == None or which_flux == 0:
max_flux_label="max flux = "+str('{0:.4f}'.format(max_flux))
elif which_flux == 1:
max_flux_label="max energy flux = "+str('{0:.4f}'.format(max_flux))
elif which_flux == 2:
min_flux_label="min timescale [s] = "+str('{0:.4f}'.format(min_flux))
if print_max_flux_in_plot:
if which_flux == None or which_flux < 2:
ax.text(plotaxis[1]-1.8,plotaxis[2]+0.1,max_flux_label,fontsize=10.)
elif which_flux == 2:
ax.text(plotaxis[1]-1.8,plotaxis[2]+0.1,min_flux_label,fontsize=10.)
fig.savefig(graphname)
print graphname,'is done'
if which_flux == None or which_flux < 2:
print max_flux_label,'for reaction =',ind_max_flux+1
elif which_flux == 2:
print min_flux_label,'for reaction =',ind_min_flux+1
plt.show()
|
{
"content_hash": "327abfeb4d192144db397339eb823dd1",
"timestamp": "",
"source": "github",
"line_count": 2866,
"max_line_length": 458,
"avg_line_length": 33.78855547801815,
"alnum_prop": 0.5857101551044012,
"repo_name": "mrawls/mesa-tools",
"id": "92f0744dc4b22b81efc3ea02f910e8438101ed7c",
"size": "96838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170524"
}
],
"symlink_target": ""
}
|
"""
Tests for Glance Registry's client.
These tests are temporary and will be removed once
the registry's driver tests are added.
"""
import copy
import datetime
import os
import mox
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance.openstack.common import timeutils
from glance.openstack.common import uuidutils
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.registry.api import v2 as rserver
from glance.tests.unit import base
from glance.tests import utils as test_utils
_gen_uuid = uuidutils.generate_uuid
UUID1 = _gen_uuid()
UUID2 = _gen_uuid()
#NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""
    Test that proper actions are taken for both valid and invalid
    requests against a Registry service
"""
    # Registry server to use in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.setup_db_env()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
location="swift://user:passwd@acct/container/obj.tar.0",
properties={'type': 'kernel'}),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19,
location="file:///tmp/glance-tests/2")]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
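        # the two fixtures give every test a deterministic baseline: UUID1 is
        # a private ami image of size 13 with a 'type' property, UUID2 a plain
        # image of size 19, so the list/sort assertions have known anchors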
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(len(images), 2)
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='name', sort_dir='asc')
self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
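        # expected order is plain lexicographic: 'asdf' < 'fake image #1'
        # < 'fake image #2' < 'xyz'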
def test_get_index_sort_status_desc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by status in
descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='status', sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by disk_format in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='disk_format',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by container_format in
descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='container_format',
sort_dir='desc')
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami', size=100)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare', size=2)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='size', sort_dir='asc')
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=False)
def test_get_index_sort_created_at_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted by created_at in ascending order.
"""
now = timeutils.utcnow()
time1 = now + datetime.timedelta(seconds=5)
time2 = now
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=time1)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=time2)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='created_at',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3),
unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
"""
        Tests that the registry API returns a list of
public images sorted by updated_at in descending order.
"""
now = timeutils.utcnow()
time1 = now + datetime.timedelta(seconds=5)
time2 = now
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=None,
updated_at=time1)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=None,
updated_at=time2)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='updated_at',
sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_image_get_index_marker(self):
"""Test correct set of images returned with marker param."""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID4)
self.assertEqualImages(images, (UUID3, UUID2, UUID1), unjsonify=False)
def test_image_get_index_limit(self):
"""Test correct number of images returned with limit param."""
extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=2)
self.assertEqual(len(images), 2)
def test_image_get_index_marker_limit(self):
"""Test correct set of images returned with marker/limit params."""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID3, limit=1)
self.assertEqualImages(images, (UUID2,), unjsonify=False)
def test_image_get_index_limit_None(self):
"""Test correct set of images returned with limit param == None."""
extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=None)
self.assertEqual(len(images), 4)
def test_image_get_index_by_name(self):
"""
Test correct set of public, name-filtered image returned. This
is just a sanity check, we test the details call more in-depth.
"""
extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(filters={'name': 'new name! #123'})
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_image_get_is_public_v2(self):
"""Tests that a detailed call can be filtered by a property"""
extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving',
properties={'is_public': 'avalue'})
context = copy.copy(self.context)
db_api.image_create(context, extra_fixture)
filters = {'is_public': 'avalue'}
images = self.client.image_get_all(filters=filters)
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('avalue', image['properties'][0]['value'])
def test_image_get(self):
"""Tests that the detailed info about an image returned"""
fixture = self.get_fixture(id=UUID1, name='fake image #1',
is_public=False, size=13,
disk_format='ami', container_format='ami')
data = self.client.image_get(image_id=UUID1)
for k, v in fixture.items():
el = data[k]
self.assertEqual(v, data[k],
"Failed v != data[k] where v = %(v)s and "
"k = %(k)s and data[k] = %(el)s" %
dict(v=v, k=k, el=el))
def test_image_get_non_existing(self):
"""Tests that NotFound is raised when getting a non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_get,
image_id=_gen_uuid())
def test_image_create_basic(self):
"""Tests that we can add image metadata and returns the new id"""
fixture = self.get_fixture()
new_image = self.client.image_create(values=fixture)
# Test all other attributes set
data = self.client.image_get(image_id=new_image['id'])
for k, v in fixture.items():
self.assertEqual(v, data[k])
# Test status was updated properly
self.assertTrue('status' in data.keys())
self.assertEqual('active', data['status'])
def test_image_create_with_properties(self):
"""Tests that we can add image metadata with properties"""
fixture = self.get_fixture(location="file:///tmp/glance-tests/2",
properties={'distro': 'Ubuntu 10.04 LTS'})
new_image = self.client.image_create(values=fixture)
self.assertIn('properties', new_image)
self.assertEqual(new_image['properties'][0]['value'],
fixture['properties']['distro'])
del fixture['location']
del fixture['properties']
for k, v in fixture.items():
self.assertEqual(v, new_image[k])
# Test status was updated properly
self.assertTrue('status' in new_image.keys())
self.assertEqual('active', new_image['status'])
def test_image_create_already_exists(self):
"""Tests proper exception is raised if image with ID already exists"""
fixture = self.get_fixture(id=UUID2,
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Duplicate,
self.client.image_create,
values=fixture)
def test_image_create_with_bad_status(self):
"""Tests proper exception is raised if a bad status is set"""
fixture = self.get_fixture(status='bad status',
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Invalid,
self.client.image_create,
values=fixture)
def test_image_update(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk'}
self.assertTrue(self.client.image_update(image_id=UUID2,
values=fixture))
# Test all other attributes set
data = self.client.image_get(image_id=UUID2)
for k, v in fixture.items():
self.assertEqual(v, data[k])
def test_image_update_not_existing(self):
"""Tests non existing image update doesn't work"""
fixture = self.get_fixture(status='bad status')
self.assertRaises(exception.NotFound,
self.client.image_update,
image_id=_gen_uuid(),
values=fixture)
def test_image_destroy(self):
"""Tests that image metadata is deleted properly"""
# Grab the original number of images
orig_num_images = len(self.client.image_get_all())
# Delete image #2
image = self.FIXTURES[1]
deleted_image = self.client.image_destroy(image_id=image['id'])
self.assertTrue(deleted_image)
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
# Verify one less image
filters = {'deleted': False}
new_num_images = len(self.client.image_get_all(filters=filters))
self.assertEqual(new_num_images, orig_num_images - 1)
def test_image_destroy_not_existing(self):
"""Tests cannot delete non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_destroy,
image_id=_gen_uuid())
def test_image_get_members(self):
"""Tests getting image members"""
memb_list = self.client.image_member_find(image_id=UUID2)
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_image_get_members_not_existing(self):
"""Tests getting non-existent image members"""
self.assertRaises(exception.NotFound,
self.client.image_get_members,
image_id=_gen_uuid())
def test_image_member_find(self):
"""Tests getting member images"""
memb_list = self.client.image_member_find(member='pattieblack')
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_add_update_members(self):
"""Tests updating image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.assertTrue(member)
values['member'] = 'pattieblack2'
self.assertTrue(self.client.image_member_update(memb_id=member['id'],
values=values))
def test_add_delete_member(self):
"""Tests deleting image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.client.image_member_delete(memb_id=member['id'])
memb_list = self.client.image_member_find(member='pattieblack')
self.assertEqual(len(memb_list), 0)
class TestRegistryV2ClientApi(base.IsolatedUnitTest):
"""
Test proper actions made for both valid and invalid requests
against a Registry service
"""
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2ClientApi, self).setUp()
self.mox = mox.Mox()
reload(rapi)
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2ClientApi, self).tearDown()
self.mox.UnsetStubs()
def test_configure_registry_client_not_using_use_user_token(self):
self.config(use_user_token=False)
self.mox.StubOutWithMock(rapi, 'configure_registry_admin_creds')
rapi.configure_registry_admin_creds()
self.mox.ReplayAll()
rapi.configure_registry_client()
self.mox.VerifyAll()
def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'):
return {
'user': 'user',
'password': 'password',
'username': 'user',
'tenant': 'tenant',
'auth_url': auth_url,
'strategy': strategy,
'region': 'region'
}
def test_configure_registry_admin_creds(self):
expected = self._get_fake_config_creds(auth_url=None,
strategy='configured_strategy')
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_strategy=expected['strategy'])
self.config(auth_region=expected['region'])
self.stubs.Set(os, 'getenv', lambda x: None)
self.assertEqual(rapi._CLIENT_CREDS, None)
rapi.configure_registry_admin_creds()
self.assertEqual(rapi._CLIENT_CREDS, expected)
def test_configure_registry_admin_creds_with_auth_url(self):
expected = self._get_fake_config_creds()
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_url=expected['auth_url'])
self.config(auth_strategy='test_strategy')
self.config(auth_region=expected['region'])
self.assertEqual(rapi._CLIENT_CREDS, None)
rapi.configure_registry_admin_creds()
self.assertEqual(rapi._CLIENT_CREDS, expected)
|
{
"content_hash": "7fd2f46e1c720361fbbf92f52a75bcf7",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 79,
"avg_line_length": 37.24567474048443,
"alnum_prop": 0.5736714975845411,
"repo_name": "cloudbau/glance",
"id": "fd8fe827ffa59a850fbcdaf90d91dea1b0f0f4b7",
"size": "22201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/v2/test_registry_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2489476"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
}
|
import os.path
import subprocess
def get_usable_rtmpdump(cmd):
try:
p = subprocess.Popen([cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return cmd
    except OSError:
        # The command is not installed or not on PATH.
        return None
RTMPDUMP = get_usable_rtmpdump('rtmpdump')
def has_rtmpdump_installed():
return RTMPDUMP is not None
#
#Example params: {"-y": "playlist", "-q": None}
#For flag-only options (a key with no value), set the value to None.
#-r and -o must not be included in params; they are built from url and filepath.
def download_rtmpdump_stream(url, title, ext, params={}, output_dir='.'):
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
cmdline = [RTMPDUMP, '-r']
cmdline.append(url)
cmdline.append('-o')
cmdline.append(filepath)
    for key in params.keys():
        cmdline.append(key)
        if params[key] is not None:
            cmdline.append(params[key])
# cmdline.append('-y')
# cmdline.append(playpath)
print("Call rtmpdump:\n"+" ".join(cmdline)+"\n")
subprocess.call(cmdline)
return
#
#To be refactor
#
def play_rtmpdump_stream(player, url, params={}):
cmdline="rtmpdump -r '%s' "%url
for key in params.keys():
cmdline+=key+" "+params[key] if params[key]!=None else ""+" "
cmdline+=" -o - | %s -"%player
print(cmdline)
os.system(cmdline)
# os.system("rtmpdump -r '%s' -y '%s' -o - | %s -" % (url, playpath, player))
return
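#
#Example usage (illustrative sketch; the stream URL and title are hypothetical):
#
#  if has_rtmpdump_installed():
#      download_rtmpdump_stream('rtmp://example.com/live/stream',
#                               'my_video', 'flv',
#                               params={'-y': 'playlist', '-q': None})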
|
{
"content_hash": "436b8ed666d4809dbf58e0ce9895f782",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 83,
"avg_line_length": 26.32075471698113,
"alnum_prop": 0.6129032258064516,
"repo_name": "lcplj123/video-dl",
"id": "a395700413b91e3a2f73d3be1ef28ead14b0e095",
"size": "1419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "postproc/rtmpdump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148653"
}
],
"symlink_target": ""
}
|
from optparse import OptionParser
def main():
#Gather input and output file paths
parser = OptionParser()
parser.add_option("-i", "--input", dest="i",
help="Input fasta file to be filtered for chimeras.",
metavar="INPUT")
parser.add_option("-n", "--non-chimeras", dest="nc",
help="Text file indicating non-chimeric sequence IDs.",
metavar="NON-CHIMERAS")
parser.add_option("-o", "--output", dest="o",
help="Output fasta file of non-chimeric sequences.",
metavar="OUTPUT")
(options, args) = parser.parse_args()
inputFile = open(options.i, 'r')
ncFile = open(options.nc, 'r')
outputFile = open(options.o, 'w')
#Create arrays for input and non-chimeric sequences
nc = []
for line in ncFile:
nc.append(line.strip())
ncFile.close()
inputData = []
for line in inputFile:
inputData.append(line)
inputFile.close()
#Store input sequences in a dictionary with sequence IDs mapping to the full
#sequence information
seqDict = {}
for i,line in enumerate(inputData):
if '>' in line:
seqID = line.replace('>','')
seqID = seqID.split(' ')
seqID = seqID[0]
seqDict[seqID] = [line, inputData[i+1]]
#Find and export the intersection of non-chimeric sequence IDs with input
#sequence IDs. Export full sequence header and sequence data in fasta
#format.
ncKeys = set(nc).intersection(seqDict)
for key in ncKeys:
outputFile.write(seqDict[key][0])
outputFile.write(seqDict[key][1])
outputFile.close()
if __name__ == "__main__":
main()
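#Example invocation (illustrative; the file names are hypothetical):
#  python discardChimeras.py -i seqs.fasta -n nonchimeric_ids.txt -o filtered.fasta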
|
{
"content_hash": "0e2560d97ed2c765c5b339711cc493e9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 31.98,
"alnum_prop": 0.6385240775484677,
"repo_name": "sjspence/epicPCR",
"id": "f9d28dfc96d73be29605c8d0501a461627c54299",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discardChimeras.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "662"
},
{
"name": "Python",
"bytes": "11347"
}
],
"symlink_target": ""
}
|
import os
from lib.base_plugin import BasePlugin
class FaerieSolitairePlugin(BasePlugin):
Name = "Faerie Solitaire"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Saves', os.environ['APPDATA'], 'Faerie Solitaire')
def restore(self, _):
_.restore_folder('Saves', os.environ['APPDATA'], 'Faerie Solitaire')
def detect(self):
if os.path.isdir(os.path.join(os.environ['APPDATA'], 'Faerie Solitaire')):
return True
return False
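# Illustrative use (a sketch; `target` stands for the backup helper object that
# this plugin's backup/restore methods receive; its add_folder and
# restore_folder methods come from the surrounding framework, not shown here):
#
#   plugin = FaerieSolitairePlugin()
#   if plugin.detect():
#       plugin.backup(target)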
|
{
"content_hash": "a4994e87cb0cf0d79feb5c55160d3887",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 28.055555555555557,
"alnum_prop": 0.6356435643564357,
"repo_name": "Pr0Ger/SGSB",
"id": "f974fb841aa6a030630e6f31117f97e01a2eca13",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/Faerie_Solitaire.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69607"
}
],
"symlink_target": ""
}
|
"""
An application for managing IPython profiles.
To be invoked as the `ipython profile` subcommand.
Authors:
* Min RK
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from traitlets.config.application import Application
from IPython.core.application import (
BaseIPythonApplication, base_flags
)
from IPython.core.profiledir import ProfileDir
from IPython.utils.importstring import import_item
from IPython.paths import get_ipython_dir, get_ipython_package_dir
from IPython.utils import py3compat
from traitlets import Unicode, Bool, Dict, observe
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
create_help = """Create an IPython profile by name
Create an ipython profile directory by its name or
profile directory path. Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. Once created, you
can edit the configuration files in the profile
directory to configure IPython. Most users will create a
profile directory by name,
`ipython profile create myprofile`, which will put the directory
in `<ipython_dir>/profile_myprofile`.
"""
list_help = """List available IPython profiles
List all available profiles, by profile location, that can
be found in the current working directory or in the ipython
directory. Profile directories are named using the convention
'profile_<profile>'.
"""
profile_help = """Manage IPython profiles
Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. You can create profiles
with `ipython profile create <name>`, or see the profiles you
already have with `ipython profile list`
To get started configuring IPython, simply do:
$> ipython profile create
and IPython will create the default profile in <ipython_dir>/profile_default,
where you can edit ipython_config.py to start configuring IPython.
"""
_list_examples = "ipython profile list # list all profiles"
_create_examples = """
ipython profile create foo # create profile foo w/ default config files
ipython profile create foo --reset # restage default config files over current
ipython profile create foo --parallel # also stage parallel config files
"""
_main_examples = """
ipython profile create -h # show the help string for the create subcommand
ipython profile list -h # show the help string for the list subcommand
ipython locate profile foo # print the path to the directory for profile 'foo'
"""
#-----------------------------------------------------------------------------
# Profile Application Class (for `ipython profile` subcommand)
#-----------------------------------------------------------------------------
def list_profiles_in(path):
"""list profiles in a given root directory"""
files = os.listdir(path)
profiles = []
for f in files:
try:
full_path = os.path.join(path, f)
except UnicodeError:
continue
if os.path.isdir(full_path) and f.startswith('profile_'):
profiles.append(f.split('_',1)[-1])
return profiles
def list_bundled_profiles():
"""list profiles that are bundled with IPython."""
path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
files = os.listdir(path)
profiles = []
for profile in files:
full_path = os.path.join(path, profile)
if os.path.isdir(full_path) and profile != "__pycache__":
profiles.append(profile)
return profiles
class ProfileLocate(BaseIPythonApplication):
description = """print the path to an IPython profile dir"""
def parse_command_line(self, argv=None):
super(ProfileLocate, self).parse_command_line(argv)
if self.extra_args:
self.profile = self.extra_args[0]
def start(self):
print(self.profile_dir.location)
class ProfileList(Application):
name = u'ipython-profile'
description = list_help
examples = _list_examples
aliases = Dict({
'ipython-dir' : 'ProfileList.ipython_dir',
'log-level' : 'Application.log_level',
})
flags = Dict(dict(
debug = ({'Application' : {'log_level' : 0}},
"Set Application.log_level to 0, maximizing log output."
)
))
ipython_dir = Unicode(get_ipython_dir(),
help="""
The name of the IPython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
        is usually $HOME/.ipython. This option can also be specified through
the environment variable IPYTHONDIR.
"""
).tag(config=True)
def _print_profiles(self, profiles):
"""print list of profiles, indented."""
for profile in profiles:
print(' %s' % profile)
def list_profile_dirs(self):
profiles = list_bundled_profiles()
if profiles:
print()
print("Available profiles in IPython:")
self._print_profiles(profiles)
print()
print(" The first request for a bundled profile will copy it")
print(" into your IPython directory (%s)," % self.ipython_dir)
print(" where you can customize it.")
profiles = list_profiles_in(self.ipython_dir)
if profiles:
print()
print("Available profiles in %s:" % self.ipython_dir)
self._print_profiles(profiles)
profiles = list_profiles_in(py3compat.getcwd())
if profiles:
print()
print("Available profiles in current directory (%s):" % py3compat.getcwd())
self._print_profiles(profiles)
print()
print("To use any of the above profiles, start IPython with:")
print(" ipython --profile=<name>")
print()
def start(self):
self.list_profile_dirs()
create_flags = {}
create_flags.update(base_flags)
# don't include '--init' flag, which implies running profile create in other apps
create_flags.pop('init')
create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
"reset config files in this profile to the defaults.")
create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
"Include the config files for parallel "
"computing apps (ipengine, ipcontroller, etc.)")
class ProfileCreate(BaseIPythonApplication):
name = u'ipython-profile'
description = create_help
examples = _create_examples
auto_create = Bool(True)
def _log_format_default(self):
return "[%(name)s] %(message)s"
def _copy_config_files_default(self):
return True
parallel = Bool(False,
help="whether to include parallel computing config files"
).tag(config=True)
@observe('parallel')
def _parallel_changed(self, change):
parallel_files = [ 'ipcontroller_config.py',
'ipengine_config.py',
'ipcluster_config.py'
]
if change['new']:
for cf in parallel_files:
self.config_files.append(cf)
else:
for cf in parallel_files:
if cf in self.config_files:
self.config_files.remove(cf)
def parse_command_line(self, argv):
super(ProfileCreate, self).parse_command_line(argv)
# accept positional arg as profile name
if self.extra_args:
self.profile = self.extra_args[0]
flags = Dict(create_flags)
classes = [ProfileDir]
def _import_app(self, app_path):
"""import an app class"""
app = None
name = app_path.rsplit('.', 1)[-1]
try:
app = import_item(app_path)
except ImportError:
self.log.info("Couldn't import %s, config file will be excluded", name)
except Exception:
self.log.warning('Unexpected error importing %s', name, exc_info=True)
return app
def init_config_files(self):
super(ProfileCreate, self).init_config_files()
# use local imports, since these classes may import from here
from IPython.terminal.ipapp import TerminalIPythonApp
apps = [TerminalIPythonApp]
for app_path in (
'ipykernel.kernelapp.IPKernelApp',
):
app = self._import_app(app_path)
if app is not None:
apps.append(app)
if self.parallel:
from ipyparallel.apps.ipcontrollerapp import IPControllerApp
from ipyparallel.apps.ipengineapp import IPEngineApp
from ipyparallel.apps.ipclusterapp import IPClusterStart
apps.extend([
IPControllerApp,
IPEngineApp,
IPClusterStart,
])
for App in apps:
app = App()
app.config.update(self.config)
app.log = self.log
app.overwrite = self.overwrite
app.copy_config_files=True
app.ipython_dir=self.ipython_dir
app.profile_dir=self.profile_dir
app.init_config_files()
def stage_default_config_file(self):
pass
class ProfileApp(Application):
name = u'ipython profile'
description = profile_help
examples = _main_examples
subcommands = Dict(dict(
create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
list = (ProfileList, ProfileList.description.splitlines()[0]),
locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
))
def start(self):
if self.subapp is None:
print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
print()
self.print_description()
self.print_subcommands()
self.exit(1)
else:
return self.subapp.start()
|
{
"content_hash": "b6ce6c9289dc0786878a69e2eec1c7fd",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 95,
"avg_line_length": 34.26198083067093,
"alnum_prop": 0.6010816859380828,
"repo_name": "lancezlin/ml_template_py",
"id": "b8e5fd26ac3fd343dd91305db51e5041dcae134c",
"size": "10742",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/IPython/core/profileapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.http import QueryDict
# FILEBROWSER IMPORTS
from filebrowser.templatetags.fb_tags import get_file_extensions
class TemplateTagsTests(TestCase):
def test_get_file_extensions(self):
self.assertEqual(get_file_extensions(''),
['.pdf', '.doc', '.rtf', '.txt', '.xls', '.csv', '.docx', '.mov', \
'.wmv', '.mpeg', '.mpg', '.avi', '.rm', '.jpg', '.jpeg', '.gif', '.png', \
'.tif', '.tiff', '.mp3', '.mp4', '.wav', '.aiff', '.midi', '.m4p'])
self.assertEqual(
get_file_extensions(QueryDict('type=image')),
['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff']
)
|
{
"content_hash": "e34e6a2d659840dda22e8944e97bf17a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 86,
"avg_line_length": 40.35294117647059,
"alnum_prop": 0.5408163265306123,
"repo_name": "cbingos/cpro",
"id": "0db4a841958d8af939d03523d1b5818c6acce483",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filebrowser/tests/test_templatetags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "332547"
},
{
"name": "JavaScript",
"bytes": "1481040"
},
{
"name": "OpenEdge ABL",
"bytes": "3336406"
},
{
"name": "Python",
"bytes": "10218595"
},
{
"name": "Ruby",
"bytes": "879"
},
{
"name": "TeX",
"bytes": "2217"
}
],
"symlink_target": ""
}
|
"""Test for SQL component Init."""
from homeassistant import config_entries
from homeassistant.core import HomeAssistant
from . import init_integration
async def test_setup_entry(hass: HomeAssistant) -> None:
"""Test setup entry."""
config_entry = await init_integration(hass)
assert config_entry.state == config_entries.ConfigEntryState.LOADED
async def test_unload_entry(hass: HomeAssistant) -> None:
"""Test unload an entry."""
config_entry = await init_integration(hass)
assert config_entry.state == config_entries.ConfigEntryState.LOADED
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is config_entries.ConfigEntryState.NOT_LOADED
|
{
"content_hash": "ca1bca6fde1fa08ed5e284eb5d7f169b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 36.23809523809524,
"alnum_prop": 0.7516425755584757,
"repo_name": "w1ll1am23/home-assistant",
"id": "5c3f237ac498cf9775d10f0877a0c4cd69dfa6df",
"size": "761",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/sql/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
from engine.stratanalyzer import returns
from engine import warninghelpers
from engine import broker
import datetime
class PositionState(object):
def onEnter(self, position):
pass
# Raise an exception if an order can't be submitted in the current state.
def canSubmitOrder(self, position, order):
raise NotImplementedError()
def onOrderEvent(self, position, orderEvent):
raise NotImplementedError()
def isOpen(self, position):
raise NotImplementedError()
def exit(self, position, stopPrice=None, limitPrice=None, goodTillCanceled=None):
raise NotImplementedError()
class WaitingEntryState(PositionState):
def canSubmitOrder(self, position, order):
if position.entryActive():
raise Exception("The entry order is still active")
def onOrderEvent(self, position, orderEvent):
# Only entry order events are valid in this state.
assert(position.getEntryOrder().getId() == orderEvent.getOrder().getId())
if orderEvent.getEventType() in (broker.OrderEvent.Type.FILLED, broker.OrderEvent.Type.PARTIALLY_FILLED):
position.switchState(OpenState())
position.getStrategy().onEnterOk(position)
elif orderEvent.getEventType() == broker.OrderEvent.Type.CANCELED:
assert(position.getEntryOrder().getFilled() == 0)
position.switchState(ClosedState())
position.getStrategy().onEnterCanceled(position)
def isOpen(self, position):
return True
def exit(self, position, stopPrice=None, limitPrice=None, goodTillCanceled=None):
assert(position.getShares() == 0)
assert(position.getEntryOrder().isActive())
position.getStrategy().getBroker().cancelOrder(position.getEntryOrder())
class OpenState(PositionState):
def onEnter(self, position):
entryDateTime = position.getEntryOrder().getExecutionInfo().getDateTime()
position.setEntryDateTime(entryDateTime)
def canSubmitOrder(self, position, order):
# Only exit orders should be submitted in this state.
pass
def onOrderEvent(self, position, orderEvent):
if position.getExitOrder() and position.getExitOrder().getId() == orderEvent.getOrder().getId():
if orderEvent.getEventType() == broker.OrderEvent.Type.FILLED:
if position.getShares() == 0:
position.switchState(ClosedState())
position.getStrategy().onExitOk(position)
elif orderEvent.getEventType() == broker.OrderEvent.Type.CANCELED:
assert(position.getShares() != 0)
position.getStrategy().onExitCanceled(position)
elif position.getEntryOrder().getId() == orderEvent.getOrder().getId():
# Nothing to do since the entry order may be completely filled or canceled after a partial fill.
assert(position.getShares() != 0)
else:
raise Exception("Invalid order event '%s' in OpenState" % (orderEvent.getEventType()))
def isOpen(self, position):
return True
def exit(self, position, stopPrice=None, limitPrice=None, goodTillCanceled=None):
assert(position.getShares() != 0)
# Fail if a previous exit order is active.
if position.exitActive():
raise Exception("Exit order is active and it should be canceled first")
# If the entry order is active, request cancellation.
if position.entryActive():
position.getStrategy().getBroker().cancelOrder(position.getEntryOrder())
position._submitExitOrder(stopPrice, limitPrice, goodTillCanceled)
class ClosedState(PositionState):
def onEnter(self, position):
# Set the exit datetime if the exit order was filled.
if position.exitFilled():
exitDateTime = position.getExitOrder().getExecutionInfo().getDateTime()
position.setExitDateTime(exitDateTime)
assert(position.getShares() == 0)
position.getStrategy().unregisterPosition(position)
def canSubmitOrder(self, position, order):
raise Exception("The position is closed")
def onOrderEvent(self, position, orderEvent):
raise Exception("Invalid order event '%s' in ClosedState" % (orderEvent.getEventType()))
def isOpen(self, position):
return False
def exit(self, position, stopPrice=None, limitPrice=None, goodTillCanceled=None):
pass
class Position(object):
"""Base class for positions.
Positions are higher level abstractions for placing orders.
    They are essentially a pair of entry-exit orders and make it
    easier to track returns and PnL than placing orders manually.
:param strategy: The strategy that this position belongs to.
:type strategy: :class:`engine.strategy.BaseStrategy`.
:param entryOrder: The order used to enter the position.
:type entryOrder: :class:`engine.broker.Order`
:param goodTillCanceled: True if the entry order should be set as good till canceled.
:type goodTillCanceled: boolean.
:param allOrNone: True if the orders should be completely filled or not at all.
:type allOrNone: boolean.
.. note::
This is a base class and should not be used directly.
"""
def __init__(self, strategy, entryOrder, goodTillCanceled, allOrNone):
# The order must be created but not submitted.
assert(entryOrder.isInitial())
self.__state = None
self.__activeOrders = {}
self.__shares = 0
self.__strategy = strategy
self.__entryOrder = None
self.__entryDateTime = None
self.__exitOrder = None
self.__exitDateTime = None
self.__posTracker = returns.PositionTracker(entryOrder.getInstrumentTraits())
self.__allOrNone = allOrNone
self.switchState(WaitingEntryState())
entryOrder.setGoodTillCanceled(goodTillCanceled)
entryOrder.setAllOrNone(allOrNone)
self.__submitAndRegisterOrder(entryOrder)
self.__entryOrder = entryOrder
def __submitAndRegisterOrder(self, order):
assert(order.isInitial())
# Check if an order can be submitted in the current state.
self.__state.canSubmitOrder(self, order)
        # This may raise an exception, so we want to submit the order before moving forward and registering
# the order in the strategy.
self.getStrategy().getBroker().submitOrder(order)
self.__activeOrders[order.getId()] = order
self.getStrategy().registerPositionOrder(self, order)
def setEntryDateTime(self, dateTime):
self.__entryDateTime = dateTime
def setExitDateTime(self, dateTime):
self.__exitDateTime = dateTime
def switchState(self, newState):
self.__state = newState
self.__state.onEnter(self)
def getStrategy(self):
return self.__strategy
def getLastPrice(self):
return self.__strategy.getLastPrice(self.getInstrument())
def getActiveOrders(self):
return self.__activeOrders.values()
def getShares(self):
"""Returns the number of shares.
        This will be a positive number for a long position, and a negative number for a short position.
.. note::
If the entry order was not filled, or if the position is closed, then the number of shares will be 0.
"""
return self.__shares
def entryActive(self):
"""Returns True if the entry order is active."""
return self.__entryOrder is not None and self.__entryOrder.isActive()
def entryFilled(self):
"""Returns True if the entry order was filled."""
return self.__entryOrder is not None and self.__entryOrder.isFilled()
def exitActive(self):
"""Returns True if the exit order is active."""
return self.__exitOrder is not None and self.__exitOrder.isActive()
def exitFilled(self):
"""Returns True if the exit order was filled."""
return self.__exitOrder is not None and self.__exitOrder.isFilled()
def getEntryOrder(self):
"""Returns the :class:`engine.broker.Order` used to enter the position."""
return self.__entryOrder
def getExitOrder(self):
"""Returns the :class:`engine.broker.Order` used to exit the position. If this position hasn't been closed yet, None is returned."""
return self.__exitOrder
def getInstrument(self):
"""Returns the instrument used for this position."""
return self.__entryOrder.getInstrument()
def getReturn(self, includeCommissions=True):
"""
Calculates cumulative percentage returns up to this point.
If the position is not closed, these will be unrealized returns.
"""
# Deprecated in v0.18.
if includeCommissions is False:
warninghelpers.deprecation_warning("includeCommissions will be deprecated in the next version.", stacklevel=2)
ret = 0
price = self.getLastPrice()
if price is not None:
ret = self.__posTracker.getReturn(price, includeCommissions)
return ret
def getPnL(self, includeCommissions=True):
"""
Calculates PnL up to this point.
If the position is not closed, these will be unrealized PnL.
"""
# Deprecated in v0.18.
if includeCommissions is False:
warninghelpers.deprecation_warning("includeCommissions will be deprecated in the next version.", stacklevel=2)
ret = 0
price = self.getLastPrice()
if price is not None:
ret = self.__posTracker.getPnL(price=price, includeCommissions=includeCommissions)
return ret
def cancelEntry(self):
"""Cancels the entry order if its active."""
if self.entryActive():
self.getStrategy().getBroker().cancelOrder(self.getEntryOrder())
def cancelExit(self):
"""Cancels the exit order if its active."""
if self.exitActive():
self.getStrategy().getBroker().cancelOrder(self.getExitOrder())
def exitMarket(self, goodTillCanceled=None):
"""Submits a market order to close this position.
:param goodTillCanceled: True if the exit order is good till canceled. If False then the order gets automatically canceled when the session closes. If None, then it will match the entry order.
:type goodTillCanceled: boolean.
.. note::
* If the position is closed (entry canceled or exit filled) this won't have any effect.
* If the exit order for this position is pending, an exception will be raised. The exit order should be canceled first.
* If the entry order is active, cancellation will be requested.
"""
self.__state.exit(self, None, None, goodTillCanceled)
def exitLimit(self, limitPrice, goodTillCanceled=None):
"""Submits a limit order to close this position.
:param limitPrice: The limit price.
:type limitPrice: float.
:param goodTillCanceled: True if the exit order is good till canceled. If False then the order gets automatically canceled when the session closes. If None, then it will match the entry order.
:type goodTillCanceled: boolean.
.. note::
* If the position is closed (entry canceled or exit filled) this won't have any effect.
* If the exit order for this position is pending, an exception will be raised. The exit order should be canceled first.
* If the entry order is active, cancellation will be requested.
"""
self.__state.exit(self, None, limitPrice, goodTillCanceled)
def exitStop(self, stopPrice, goodTillCanceled=None):
"""Submits a stop order to close this position.
:param stopPrice: The stop price.
:type stopPrice: float.
:param goodTillCanceled: True if the exit order is good till canceled. If False then the order gets automatically canceled when the session closes. If None, then it will match the entry order.
:type goodTillCanceled: boolean.
.. note::
* If the position is closed (entry canceled or exit filled) this won't have any effect.
* If the exit order for this position is pending, an exception will be raised. The exit order should be canceled first.
* If the entry order is active, cancellation will be requested.
"""
self.__state.exit(self, stopPrice, None, goodTillCanceled)
def exitStopLimit(self, stopPrice, limitPrice, goodTillCanceled=None):
"""Submits a stop limit order to close this position.
:param stopPrice: The stop price.
:type stopPrice: float.
:param limitPrice: The limit price.
:type limitPrice: float.
:param goodTillCanceled: True if the exit order is good till canceled. If False then the order gets automatically canceled when the session closes. If None, then it will match the entry order.
:type goodTillCanceled: boolean.
.. note::
* If the position is closed (entry canceled or exit filled) this won't have any effect.
* If the exit order for this position is pending, an exception will be raised. The exit order should be canceled first.
* If the entry order is active, cancellation will be requested.
"""
self.__state.exit(self, stopPrice, limitPrice, goodTillCanceled)
def _submitExitOrder(self, stopPrice, limitPrice, goodTillCanceled):
assert(not self.exitActive())
exitOrder = self.buildExitOrder(stopPrice, limitPrice)
# If goodTillCanceled was not set, match the entry order.
if goodTillCanceled is None:
goodTillCanceled = self.__entryOrder.getGoodTillCanceled()
exitOrder.setGoodTillCanceled(goodTillCanceled)
exitOrder.setAllOrNone(self.__allOrNone)
self.__submitAndRegisterOrder(exitOrder)
self.__exitOrder = exitOrder
def onOrderEvent(self, orderEvent):
self.__updatePosTracker(orderEvent)
order = orderEvent.getOrder()
if not order.isActive():
del self.__activeOrders[order.getId()]
# Update the number of shares.
if orderEvent.getEventType() in (broker.OrderEvent.Type.PARTIALLY_FILLED, broker.OrderEvent.Type.FILLED):
execInfo = orderEvent.getEventInfo()
# roundQuantity is used to prevent bugs like the one triggered in testcases.bitstamp_test:TestCase.testRoundingBug
if order.isBuy():
self.__shares = order.getInstrumentTraits().roundQuantity(self.__shares + execInfo.getQuantity())
else:
self.__shares = order.getInstrumentTraits().roundQuantity(self.__shares - execInfo.getQuantity())
self.__state.onOrderEvent(self, orderEvent)
def __updatePosTracker(self, orderEvent):
if orderEvent.getEventType() in (broker.OrderEvent.Type.PARTIALLY_FILLED, broker.OrderEvent.Type.FILLED):
order = orderEvent.getOrder()
execInfo = orderEvent.getEventInfo()
if order.isBuy():
self.__posTracker.buy(execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission())
else:
self.__posTracker.sell(execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission())
def buildExitOrder(self, stopPrice, limitPrice):
raise NotImplementedError()
def isOpen(self):
"""Returns True if the position is open."""
return self.__state.isOpen(self)
def getAge(self):
"""Returns the duration in open state.
:rtype: datetime.timedelta.
.. note::
* If the position is open, then the difference between the entry datetime and the datetime of the last bar is returned.
* If the position is closed, then the difference between the entry datetime and the exit datetime is returned.
"""
ret = datetime.timedelta()
if self.__entryDateTime is not None:
if self.__exitDateTime is not None:
last = self.__exitDateTime
else:
last = self.__strategy.getCurrentDateTime()
ret = last - self.__entryDateTime
return ret
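# Illustrative usage from a strategy (a sketch, not part of this module; it
# assumes the strategy base class exposes an enterLong() helper that builds a
# LongPosition, and the instrument, size, and exit condition are hypothetical):
#
#   def onBars(self, bars):
#       if self.position is None:
#           self.position = self.enterLong("orcl", 10)  # market entry order
#       elif self.position.isOpen() and self.shouldExit(bars):
#           self.position.exitMarket()  # submits the market exit order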
# This class is responsible for order management in long positions.
class LongPosition(Position):
def __init__(self, strategy, instrument, stopPrice, limitPrice, quantity, goodTillCanceled, allOrNone):
if limitPrice is None and stopPrice is None:
entryOrder = strategy.getBroker().createMarketOrder(broker.Order.Action.BUY, instrument, quantity, False)
elif limitPrice is not None and stopPrice is None:
entryOrder = strategy.getBroker().createLimitOrder(broker.Order.Action.BUY, instrument, limitPrice, quantity)
elif limitPrice is None and stopPrice is not None:
entryOrder = strategy.getBroker().createStopOrder(broker.Order.Action.BUY, instrument, stopPrice, quantity)
elif limitPrice is not None and stopPrice is not None:
entryOrder = strategy.getBroker().createStopLimitOrder(broker.Order.Action.BUY, instrument, stopPrice, limitPrice, quantity)
else:
assert(False)
super(LongPosition, self).__init__(strategy, entryOrder, goodTillCanceled, allOrNone)
def buildExitOrder(self, stopPrice, limitPrice):
quantity = self.getShares()
assert(quantity > 0)
if limitPrice is None and stopPrice is None:
ret = self.getStrategy().getBroker().createMarketOrder(broker.Order.Action.SELL, self.getInstrument(), quantity, False)
elif limitPrice is not None and stopPrice is None:
ret = self.getStrategy().getBroker().createLimitOrder(broker.Order.Action.SELL, self.getInstrument(), limitPrice, quantity)
elif limitPrice is None and stopPrice is not None:
ret = self.getStrategy().getBroker().createStopOrder(broker.Order.Action.SELL, self.getInstrument(), stopPrice, quantity)
elif limitPrice is not None and stopPrice is not None:
ret = self.getStrategy().getBroker().createStopLimitOrder(broker.Order.Action.SELL, self.getInstrument(), stopPrice, limitPrice, quantity)
else:
assert(False)
return ret
# This class is responsible for order management in short positions.
class ShortPosition(Position):
def __init__(self, strategy, instrument, stopPrice, limitPrice, quantity, goodTillCanceled, allOrNone):
if limitPrice is None and stopPrice is None:
entryOrder = strategy.getBroker().createMarketOrder(broker.Order.Action.SELL_SHORT, instrument, quantity, False)
elif limitPrice is not None and stopPrice is None:
entryOrder = strategy.getBroker().createLimitOrder(broker.Order.Action.SELL_SHORT, instrument, limitPrice, quantity)
elif limitPrice is None and stopPrice is not None:
entryOrder = strategy.getBroker().createStopOrder(broker.Order.Action.SELL_SHORT, instrument, stopPrice, quantity)
elif limitPrice is not None and stopPrice is not None:
entryOrder = strategy.getBroker().createStopLimitOrder(broker.Order.Action.SELL_SHORT, instrument, stopPrice, limitPrice, quantity)
else:
assert(False)
super(ShortPosition, self).__init__(strategy, entryOrder, goodTillCanceled, allOrNone)
def buildExitOrder(self, stopPrice, limitPrice):
quantity = self.getShares() * -1
assert(quantity > 0)
if limitPrice is None and stopPrice is None:
ret = self.getStrategy().getBroker().createMarketOrder(broker.Order.Action.BUY_TO_COVER, self.getInstrument(), quantity, False)
elif limitPrice is not None and stopPrice is None:
ret = self.getStrategy().getBroker().createLimitOrder(broker.Order.Action.BUY_TO_COVER, self.getInstrument(), limitPrice, quantity)
elif limitPrice is None and stopPrice is not None:
ret = self.getStrategy().getBroker().createStopOrder(broker.Order.Action.BUY_TO_COVER, self.getInstrument(), stopPrice, quantity)
elif limitPrice is not None and stopPrice is not None:
ret = self.getStrategy().getBroker().createStopLimitOrder(broker.Order.Action.BUY_TO_COVER, self.getInstrument(), stopPrice, limitPrice, quantity)
else:
assert(False)
return ret
|
{
"content_hash": "64d3c4a2b1f3cf20e06dcd1379a286e0",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 200,
"avg_line_length": 43.92948717948718,
"alnum_prop": 0.6735249768957634,
"repo_name": "Yam-cn/potato",
"id": "91ff6f0f407fdfb511caa14b5ad8b152e0db13bf",
"size": "21173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/strategy/position.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "482582"
},
{
"name": "C++",
"bytes": "499680"
},
{
"name": "CSS",
"bytes": "269141"
},
{
"name": "HTML",
"bytes": "3627157"
},
{
"name": "JavaScript",
"bytes": "1343865"
},
{
"name": "PHP",
"bytes": "34371"
},
{
"name": "Python",
"bytes": "1484158"
},
{
"name": "Shell",
"bytes": "1044"
}
],
"symlink_target": ""
}
|
"""
An HTTP service for serving pages for tests, with
a configurable delay that can be passed as a
query parameter in a GET request.
"""
import os
from time import sleep
from urllib.parse import urlparse, parse_qs
from http.server import HTTPServer, SimpleHTTPRequestHandler
class DelayedRequestHandler(SimpleHTTPRequestHandler):
"""
Request handler with a configurable delay for testing
"""
def __init__(self, *args, **kwargs):
SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
self.path = None
def do_GET(self):
"""
Check parameters to see if a delay was specified.
If so then wait and then serve the GET request.
"""
# Parse the url into components
parsed_url = urlparse(self.path)
# Determine if delay was passed as a parameter
delay_time = parse_qs(parsed_url.query).get('delay')
if delay_time:
# Values are passed as a list of strings
# so keep the first value and convert to a float.
sleep(float(delay_time[0]))
# Prepend "tests/site" to the path because that
# is where the test files should be served from.
self.path = f"tests/site{self.path}"
return SimpleHTTPRequestHandler.do_GET(self)
def main():
"""
Start an HTTP server on the port specified in the SERVER_PORT
environment variable. Serves the files located under ``tests/site``.
"""
handler_class = DelayedRequestHandler
handler_class.protocol_version = "HTTP/1.0"
port = int(os.environ['SERVER_PORT'])
server_address = ('', port)
httpd = HTTPServer(server_address, handler_class)
address = httpd.socket.getsockname()
print("Serving HTTP on", address[0], "port", address[1], "...")
httpd.serve_forever()
if __name__ == "__main__":
main()
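# Example invocation (illustrative; the port value and page name are hypothetical):
#   SERVER_PORT=8003 python tests/http_server.py
# A test can then request e.g. http://localhost:8003/index.html?delay=2.5
# to have the response delayed by 2.5 seconds before it is served.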
|
{
"content_hash": "538fe50187616102aa0a3a01bc2e75a2",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 30.37704918032787,
"alnum_prop": 0.6475984889368591,
"repo_name": "edx/bok-choy",
"id": "930f67138c8f5f1b2d64f5dd7aed35d56cd6f53e",
"size": "1853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/http_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11992"
},
{
"name": "JavaScript",
"bytes": "1203"
},
{
"name": "Makefile",
"bytes": "2497"
},
{
"name": "Python",
"bytes": "181702"
}
],
"symlink_target": ""
}
|
"""A libusb1-based fastboot implementation."""
import binascii
import collections
import logging
import os
import struct
from . import usb_exceptions
from openhtf.util import argv
import six
FASTBOOT_DOWNLOAD_CHUNK_SIZE_KB = 1024
ARG_PARSER = argv.ModuleParser()
ARG_PARSER.add_argument(
'--fastboot_download_chunk_size_kb',
default=FASTBOOT_DOWNLOAD_CHUNK_SIZE_KB,
action=argv.StoreInModule,
type=int,
target='%s.FASTBOOT_DOWNLOAD_CHUNK_SIZE_KB' % __name__,
help='Size of chunks to send when downloading fastboot images')
_LOG = logging.getLogger(__name__)
DEFAULT_MESSAGE_CALLBACK = lambda m: _LOG.info('Got %s from device', m)
FastbootMessage = collections.namedtuple( # pylint: disable=invalid-name
'FastbootMessage', ['message', 'header'])
class FastbootProtocol(object):
"""Encapsulates the fastboot protocol."""
FINAL_HEADERS = {'OKAY', 'DATA'}
def __init__(self, usb):
"""Constructs a FastbootProtocol instance.
Arguments:
usb: UsbHandle instance.
"""
self.usb = usb
@property
def usb_handle(self):
"""This instance's USB handle."""
return self.usb
def send_command(self, command, arg=None):
"""Sends a command to the device.
Args:
command: The command to send.
arg: Optional argument to the command.
"""
if arg is not None:
command = '%s:%s' % (command, arg)
self._write(six.StringIO(command), len(command))
def handle_simple_responses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message.
"""
return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)
# pylint: disable=too-many-arguments
def handle_data_sending(self, source_file, source_len,
info_cb=DEFAULT_MESSAGE_CALLBACK,
progress_callback=None, timeout_ms=None):
"""Handles the protocol for sending data to the device.
Arguments:
source_file: File-object to read from for the device.
source_len: Amount of data, in bytes, to send to the device.
info_cb: Optional callback for text sent from the bootloader.
progress_callback: Callback that takes the current and the total progress
of the current file.
timeout_ms: Timeout in milliseconds to wait for each response.
Raises:
FastbootTransferError: When fastboot can't handle this amount of data.
FastbootStateMismatch: Fastboot responded with the wrong packet type.
FastbootRemoteFailure: Fastboot reported failure.
FastbootInvalidResponse: Fastboot responded with an unknown packet type.
Returns:
OKAY packet's message.
"""
accepted_size = self._accept_responses(
'DATA', info_cb, timeout_ms=timeout_ms)
accepted_size = binascii.unhexlify(accepted_size[:8])
accepted_size, = struct.unpack('>I', accepted_size)
if accepted_size != source_len:
raise usb_exceptions.FastbootTransferError(
'Device refused to download %s bytes of data (accepts %s bytes)',
source_len, accepted_size)
self._write(source_file, accepted_size, progress_callback)
return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)
# pylint: enable=too-many-arguments
def _accept_responses(self, expected_header, info_cb, timeout_ms=None):
"""Accepts responses until the expected header or a FAIL.
Arguments:
expected_header: OKAY or DATA
info_cb: Optional callback for text sent from the bootloader.
timeout_ms: Timeout in milliseconds to wait for each response.
Raises:
FastbootStateMismatch: Fastboot responded with the wrong packet type.
FastbootRemoteFailure: Fastboot reported failure.
FastbootInvalidResponse: Fastboot responded with an unknown packet type.
Returns:
OKAY packet's message.
"""
while True:
response = self.usb.read(64, timeout_ms=timeout_ms)
header = response[:4]
remaining = response[4:]
if header == 'INFO':
info_cb(FastbootMessage(remaining, header))
elif header in self.FINAL_HEADERS:
if header != expected_header:
raise usb_exceptions.FastbootStateMismatch(
'Expected %s, got %s', expected_header, header)
if header == 'OKAY':
info_cb(FastbootMessage(remaining, header))
return remaining
elif header == 'FAIL':
info_cb(FastbootMessage(remaining, header))
raise usb_exceptions.FastbootRemoteFailure('FAIL: %s', remaining)
else:
raise usb_exceptions.FastbootInvalidResponse(
'Got unknown header %s and response %s', header, remaining)
def _handle_progress(self, total, progress_callback): # pylint: disable=no-self-use
"""Calls the callback with the current progress and total ."""
current = 0
while True:
current += yield
try:
progress_callback(current, total)
except Exception: # pylint: disable=broad-except
_LOG.exception('Progress callback raised an exception. %s',
progress_callback)
continue
def _write(self, data, length, progress_callback=None):
"""Sends the data to the device, tracking progress with the callback."""
if progress_callback:
progress = self._handle_progress(length, progress_callback)
six.next(progress)
while length:
tmp = data.read(FASTBOOT_DOWNLOAD_CHUNK_SIZE_KB * 1024)
length -= len(tmp)
self.usb.write(tmp)
if progress_callback:
progress.send(len(tmp))
class FastbootCommands(object):
"""Encapsulates the fastboot commands."""
protocol_handler = FastbootProtocol
def __init__(self, usb):
"""Constructs a FastbootCommands instance.
Arguments:
usb: UsbHandle instance.
"""
self._usb = usb
self._protocol = self.protocol_handler(usb)
@property
def handle(self):
"""This instance's USB handle."""
return self._usb
def close(self):
"""Close the USB handle."""
self._usb.close()
def _simple_command(self, command, arg=None, **kwargs):
"""Send a simple command."""
self._protocol.send_command(command, arg)
return self._protocol.handle_simple_responses(**kwargs)
# pylint: disable=too-many-arguments
def flash_from_file(self, partition, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None,
timeout_ms=None):
"""Flashes a partition from the file on disk.
Args:
partition: Partition name to flash to.
source_file: Filename to download to the device.
source_len: Optional length of source_file, uses os.stat if not provided.
info_cb: See Download.
progress_callback: See Download.
timeout_ms: The amount of time to wait on okay after flashing.
Returns:
Download and flash responses, normally nothing.
"""
if source_len == 0:
# Fall back to stat.
source_len = os.stat(source_file).st_size
download_response = self.download(
source_file, source_len=source_len, info_cb=info_cb,
progress_callback=progress_callback)
flash_response = self.flash(partition, info_cb=info_cb,
timeout_ms=timeout_ms)
return download_response + flash_response
# pylint: enable=too-many-arguments
def download(self, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
"""Downloads a file to the device.
Args:
source_file: A filename or file-like object to download to the device.
source_len: Optional length of source_file. If source_file is a file-like
object and source_len is not provided, source_file is read into
memory.
info_cb: Optional callback accepting FastbootMessage for text sent from
the bootloader.
progress_callback: Optional callback called with the percent of the
source_file downloaded. Note, this doesn't include progress of the
actual flashing.
Returns:
Response to a download request, normally nothing.
"""
if isinstance(source_file, six.string_types):
source_len = os.stat(source_file).st_size
source_file = open(source_file)
if source_len == 0:
# Fall back to storing it all in memory :(
data = source_file.read()
source_file = six.StringIO(data)
source_len = len(data)
self._protocol.send_command('download', '%08x' % source_len)
return self._protocol.handle_data_sending(
source_file, source_len, info_cb, progress_callback=progress_callback)
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Flashes the last downloaded file to the given partition.
Args:
partition: Partition to flash.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
"""
return self._simple_command('flash', arg=partition, info_cb=info_cb,
timeout_ms=timeout_ms)
def erase(self, partition, timeout_ms=None):
"""Erases the given partition."""
self._simple_command('erase', arg=partition, timeout_ms=timeout_ms)
def get_var(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Returns the given variable's definition.
Args:
var: A variable the bootloader tracks, such as version.
info_cb: See Download. Usually no messages.
Returns:
Value of var according to the current bootloader.
"""
return self._simple_command('getvar', arg=var, info_cb=info_cb)
def oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Executes an OEM command on the device.
Args:
command: The command to execute, such as 'poweroff' or 'bootconfig read'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
info_cb: See Download. Messages vary based on command.
Returns:
The final response from the device.
"""
return self._simple_command(
'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)
def continue_(self):
"""Continues execution past fastboot into the system."""
return self._simple_command('continue')
def reboot(self, target_mode=None, timeout_ms=None):
"""Reboots the device.
Args:
target_mode: Normal reboot when unspecified (or None). Can specify
other target modes, such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode.
"""
return self._simple_command('reboot', arg=target_mode,
timeout_ms=timeout_ms)
def reboot_bootloader(self, timeout_ms=None):
"""Reboots into the bootloader, usually equiv to Reboot('bootloader')."""
return self._simple_command('reboot-bootloader', timeout_ms=timeout_ms)
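# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the methods
# above belong to this file's fastboot commands class (named FastbootCommands
# here; the constructor signature, transport handle, partition name and image
# path are all assumptions for illustration).
#
#   commands = FastbootCommands(usb_handle)   # handle acquisition assumed
#   commands.flash_from_file(
#       'boot', 'boot.img',
#       progress_callback=lambda pct: print('%d%% downloaded' % pct))
#   commands.reboot(target_mode='bootloader')
# ---------------------------------------------------------------------------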
|
{
"content_hash": "5ec28ee505a2a05bc150afd534de4721",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 86,
"avg_line_length": 35.06832298136646,
"alnum_prop": 0.6680835990081474,
"repo_name": "jettisonjoe/openhtf",
"id": "b2d958678ac13e8e6f0974886e3e696b0fb0a10e",
"size": "11887",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openhtf/plugs/usb/fastboot_protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17677"
},
{
"name": "HTML",
"bytes": "16790"
},
{
"name": "JavaScript",
"bytes": "10032"
},
{
"name": "Python",
"bytes": "916591"
},
{
"name": "TypeScript",
"bytes": "118082"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/dna/shared_dna_template_bol.iff"
result.attribute_template_id = -1
result.stfName("craft_dna_components_n","dna_template_bol")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "905b29cc925eb09bd8c8a647243010a3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7042682926829268,
"repo_name": "anhstudios/swganh",
"id": "97bc8b1fe1458a4bdf84a0c3b7004d4b7c137ae7",
"size": "473",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/component/dna/shared_dna_template_bol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
__author__ = 'lsamaha'
import random
class Words(object):
def __init__(self, path='samples'):
self.path = path
self.nouns = self.load_words('nouns')
self.verbs = self.load_words('verbs')
self.adjectives = self.load_words('adjectives')
self.adverbs = self.load_words('adverbs')
def any_noun(self):
return self.any(self.nouns)
def any_verb(self):
return self.any(self.verbs)
def any_adjective(self):
return self.any(self.adjectives)
def any_adverb(self):
return self.any(self.adverbs)
def any(self, words):
return random.choice(words).strip()
    def load_words(self, word_type):
        """Load one word list from disk, falling back to a stub list."""
        try:
            fname = "%s/%s" % (self.path, word_type)
            with open(fname) as f:
                words = f.readlines()
        except IOError:
            # Sample file missing or unreadable: use a tiny built-in list.
            words = ['fee', 'fie', 'foe', 'fum']
        return words
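if __name__ == "__main__":
    # Minimal demonstration (not part of the original module): without a
    # 'samples' directory the loader falls back to the stub word list, so
    # this runs anywhere.
    words = Words()
    print(words.any_adjective(), words.any_noun())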
|
{
"content_hash": "932f51fcf2021e993b4862650b37d576",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 55,
"avg_line_length": 25.083333333333332,
"alnum_prop": 0.5592469545957918,
"repo_name": "lsamaha/dervisher",
"id": "a0a33523e246b3ed12a3e9ebbff8bf850952f99b",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dervisher/words.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10759"
},
{
"name": "Shell",
"bytes": "2793"
}
],
"symlink_target": ""
}
|
from ccxt.async.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class bter (Exchange):
def describe(self):
return self.deep_extend(super(bter, self).describe(), {
'id': 'bter',
'name': 'Bter',
'countries': ['VG', 'CN'], # British Virgin Islands, China
'version': '2',
'hasCORS': False,
'hasFetchTickers': True,
'hasWithdraw': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980479-cfa3188c-6387-11e7-8191-93fc4184ba5c.jpg',
'api': {
'public': 'https://data.bter.com/api',
'private': 'https://api.bter.com/api',
},
'www': 'https://bter.com',
'doc': 'https://bter.com/api2',
'fees': 'https://bter.com/fee',
},
'api': {
'public': {
'get': [
'pairs',
'marketinfo',
'marketlist',
'tickers',
'ticker/{id}',
'orderBook/{id}',
'trade/{id}',
'tradeHistory/{id}',
'tradeHistory/{id}/{tid}',
],
},
'private': {
'post': [
'balances',
'depositAddress',
'newAddress',
'depositsWithdrawals',
'buy',
'sell',
'cancelOrder',
'cancelAllOrders',
'getOrder',
'openOrders',
'tradeHistory',
'withdraw',
],
},
},
})
async def fetch_markets(self):
response = await self.publicGetMarketinfo()
markets = self.safe_value(response, 'pairs')
if not markets:
raise ExchangeError(self.id + ' fetchMarkets got an unrecognized response')
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
base, quote = id.split('_')
base = base.upper()
quote = quote.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': details['decimal_places'],
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': None,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
result = self.parse_order_book(orderbook)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high24hr']),
'low': float(ticker['low24hr']),
'bid': float(ticker['highestBid']),
'ask': float(ticker['lowestAsk']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': float(ticker['percentChange']),
'percentage': None,
'average': None,
'baseVolume': float(ticker['quoteVolume']),
'quoteVolume': float(ticker['baseVolume']),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = self.parse8601(trade['date'])
return {
'id': trade['tradeID'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['rate'],
'amount': self.safe_float(trade, 'amount'),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
order = {
'currencyPair': self.market_id(symbol),
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['orderNumber'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostCancelOrder({'orderNumber': id})
async def withdraw(self, currency, amount, address, params={}):
await self.load_markets()
response = await self.privatePostWithdraw(self.extend({
'currency': currency.lower(),
'amount': amount,
            'address': address,  # Address must exist in your AddressBook in security settings
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
if response['result'] != 'true':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
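# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of ccxt itself): driving this async
# client from an asyncio event loop. The symbol and credentials below are
# placeholders.
#
#   import asyncio
#
#   async def main():
#       exchange = bter({'apiKey': '...', 'secret': '...'})
#       try:
#           print((await exchange.fetch_ticker('BTC/USDT'))['last'])
#       finally:
#           await exchange.close()
#
#   asyncio.get_event_loop().run_until_complete(main())
# ---------------------------------------------------------------------------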
|
{
"content_hash": "8d3ddbe3a95867eef9ef078ce41341a3",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 126,
"avg_line_length": 37.59851301115241,
"alnum_prop": 0.48566343682024915,
"repo_name": "tritoanst/ccxt",
"id": "fd6fb60f0682d927f0373ba9935778dc2067150b",
"size": "10139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ccxt/async/bter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3955653"
},
{
"name": "PHP",
"bytes": "783191"
},
{
"name": "Python",
"bytes": "680573"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
}
|
"""The Minecraft Server sensor platform."""
import logging
from typing import Any, Dict
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TIME_MILLISECONDS
from homeassistant.helpers.typing import HomeAssistantType
from . import MinecraftServer, MinecraftServerEntity
from .const import (
ATTR_PLAYERS_LIST,
DOMAIN,
ICON_LATENCY_TIME,
ICON_PLAYERS_MAX,
ICON_PLAYERS_ONLINE,
ICON_PROTOCOL_VERSION,
ICON_VERSION,
NAME_LATENCY_TIME,
NAME_PLAYERS_MAX,
NAME_PLAYERS_ONLINE,
NAME_PROTOCOL_VERSION,
NAME_VERSION,
UNIT_PLAYERS_MAX,
UNIT_PLAYERS_ONLINE,
UNIT_PROTOCOL_VERSION,
UNIT_VERSION,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Minecraft Server sensor platform."""
server = hass.data[DOMAIN][config_entry.unique_id]
# Create entities list.
entities = [
MinecraftServerVersionSensor(server),
MinecraftServerProtocolVersionSensor(server),
MinecraftServerLatencyTimeSensor(server),
MinecraftServerPlayersOnlineSensor(server),
MinecraftServerPlayersMaxSensor(server),
]
# Add sensor entities.
async_add_entities(entities, True)
class MinecraftServerSensorEntity(MinecraftServerEntity):
"""Representation of a Minecraft Server sensor base entity."""
def __init__(
self,
server: MinecraftServer,
type_name: str,
icon: str = None,
unit: str = None,
device_class: str = None,
) -> None:
"""Initialize sensor base entity."""
super().__init__(server, type_name, icon, device_class)
self._state = None
self._unit = unit
@property
def available(self) -> bool:
"""Return sensor availability."""
return self._server.online
@property
def state(self) -> Any:
"""Return sensor state."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return sensor measurement unit."""
return self._unit
class MinecraftServerVersionSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server version sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize version sensor."""
super().__init__(
server=server, type_name=NAME_VERSION, icon=ICON_VERSION, unit=UNIT_VERSION
)
async def async_update(self) -> None:
"""Update version."""
self._state = self._server.version
class MinecraftServerProtocolVersionSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server protocol version sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize protocol version sensor."""
super().__init__(
server=server,
type_name=NAME_PROTOCOL_VERSION,
icon=ICON_PROTOCOL_VERSION,
unit=UNIT_PROTOCOL_VERSION,
)
async def async_update(self) -> None:
"""Update protocol version."""
self._state = self._server.protocol_version
class MinecraftServerLatencyTimeSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server latency time sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize latency time sensor."""
super().__init__(
server=server,
type_name=NAME_LATENCY_TIME,
icon=ICON_LATENCY_TIME,
unit=TIME_MILLISECONDS,
)
async def async_update(self) -> None:
"""Update latency time."""
self._state = self._server.latency_time
class MinecraftServerPlayersOnlineSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server online players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize online players sensor."""
super().__init__(
server=server,
type_name=NAME_PLAYERS_ONLINE,
icon=ICON_PLAYERS_ONLINE,
unit=UNIT_PLAYERS_ONLINE,
)
async def async_update(self) -> None:
"""Update online players state and device state attributes."""
self._state = self._server.players_online
device_state_attributes = None
players_list = self._server.players_list
if players_list is not None:
if len(players_list) != 0:
device_state_attributes = {ATTR_PLAYERS_LIST: self._server.players_list}
self._device_state_attributes = device_state_attributes
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return players list in device state attributes."""
return self._device_state_attributes
class MinecraftServerPlayersMaxSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server maximum number of players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize maximum number of players sensor."""
super().__init__(
server=server,
type_name=NAME_PLAYERS_MAX,
icon=ICON_PLAYERS_MAX,
unit=UNIT_PLAYERS_MAX,
)
async def async_update(self) -> None:
"""Update maximum number of players."""
self._state = self._server.players_max
|
{
"content_hash": "11079aa4c8872ba53a2473947eee9037",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 88,
"avg_line_length": 30.649717514124294,
"alnum_prop": 0.6431336405529954,
"repo_name": "titilambert/home-assistant",
"id": "20f9e98e5303c12cbdd28a22d823dc64e19cba98",
"size": "5425",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/minecraft_server/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
import argparse
import boto3
import datetime
import json, yaml
import logging
import os
import socket
import ssl
import sys
import urllib.request
# Logging: from https://docs.python.org/3/howto/logging-cookbook.html
# Lambda has a read-only filesystem, so only create the filehandler when running locally (down in __main__).
logger = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
class ExpiredCertDetector(object):
"""
Class to hold the logic for detecting expired HTTPS certificates.
"""
def __init__(self, **kwargs):
super(ExpiredCertDetector, self).__init__()
self.expired_certs = set()
self.expiring_certs = set()
self.long_lasting_certs = set()
self.unable_to_connect = set()
self.buffer_days = int(os.environ['expiration_buffer'])
self.hosts = set()
def get_hosts(self):
with urllib.request.urlopen(os.environ['hosts_url']) as url:
data = url.read().decode()
try:
hosts = json.loads(data)['hosts']
except json.decoder.JSONDecodeError:
            hosts = yaml.safe_load(data)['hosts']  # safe_load: never execute arbitrary YAML tags
self.hosts = set(hosts)
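    # The hosts document fetched above is expected to carry a top-level
    # 'hosts' key, e.g. (illustrative values):
    #
    #   {"hosts": ["example.com", "www.example.org"]}
    #
    # or the YAML equivalent:
    #
    #   hosts:
    #     - example.com
    #     - www.example.org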
def run(self):
self.get_hosts()
self.check_https_expiry_datetime()
# Code basically taken from https://serverlesscode.com/post/ssl-expiration-alerts-with-lambda/
def check_https_expiry_datetime(self):
ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'
context = ssl.create_default_context()
for hostname in self.hosts:
conn = context.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=hostname,
)
# 1 second timeout because Lambda has runtime limitations
conn.settimeout(1.0)
try:
conn.connect((hostname, 443))
ssl_info = conn.getpeercert()
ssl_expiration_date = datetime.datetime.strptime(ssl_info['notAfter'], ssl_date_fmt)
logger.debug("Host %s has certificate expiration of %s" % (hostname, ssl_expiration_date))
if ssl_expiration_date < datetime.datetime.utcnow():
# already expired!
self.expired_certs.add(hostname)
elif ssl_expiration_date < (datetime.datetime.utcnow() + datetime.timedelta(days=self.buffer_days)):
# going to expire in buffer
self.expiring_certs.add(hostname)
else:
# expires after buffer days
self.long_lasting_certs.add(hostname)
            except Exception:
                # Avoid a bare except, which would also swallow
                # KeyboardInterrupt and SystemExit.
                logger.warning("Failed to connect to %s" % hostname)
                self.unable_to_connect.add(hostname)
def report(detector_object):
print("---------------")
if len(detector_object.long_lasting_certs) > 0:
expires_past_buffer_log = "Found {} hosts with certificate expiration dates beyond the {} day warning. Hosts: {}".format(
len(detector_object.long_lasting_certs), detector_object.buffer_days, detector_object.long_lasting_certs
)
print(expires_past_buffer_log)
if len(detector_object.expiring_certs) > 0:
expires_soon_log = "Found {} hosts with certificate expiration dates within the {} day warning period. Hosts: {}".format(
len(detector_object.expiring_certs), detector_object.buffer_days, detector_object.expiring_certs
)
print(expires_soon_log)
if len(detector_object.expired_certs) > 0:
expired_log = "Found {} hosts with already expired certificates!!! Hosts: {}".format(
len(detector_object.expired_certs), detector_object.expired_certs
)
print(expired_log)
if len(detector_object.unable_to_connect) > 0:
connect_error_log = "Unable to connect to {} hosts!!! Hosts: {}".format(
len(detector_object.unable_to_connect), detector_object.unable_to_connect
)
print(connect_error_log)
print("---------------")
def send_sns(detector_object):
if len(detector_object.expired_certs) > 0:
message = "The following hosts were identified as having expired certs: {}".format(
detector_object.expired_certs)
boto3.client('sns').publish(
TargetArn=os.environ['sns_topic_arn'],
Message=message,
Subject="Lambda Monitor: Expired HTTPS Certificates"
)
if len(detector_object.expiring_certs) > 0:
message = "The following hosts were identified as having certs that will expire soon: {}".format(
detector_object.expiring_certs)
boto3.client('sns').publish(
TargetArn=os.environ['sns_topic_arn'],
Message=message,
Subject="Lambda Monitor: Expiring HTTPS Certificates"
)
if len(detector_object.unable_to_connect) > 0:
message = "Unable to connect to the following hosts: {}".format(detector_object.unable_to_connect)
boto3.client('sns').publish(
TargetArn=os.environ['sns_topic_arn'],
Message=message,
Subject="Lambda Monitor: Unable to check HTTPS Certificates"
)
print("foo")
def lambda_handler(event, context):
detector = ExpiredCertDetector()
detector.run()
send_sns(detector)
if __name__ == "__main__":
# Uncomment following two lines if want to test/debug/run/emulate lambda
# lambda_handler("foo", "bar")
# sys.exit(0)
parser = argparse.ArgumentParser(description="Check for expired or soon to be expiring HTTPS certificates")
parser.add_argument(
"-n", "--no-report", help="Skip showing report",
action="store_true", default=False
)
args = parser.parse_args()
    # Logging: from https://docs.python.org/3/howto/logging-cookbook.html
# Lambda has read-only filesystem. So only create the filehandler when running locally.
# create file handler which logs even debug messages
fh = logging.FileHandler(os.path.splitext(os.path.basename(__file__))[0] + ".log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
detector = ExpiredCertDetector()
detector.run()
if not args.no_report:
report(detector)
|
{
"content_hash": "8a1fcbe627480d2f75e1724ee46bec86",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 129,
"avg_line_length": 36.39010989010989,
"alnum_prop": 0.6282651366450249,
"repo_name": "joehack3r/aws",
"id": "a8be76208e745a5ed8890618369d58b41a4f4cd0",
"size": "6647",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "lambdas/https-certificate-check/code/index.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "42589"
},
{
"name": "Shell",
"bytes": "13971"
}
],
"symlink_target": ""
}
|
from google.cloud import redis_v1beta1
async def sample_list_instances():
# Create a client
client = redis_v1beta1.CloudRedisAsyncClient()
# Initialize request argument(s)
request = redis_v1beta1.ListInstancesRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_instances(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END redis_v1beta1_generated_CloudRedis_ListInstances_async]
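if __name__ == "__main__":
    # Illustrative driver (not part of the generated sample): running it
    # requires valid credentials and a real parent resource.
    import asyncio
    asyncio.run(sample_list_instances())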
|
{
"content_hash": "739220b1313722205f6ea328f833b638",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 25.45,
"alnum_prop": 0.7092337917485265,
"repo_name": "googleapis/python-redis",
"id": "5a766a26fc095ff90cf63fedeabf885ee69f93fe",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/redis_v1beta1_generated_cloud_redis_list_instances_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "882524"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
}
|
from calvinextras.calvinsys.io.pwm import BasePWM
import pigpio
class PIGPIOPWM(BasePWM.BasePWM):
"""
Calvinsys object handling PWM for a pin using the pigpio package (& daemon)
"""
    def init(self, pin, frequency, dutycycle, **kwargs):
self._pin = pin
self._dutycycle = dutycycle
self._frequency = frequency
self._gpio = pigpio.pi()
self._gpio.set_mode(self._pin, pigpio.OUTPUT)
        self._gpio.set_PWM_range(self._pin, 100)  # pigpio defaults to a duty-cycle range of [0, 255]; use 0-100 here
self._gpio.set_PWM_frequency(self._pin, self._frequency)
self._gpio.set_PWM_dutycycle(self._pin, self._dutycycle)
def can_write(self):
return self._gpio is not None
def write(self, dutycycle):
self._dutycycle = dutycycle
self._gpio.set_PWM_dutycycle(self._pin, self._dutycycle)
    def close(self):
        self._gpio.stop()  # release the connection to the pigpio daemon
        self._gpio = None
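# ---------------------------------------------------------------------------
# Illustrative usage sketch: Calvin normally instantiates calvinsys objects
# itself, so the construction below and the pin/frequency/duty-cycle values
# are assumptions for illustration only.
#
#   pwm = PIGPIOPWM()                 # construction normally done by Calvin
#   pwm.init(pin=18, frequency=800, dutycycle=50)
#   if pwm.can_write():
#       pwm.write(75)                 # raise duty cycle to 75 percent
#   pwm.close()
# ---------------------------------------------------------------------------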
|
{
"content_hash": "5df8834011e56b39cdc50393480ed74a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.624468085106383,
"repo_name": "EricssonResearch/calvin-base",
"id": "6f922cd0eb4796571f8d8e475ffa31d2ce422807",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvinextras/calvinsys/io/pwm/raspberry_pi/PIGPIOPWM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
}
|
import datetime
import webob.exc
from nova.api.openstack import extensions
from nova import compute
import nova.conf
from nova import context as nova_context
from nova.i18n import _
from nova import utils
CONF = nova.conf.CONF
authorize = extensions.extension_authorizer('compute',
'instance_usage_audit_log')
class InstanceUsageAuditLogController(object):
def __init__(self):
self.host_api = compute.HostAPI()
def index(self, req):
context = req.environ['nova.context']
authorize(context)
task_log = self._get_audit_task_logs(context)
return {'instance_usage_audit_logs': task_log}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
if '.' in id:
before_date = datetime.datetime.strptime(str(id),
"%Y-%m-%d %H:%M:%S.%f")
else:
before_date = datetime.datetime.strptime(str(id),
"%Y-%m-%d %H:%M:%S")
except ValueError:
msg = _("Invalid timestamp for date %s") % id
raise webob.exc.HTTPBadRequest(explanation=msg)
task_log = self._get_audit_task_logs(context,
before=before_date)
return {'instance_usage_audit_log': task_log}
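    # Illustrative request handled by show() above; the id path segment is
    # the URL-encoded 'before' timestamp (tenant id made up):
    #
    #   GET /v2/{tenant_id}/os-instance_usage_audit_log/2012-07-05%2010:00:00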
def _get_audit_task_logs(self, context, begin=None, end=None,
before=None):
"""Returns a full log for all instance usage audit tasks on all
computes.
:param begin: datetime beginning of audit period to get logs for,
Defaults to the beginning of the most recently completed
audit period prior to the 'before' date.
:param end: datetime ending of audit period to get logs for,
Defaults to the ending of the most recently completed
audit period prior to the 'before' date.
:param before: By default we look for the audit period most recently
completed before this datetime. Has no effect if both begin and end
are specified.
"""
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
defbegin, defend = utils.last_completed_audit_period(before=before)
if begin is None:
begin = defbegin
if end is None:
end = defend
task_logs = self.host_api.task_log_get_all(context,
"instance_usage_audit",
begin, end)
# We do this in this way to include disabled compute services,
# which can have instances on them. (mdragon)
filters = {'topic': CONF.compute_topic}
services = self.host_api.service_get_all(context, filters=filters)
hosts = set(serv['host'] for serv in services)
seen_hosts = set()
done_hosts = set()
running_hosts = set()
total_errors = 0
total_items = 0
for tlog in task_logs:
seen_hosts.add(tlog['host'])
if tlog['state'] == "DONE":
done_hosts.add(tlog['host'])
if tlog['state'] == "RUNNING":
running_hosts.add(tlog['host'])
total_errors += tlog['errors']
total_items += tlog['task_items']
log = {tl['host']: dict(state=tl['state'],
instances=tl['task_items'],
errors=tl['errors'],
message=tl['message'])
for tl in task_logs}
missing_hosts = hosts - seen_hosts
overall_status = "%s hosts done. %s errors." % (
'ALL' if len(done_hosts) == len(hosts)
else "%s of %s" % (len(done_hosts), len(hosts)),
total_errors)
return dict(period_beginning=str(begin),
period_ending=str(end),
num_hosts=len(hosts),
num_hosts_done=len(done_hosts),
num_hosts_running=len(running_hosts),
num_hosts_not_run=len(missing_hosts),
hosts_not_run=list(missing_hosts),
total_instances=total_items,
total_errors=total_errors,
overall_status=overall_status,
log=log)
class Instance_usage_audit_log(extensions.ExtensionDescriptor):
"""Admin-only Task Log Monitoring."""
name = "OSInstanceUsageAuditLog"
alias = "os-instance_usage_audit_log"
namespace = "http://docs.openstack.org/ext/services/api/v1.1"
updated = "2012-07-06T01:00:00Z"
def get_resources(self):
ext = extensions.ResourceExtension('os-instance_usage_audit_log',
InstanceUsageAuditLogController())
return [ext]
|
{
"content_hash": "5e8de304517b71d1835de9481f23c931",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 41.4344262295082,
"alnum_prop": 0.5432245301681503,
"repo_name": "bigswitch/nova",
"id": "3918c711689610d70df620fbe68fbad5a65012a2",
"size": "5692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/legacy_v2/contrib/instance_usage_audit_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17220528"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
}
|
try:
import json
except ImportError:
import simplejson as json
import logging
import time
import unittest
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from jobsub.models import OozieDesign
from liboozie.oozie_api_test import OozieServerProvider
from jobbrowser import models, views
from jobbrowser.conf import SHARE_JOBS
LOG = logging.getLogger(__name__)
def test_dots_to_camel_case():
assert_equal("fooBar", models.dots_to_camel_case("foo.bar"))
assert_equal("fooBarBaz", models.dots_to_camel_case("foo.bar.baz"))
assert_equal("foo", models.dots_to_camel_case("foo"))
assert_equal("foo.", models.dots_to_camel_case("foo."))
def test_get_path():
assert_equal("/foo/bar", models.get_path("hdfs://host/foo/bar"))
def test_format_counter_name():
assert_equal("Foo Bar", views.format_counter_name("fooBar"))
assert_equal("Foo Bar Baz", views.format_counter_name("fooBarBaz"))
assert_equal("Foo", views.format_counter_name("foo"))
assert_equal("Foo.", views.format_counter_name("foo."))
assert_equal("A Bbb Ccc", views.format_counter_name("A_BBB_CCC"))\
def get_hadoop_job_id(oozie_api, oozie_jobid, action_index=1, timeout=60, step=5):
hadoop_job_id = None
start = time.time()
while not hadoop_job_id and time.time() - start < timeout:
time.sleep(step)
hadoop_job_id = oozie_api.get_job(oozie_jobid).actions[action_index].externalId
if not hadoop_job_id:
logs = OozieServerProvider.oozie.get_job_log(oozie_jobid)
msg = "[%d] %s took more than %d to create a job: %s" % (time.time(), oozie_jobid, timeout, logs)
LOG.info(msg)
raise Exception(msg)
return hadoop_job_id
class TestJobBrowserWithHadoop(unittest.TestCase, OozieServerProvider):
"""
Tests for JobBrowser that requires Hadoop. Use the same mini_cluster and jobsubd.
"""
requires_hadoop = True
user_count = 0
@classmethod
def setup_class(cls):
OozieServerProvider.setup_class()
if not cls.cluster.fs.exists("/tmp"):
cls.cluster.fs.do_as_superuser(cls.cluster.fs.mkdir, "/tmp")
cls.cluster.fs.do_as_superuser(cls.cluster.fs.chmod, "/tmp", 0777)
# Install examples
import jobsub.management.commands.jobsub_setup as jobsub_setup
if not jobsub_setup.Command().has_been_setup():
jobsub_setup.Command().handle()
cls.sleep_design_id = OozieDesign.objects.get(name='sleep_job').id
def setUp(self):
TestJobBrowserWithHadoop.user_count += 1
self.username = 'test' + str(TestJobBrowserWithHadoop.user_count)
self.home_dir = '/user/%s' % self.username
self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, self.home_dir, 0777, True)
self.cluster.fs.do_as_superuser(self.cluster.fs.chown, self.home_dir, self.username, "test", recursive=True)
self.client = make_logged_in_client(username=self.username, is_superuser=False, groupname='test')
grant_access(self.username, 'test', 'jobsub')
grant_access(self.username, 'test', 'jobbrowser')
# Ensure access to MR folder
self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, '/tmp', 0777, recursive=True)
self.cluster.fs.setuser(self.username)
def tearDown(self):
try:
# Remove user home directories.
self.cluster.fs.do_as_superuser(self.cluster.fs.rmtree, self.home_dir)
except:
pass
def test_uncommon_views(self):
"""
These views exist, but tend not to be ever called,
because they're not in the normal UI.
"""
# None of these should raise
self.client.get("/jobbrowser/clusterstatus")
self.client.get("/jobbrowser/queues")
self.client.get("/jobbrowser/jobbrowser")
def test_failed_jobs(self):
"""
Test jobs with genuine failure, not just killed
"""
# Create design that will fail because the script file isn't there
INPUT_DIR = self.home_dir + '/input'
OUTPUT_DIR = self.home_dir + '/output'
try:
self.cluster.fs.mkdir(self.home_dir + "/jt-test_failed_jobs")
self.cluster.fs.mkdir(INPUT_DIR)
self.cluster.fs.rmtree(OUTPUT_DIR)
except:
# rmtree probably failed here.
pass
response = self.client.post('/jobsub/new_design/mapreduce', {
'wf-name': ['test_failed_jobs-1'],
'wf-description': ['description test_failed_jobs-1'],
'action-args': [''],
'action-jar_path': ['/user/hue/jobsub/examples/hadoop-examples.jar'],
'action-archives': ['[]'],
'action-job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
{"name":"mapred.output.dir","value":"%s"},\
{"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)],
'action-files': ['[]']}, follow=True)
designs = json.loads(response.context['designs'])
# Submit the job
design_id = designs[0]['id']
response = self.client.post("/jobsub/submit_design/%d" % design_id, follow=True)
oozie_jobid = response.context['jobid']
job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=500, step=1)
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
# Select only killed jobs (should be absent)
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/?state=killed')
assert_false(hadoop_job_id in response.content)
# Select only failed jobs (should be present)
# Map job should succeed. Reduce job should fail.
response = self.client.get('/jobbrowser/jobs/?state=failed')
assert_true(hadoop_job_id in response.content)
# The single job view should have the failed task table
response = self.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
html = response.content.lower()
assert_true('failed task' in html)
# The map task should say success (empty input)
map_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
assert_true('succeed' in response.content)
assert_true('failed' not in response.content)
# The reduce task should say failed
reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
assert_true('succeed' not in response.content)
assert_true('failed' in response.content)
# Selecting by failed state should include the failed map
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
assert_true('_r_000000' in response.content)
assert_true('_m_000000' not in response.content)
def test_kill_job(self):
"""
Test job in kill state.
"""
# Clone design
assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())
# Run the sleep example, since it doesn't require user home directory
design_id = OozieDesign.objects.get(owner__username=self.username).id
response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
dict(map_sleep_time=1,
num_maps=1,
num_reduces=1,
reduce_sleep_time=1),
follow=True)
oozie_jobid = response.context['jobid']
# Wait for a job to be created and fetch job ID
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
client2 = make_logged_in_client('test_non_superuser', is_superuser=False, groupname='test')
grant_access('test_non_superuser', 'test', 'jobbrowser')
response = client2.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id,))
assert_equal("Permission denied. User test_non_superuser cannot delete user %s's job." % self.username, response.context["error"])
# Make sure that the first map task succeeds before moving on
# This will keep us from hitting timing-related failures
first_mapper = hadoop_job_id.replace('job', 'task') + '_m_000000'
start = time.time()
timeout_sec = 60
while first_mapper not in \
self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,)).content:
time.sleep(1)
# If this assert fails, something has probably really failed
assert_true(time.time() - start < timeout_sec,
"Timed out waiting for first mapper to complete")
# Kill task
self.client.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id,))
# It should say killed
response = self.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
html = response.content.lower()
assert_true(hadoop_job_id in html)
assert_true('killed' in html)
# Exercise select by taskstate
self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=running' % (hadoop_job_id,))
self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=killed' % (hadoop_job_id,))
# Test single task page
late_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, late_task_id))
assert_false('succeed' in response.content)
assert_true('killed' in response.content)
# The first task should've succeeded
# We use a different method of checking success for this one
early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, early_task_id))
assert_true('succeed' in response.content)
assert_false('failed' in response.content)
# Test single attempt page
early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
attempt_id = early_task_id.replace('task', 'attempt') + '_0'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s/attempts/%s/logs' %
(hadoop_job_id, early_task_id, attempt_id))
assert_true('syslog' in response.content)
# Test dock jobs
response = self.client.get('/jobbrowser/dock_jobs/')
assert_false('completed' in response.content)
assert_false('failed' in response.content)
def test_job(self):
"""
Test new job views.
The status of the jobs should be the same as the status reported back by oozie.
In this case, all jobs should succeed.
"""
# Clone design
assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())
# Run the sleep example, since it doesn't require user home directory
design_id = OozieDesign.objects.get(owner__username=self.username).id
response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
dict(map_sleep_time=1,
num_maps=1,
num_reduces=1,
reduce_sleep_time=1),
follow=True)
oozie_jobid = response.context['jobid']
job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/')
assert_true(hadoop_job_id in response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?state=completed')
assert_true(hadoop_job_id in response.content)
response = self.client.get('/jobbrowser/jobs/?state=failed')
assert_false(hadoop_job_id in response.content)
response = self.client.get('/jobbrowser/jobs/?state=running')
assert_false(hadoop_job_id in response.content)
response = self.client.get('/jobbrowser/jobs/?state=killed')
assert_false(hadoop_job_id in response.content)
# Check sharing permissions
# Login as ourself
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id in response.content)
finally:
finish()
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_true(hadoop_job_id in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?user=')
assert_false(hadoop_job_id in response.content)
finally:
finish()
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
# We can't just check the complete contents of the python map because the
# SLOTS_MILLIS_* entries have a variable number of milliseconds from
# run-to-run.
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 1)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0)
assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)
# There should be 4 tasks for this job: cleanup, setup, map, reduce
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
    assert_equal(4, len(response.context['page'].object_list))
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
    assert_equal(1, len(response.context['page'].object_list))
# Select by taskstate
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
    assert_equal(4, len(response.context['page'].object_list))
# Select by text
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (hadoop_job_id,))
    assert_equal(1, len(response.context['page'].object_list))
# Test job single logs page
response = self.client.get('/jobbrowser/jobs/%s/single_logs' % (hadoop_job_id))
assert_true('syslog' in response.content)
|
{
"content_hash": "925be825918317cf41af39e7fd13c1ab",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 147,
"avg_line_length": 43.71270718232044,
"alnum_prop": 0.6803589484327603,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "1f099d1b28e4ce5f0a71f9809d700c3904b1b99b",
"size": "16616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/jobbrowser/src/jobbrowser/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
from django import forms
from django.forms import ValidationError
from django.utils.translation import ugettext as _
# Import the device choice enums from the models package
from .models import *
class SnmpEditDeviceForm(forms.Form):
name = forms.CharField(
label=_('Name'),
required=True,
max_length=256,
error_messages={
'required': _('The device must have a display name.')
}
)
hostname = forms.CharField(
label=_('Hostname'),
required=True,
max_length=256,
error_messages={
'required': _('Please enter a valid IP address or hostname.'),
}
)
ping_mode = forms.ChoiceField(
choices=device_ping_choices,
label=_('Ping Mode'),
required=True,
initial=0 # Set the initial value to 'ICMP'
)
ping_port = forms.IntegerField(
label=_('Port'),
required=True,
initial=22,
min_value=1,
max_value=65535,
)
snmp_template = forms.ChoiceField(
choices=snmp_template_choices,
label=_('SNMP Template'),
required=True,
initial=0
)
snmp_community = forms.CharField(
label=_('SNMP Community'),
required=True,
max_length=256,
initial='public',
error_messages={
'required': _('Please enter the community string that is configured on this device.'),
}
)
snmp_port = forms.IntegerField(
label=_('SNMP Port'),
required=True,
initial=161,
min_value=1,
max_value=65535,
)
|
{
"content_hash": "671fcc7ecbb0b4e450da83e04782d757",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 98,
"avg_line_length": 24.208955223880597,
"alnum_prop": 0.5702836004932182,
"repo_name": "MikaelSchultz/dofiloop-sentinel",
"id": "25abc0f08dd76ffa18a707aa4e1d3454fb1b5b10",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentinel/device/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46633"
},
{
"name": "HTML",
"bytes": "68123"
},
{
"name": "JavaScript",
"bytes": "91424"
},
{
"name": "Python",
"bytes": "141924"
}
],
"symlink_target": ""
}
|
"""Subsample reads from a pair of fastq or fastq.gz files using reformat.sh
Usage:
subsample_fastq.py (--sample=<N>) IN1 IN2 OUT1 OUT2
subsample_fastq.py (-h --help)
Inputs:
IN1, IN2 Input fastq(.gz) files
OUT1, OUT2 Output subsampled files
Options:
-h --help Show this screen
-n --sample=<N> Number of reads to sample
"""
from docopt import docopt
from plumbum import local
def subsample_fastq(in1, in2, out1, out2, n):
"""Subsample reads from a pair of fastq files using seqtk
Parameters
----------
in1, in2
The input pair
out1, out2
The paths to the subsampled output files
"""
reformat = local['reformat.sh']
reformat[
'in=' + in1, 'in2=' + in2, 'out=' + out1, 'out2=' + out2,
'samplereadstarget=' + n]()
if __name__ == "__main__":
opts = docopt(__doc__)
subsample_fastq(opts['IN1'], opts['IN2'], opts['OUT1'], opts['OUT2'],
n=opts['--sample'])
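# Example invocation (illustrative; file names are made up):
#
#   ./subsample_fastq.py --sample=100000 reads_1.fq.gz reads_2.fq.gz \
#       sub_1.fq.gz sub_2.fq.gz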
|
{
"content_hash": "bf4ab8962f5c17008988e890569846f1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 24.725,
"alnum_prop": 0.5854398382204247,
"repo_name": "fennerm/fmbiopy",
"id": "677072e81e37ec56921cd71fb7d9aadeef917873",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/subsample_fastq.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "196"
},
{
"name": "Perl",
"bytes": "466"
},
{
"name": "Python",
"bytes": "116621"
},
{
"name": "Shell",
"bytes": "3182"
}
],
"symlink_target": ""
}
|
# coding: utf-8
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import types as sqltypes, exc, schema
from sqlalchemy.sql import table, column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.util import u, b
from sqlalchemy import util
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.dialects.oracle import cx_oracle, base as oracle
from sqlalchemy.engine import default
import decimal
from sqlalchemy.testing.schema import Table, Column
import datetime
import os
from sqlalchemy import sql
from sqlalchemy.testing.mock import Mock
class OutParamTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = 'oracle+cx_oracle'
@classmethod
def setup_class(cls):
testing.db.execute("""
create or replace procedure foo(x_in IN number, x_out OUT number,
y_out OUT number, z_out OUT varchar) IS
retval number;
begin
retval := 6;
x_out := 10;
y_out := x_in * 15;
z_out := NULL;
end;
""")
def test_out_params(self):
result = testing.db.execute(text('begin foo(:x_in, :x_out, :y_out, '
':z_out); end;',
bindparams=[bindparam('x_in', Float),
outparam('x_out', Integer),
outparam('y_out', Float),
outparam('z_out', String)]), x_in=5)
eq_(result.out_parameters,
{'x_out': 10, 'y_out': 75, 'z_out': None})
assert isinstance(result.out_parameters['x_out'], int)
@classmethod
def teardown_class(cls):
testing.db.execute("DROP PROCEDURE foo")
class CXOracleArgsTest(fixtures.TestBase):
__only_on__ = 'oracle+cx_oracle'
def test_autosetinputsizes(self):
dialect = cx_oracle.dialect()
assert dialect.auto_setinputsizes
dialect = cx_oracle.dialect(auto_setinputsizes=False)
assert not dialect.auto_setinputsizes
def test_exclude_inputsizes_none(self):
dialect = cx_oracle.dialect(exclude_setinputsizes=None)
eq_(dialect.exclude_setinputsizes, set())
def test_exclude_inputsizes_custom(self):
import cx_Oracle
dialect = cx_oracle.dialect(dbapi=cx_Oracle,
exclude_setinputsizes=('NCLOB',))
eq_(dialect.exclude_setinputsizes, set([cx_Oracle.NCLOB]))
class QuotedBindRoundTripTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_table_round_trip(self):
oracle.RESERVED_WORDS.remove('UNION')
metadata = self.metadata
table = Table("t1", metadata,
Column("option", Integer),
Column("plain", Integer, quote=True),
# test that quote works for a reserved word
# that the dialect isn't aware of when quote
# is set
Column("union", Integer, quote=True)
)
metadata.create_all()
table.insert().execute(
{"option": 1, "plain": 1, "union": 1}
)
eq_(
testing.db.execute(table.select()).first(),
(1, 1, 1)
)
table.update().values(option=2, plain=2, union=2).execute()
eq_(
testing.db.execute(table.select()).first(),
(2, 2, 2)
)
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "oracle" #oracle.dialect()
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_owner(self):
meta = MetaData()
parent = Table('parent', meta, Column('id', Integer,
primary_key=True), Column('name', String(50)),
schema='ed')
child = Table('child', meta, Column('id', Integer,
primary_key=True), Column('parent_id', Integer,
ForeignKey('ed.parent.id')), schema='ed')
self.assert_compile(parent.join(child),
'ed.parent JOIN ed.child ON ed.parent.id = '
'ed.child.parent_id')
def test_subquery(self):
t = table('sometable', column('col1'), column('col2'))
s = select([t])
s = select([s.c.col1, s.c.col2])
self.assert_compile(s, "SELECT col1, col2 FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 "
"AS col2 FROM sometable)")
def test_bindparam_quote(self):
"""test that bound parameters take on quoting for reserved words,
column names quote flag enabled."""
# note: this is only in cx_oracle at the moment. not sure
# what other hypothetical oracle dialects might need
self.assert_compile(
bindparam("option"), ':"option"'
)
self.assert_compile(
bindparam("plain"), ':plain'
)
t = Table("s", MetaData(), Column('plain', Integer, quote=True))
self.assert_compile(
t.insert().values(plain=5), 'INSERT INTO s ("plain") VALUES (:"plain")'
)
self.assert_compile(
t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"'
)
def test_limit(self):
t = table('sometable', column('col1'), column('col2'))
s = select([t])
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c.result_map['col1'][1])
s = select([t]).limit(10).offset(20)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable) WHERE ROWNUM <= '
':param_1 + :param_2) WHERE ora_rn > :param_2',
checkparams={'param_1': 10, 'param_2': 20})
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c.result_map['col1'][1])
s = select([s.c.col1, s.c.col2])
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, col2 '
'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
'FROM (SELECT sometable.col1 AS col1, '
'sometable.col2 AS col2 FROM sometable) '
'WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > '
':param_2)',
checkparams={'param_1': 10, 'param_2': 20})
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, col2 '
'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
'FROM (SELECT sometable.col1 AS col1, '
'sometable.col2 AS col2 FROM sometable) '
'WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > '
':param_2)')
s = select([t]).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= '
':param_1 + :param_2) WHERE ora_rn > :param_2',
checkparams={'param_1': 10, 'param_2': 20}
)
s = select([t], for_update=True).limit(10).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= :param_1 '
'FOR UPDATE')
s = select([t],
for_update=True).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= '
':param_1 + :param_2) WHERE ora_rn > :param_2 FOR '
'UPDATE')
def test_for_update(self):
table1 = table('mytable',
column('myid'), column('name'), column('description'))
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(of=table1.c.myid),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF mytable.myid")
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE NOWAIT")
self.assert_compile(
table1.select(table1.c.myid == 7).
with_for_update(nowait=True, of=table1.c.myid),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 "
"FOR UPDATE OF mytable.myid NOWAIT")
self.assert_compile(
table1.select(table1.c.myid == 7).
with_for_update(nowait=True, of=[table1.c.myid, table1.c.name]),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
"mytable.myid, mytable.name NOWAIT")
ta = table1.alias()
self.assert_compile(
ta.select(ta.c.myid == 7).
with_for_update(of=[ta.c.myid, ta.c.name]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable mytable_1 "
"WHERE mytable_1.myid = :myid_1 FOR UPDATE OF "
"mytable_1.myid, mytable_1.name"
)
def test_limit_preserves_typing_information(self):
class MyType(TypeDecorator):
impl = Integer
stmt = select([type_coerce(column('x'), MyType).label('foo')]).limit(1)
dialect = oracle.dialect()
compiled = stmt.compile(dialect=dialect)
assert isinstance(compiled.result_map['foo'][-1], MyType)
def test_use_binds_for_limits_disabled(self):
t = table('sometable', column('col1'), column('col2'))
dialect = oracle.OracleDialect(use_binds_for_limits=False)
self.assert_compile(select([t]).limit(10),
"SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) WHERE ROWNUM <= 10",
dialect=dialect)
self.assert_compile(select([t]).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable)) WHERE ora_rn > 10",
dialect=dialect)
self.assert_compile(select([t]).limit(10).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) WHERE ROWNUM <= 20) WHERE ora_rn > 10",
dialect=dialect)
def test_use_binds_for_limits_enabled(self):
t = table('sometable', column('col1'), column('col2'))
dialect = oracle.OracleDialect(use_binds_for_limits=True)
self.assert_compile(select([t]).limit(10),
"SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) WHERE ROWNUM "
"<= :param_1",
dialect=dialect)
self.assert_compile(select([t]).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable)) WHERE ora_rn > :param_1",
dialect=dialect)
self.assert_compile(select([t]).limit(10).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) WHERE ROWNUM <= :param_1 + :param_2) "
"WHERE ora_rn > :param_2",
dialect=dialect,
checkparams={'param_1': 10, 'param_2': 10})
def test_long_labels(self):
dialect = default.DefaultDialect()
dialect.max_identifier_length = 30
ora_dialect = oracle.dialect()
m = MetaData()
a_table = Table(
'thirty_characters_table_xxxxxx',
m,
Column('id', Integer, primary_key=True)
)
other_table = Table(
'other_thirty_characters_table_',
m,
Column('id', Integer, primary_key=True),
Column('thirty_characters_table_id',
Integer,
ForeignKey('thirty_characters_table_xxxxxx.id'),
primary_key=True
)
)
anon = a_table.alias()
self.assert_compile(select([other_table,
anon]).
select_from(
other_table.outerjoin(anon)).apply_labels(),
'SELECT other_thirty_characters_table_.id '
'AS other_thirty_characters__1, '
'other_thirty_characters_table_.thirty_char'
'acters_table_id AS other_thirty_characters'
'__2, thirty_characters_table__1.id AS '
'thirty_characters_table__3 FROM '
'other_thirty_characters_table_ LEFT OUTER '
'JOIN thirty_characters_table_xxxxxx AS '
'thirty_characters_table__1 ON '
'thirty_characters_table__1.id = '
'other_thirty_characters_table_.thirty_char'
'acters_table_id', dialect=dialect)
self.assert_compile(select([other_table,
anon]).select_from(
other_table.outerjoin(anon)).apply_labels(),
'SELECT other_thirty_characters_table_.id '
'AS other_thirty_characters__1, '
'other_thirty_characters_table_.thirty_char'
'acters_table_id AS other_thirty_characters'
'__2, thirty_characters_table__1.id AS '
'thirty_characters_table__3 FROM '
'other_thirty_characters_table_ LEFT OUTER '
'JOIN thirty_characters_table_xxxxxx '
'thirty_characters_table__1 ON '
'thirty_characters_table__1.id = '
'other_thirty_characters_table_.thirty_char'
'acters_table_id', dialect=ora_dialect)
def test_outer_join(self):
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
query = select([table1, table2], or_(table1.c.name == 'fred',
table1.c.myid == 10, table2.c.othername != 'jack',
'EXISTS (select yay from foo where boo = lar)'
), from_obj=[outerjoin(table1, table2,
table1.c.myid == table2.c.otherid)])
self.assert_compile(query,
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername FROM mytable, '
'myothertable WHERE (mytable.name = '
':name_1 OR mytable.myid = :myid_1 OR '
'myothertable.othername != :othername_1 OR '
'EXISTS (select yay from foo where boo = '
'lar)) AND mytable.myid = '
'myothertable.otherid(+)',
dialect=oracle.OracleDialect(use_ansi=False))
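        # with use_ansi=False the dialect falls back to Oracle's legacy join
        # syntax: tables are comma-separated in FROM and the outer side of a
        # join is marked with the (+) operator instead of LEFT OUTER JOIN.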
query = table1.outerjoin(table2, table1.c.myid
== table2.c.otherid).outerjoin(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable LEFT OUTER JOIN myothertable '
'ON mytable.myid = myothertable.otherid '
'LEFT OUTER JOIN thirdtable ON '
'thirdtable.userid = myothertable.otherid')
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable, myothertable, thirdtable '
'WHERE thirdtable.userid(+) = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid(+)',
dialect=oracle.dialect(use_ansi=False))
query = table1.join(table2, table1.c.myid
== table2.c.otherid).join(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable, myothertable, thirdtable '
'WHERE thirdtable.userid = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid',
dialect=oracle.dialect(use_ansi=False))
query = table1.join(table2, table1.c.myid
== table2.c.otherid).outerjoin(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select().order_by(table1.c.name).
limit(10).offset(5),
'SELECT myid, name, description, otherid, '
'othername, userid, otherstuff FROM '
'(SELECT myid, name, description, otherid, '
'othername, userid, otherstuff, ROWNUM AS '
'ora_rn FROM (SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description, myothertable.otherid AS '
'otherid, myothertable.othername AS '
'othername, thirdtable.userid AS userid, '
'thirdtable.otherstuff AS otherstuff FROM '
'mytable, myothertable, thirdtable WHERE '
'thirdtable.userid(+) = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid ORDER BY '
'mytable.name) WHERE ROWNUM <= :param_1 + :param_2) '
'WHERE ora_rn > :param_2',
checkparams={'param_1': 10, 'param_2': 5},
dialect=oracle.dialect(use_ansi=False))
subq = select([table1]).select_from(table1.outerjoin(table2,
table1.c.myid == table2.c.otherid)).alias()
q = select([table3]).select_from(table3.outerjoin(subq,
table3.c.userid == subq.c.myid))
self.assert_compile(q,
'SELECT thirdtable.userid, '
'thirdtable.otherstuff FROM thirdtable '
'LEFT OUTER JOIN (SELECT mytable.myid AS '
'myid, mytable.name AS name, '
'mytable.description AS description FROM '
'mytable LEFT OUTER JOIN myothertable ON '
'mytable.myid = myothertable.otherid) '
'anon_1 ON thirdtable.userid = anon_1.myid',
dialect=oracle.dialect(use_ansi=True))
self.assert_compile(q,
'SELECT thirdtable.userid, '
'thirdtable.otherstuff FROM thirdtable, '
'(SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description FROM mytable, myothertable '
'WHERE mytable.myid = myothertable.otherid('
'+)) anon_1 WHERE thirdtable.userid = '
'anon_1.myid(+)',
dialect=oracle.dialect(use_ansi=False))
q = select([table1.c.name]).where(table1.c.name == 'foo')
self.assert_compile(q,
'SELECT mytable.name FROM mytable WHERE '
'mytable.name = :name_1',
dialect=oracle.dialect(use_ansi=False))
subq = select([table3.c.otherstuff]).where(table3.c.otherstuff
== table1.c.name).label('bar')
q = select([table1.c.name, subq])
self.assert_compile(q,
'SELECT mytable.name, (SELECT '
'thirdtable.otherstuff FROM thirdtable '
'WHERE thirdtable.otherstuff = '
'mytable.name) AS bar FROM mytable',
dialect=oracle.dialect(use_ansi=False))
def test_nonansi_nested_right_join(self):
a = table('a', column('a'))
b = table('b', column('b'))
c = table('c', column('c'))
j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False)
)
j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b(+) AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False)
)
j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c(+)",
dialect=oracle.OracleDialect(use_ansi=False)
)
def test_alias_outer_join(self):
address_types = table('address_types', column('id'),
column('name'))
addresses = table('addresses', column('id'), column('user_id'),
column('address_type_id'),
column('email_address'))
at_alias = address_types.alias()
s = select([at_alias,
addresses]).select_from(addresses.outerjoin(at_alias,
addresses.c.address_type_id
== at_alias.c.id)).where(addresses.c.user_id
== 7).order_by(addresses.c.id, address_types.c.id)
self.assert_compile(s,
'SELECT address_types_1.id, '
'address_types_1.name, addresses.id, '
'addresses.user_id, addresses.address_type_'
'id, addresses.email_address FROM '
'addresses LEFT OUTER JOIN address_types '
'address_types_1 ON addresses.address_type_'
'id = address_types_1.id WHERE '
'addresses.user_id = :user_id_1 ORDER BY '
'addresses.id, address_types.id')
def test_returning_insert(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
self.assert_compile(
t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1"
)
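        # Oracle hands RETURNING values back through out-parameters, hence
        # the generated "INTO :ret_0, :ret_1" binds in the expected SQL.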
def test_returning_insert_functional(self):
t1 = table('t1', column('c1'), column('c2', String()), column('c3', String()))
fn = func.lower(t1.c.c2, type_=String())
stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3)
compiled = stmt.compile(dialect=oracle.dialect())
eq_(
compiled.result_map,
{'ret_1': ('ret_1', (t1.c.c3, 'c3', 'c3'), t1.c.c3.type),
'ret_0': ('ret_0', (fn, 'lower', None), fn.type)}
)
self.assert_compile(
stmt,
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"lower(t1.c2), t1.c3 INTO :ret_0, :ret_1"
)
def test_returning_insert_labeled(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
self.assert_compile(
t1.insert().values(c1=1).returning(
t1.c.c2.label('c2_l'), t1.c.c3.label('c3_l')),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1"
)
def test_compound(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
t2 = table('t2', column('c1'), column('c2'), column('c3'))
self.assert_compile(union(t1.select(), t2.select()),
'SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION '
'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
self.assert_compile(except_(t1.select(), t2.select()),
'SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS '
'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
def test_no_paren_fns(self):
for fn, expected in [
(func.uid(), "uid"),
(func.UID(), "UID"),
(func.sysdate(), "sysdate"),
(func.row_number(), "row_number()"),
(func.rank(), "rank()"),
(func.now(), "CURRENT_TIMESTAMP"),
(func.current_timestamp(), "CURRENT_TIMESTAMP"),
(func.user(), "USER"),
]:
self.assert_compile(fn, expected)
def test_create_index_alt_schema(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer),
schema="alt_schema"
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x)),
"CREATE INDEX alt_schema.bar ON alt_schema.foo (x)"
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer)
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)"
)
class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL):
def _dialect(self, server_version, **kw):
def server_version_info(conn):
return server_version
dialect = oracle.dialect(
dbapi=Mock(version="0.0.0", paramstyle="named"),
**kw)
dialect._get_server_version_info = server_version_info
dialect._check_unicode_returns = Mock()
dialect._check_unicode_description = Mock()
dialect._get_default_schema_name = Mock()
return dialect
def test_ora8_flags(self):
dialect = self._dialect((8, 2, 5))
# before connect, assume modern DB
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
dialect.initialize(Mock())
assert not dialect.implicit_returning
assert not dialect._supports_char_length
assert not dialect._supports_nchar
assert not dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50)", dialect=dialect)
self.assert_compile(Unicode(50), "VARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "CLOB", dialect=dialect)
dialect = self._dialect((8, 2, 5), implicit_returning=True)
dialect.initialize(testing.db.connect())
assert dialect.implicit_returning
def test_default_flags(self):
"""test with no initialization or server version info"""
dialect = self._dialect(None)
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
def test_ora10_flags(self):
dialect = self._dialect((10, 2, 5))
dialect.initialize(Mock())
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
class MultiSchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
# currently assuming full DBA privs for the user.
# don't really know how else to go here unless
# we connect as the other user.
for stmt in """
create table test_schema.parent(
id integer primary key,
data varchar2(50)
);
create table test_schema.child(
id integer primary key,
data varchar2(50),
parent_id integer references test_schema.parent(id)
);
create table local_table(
id integer primary key,
data varchar2(50)
);
create synonym test_schema.ptable for test_schema.parent;
create synonym test_schema.ctable for test_schema.child;
create synonym test_schema_ptable for test_schema.parent;
create synonym test_schema.local_table for local_table;
-- can't make a ref from local schema to the
-- remote schema's table without this,
        -- *and* can't give yourself a grant!
-- so we give it to public. ideas welcome.
grant references on test_schema.parent to public;
grant references on test_schema.child to public;
""".split(";"):
if stmt.strip():
testing.db.execute(stmt)
@classmethod
def teardown_class(cls):
for stmt in """
drop table test_schema.child;
drop table test_schema.parent;
drop table local_table;
drop synonym test_schema.ctable;
drop synonym test_schema.ptable;
drop synonym test_schema_ptable;
drop synonym test_schema.local_table;
""".split(";"):
if stmt.strip():
testing.db.execute(stmt)
@testing.provide_metadata
def test_create_same_names_explicit_schema(self):
schema = testing.db.dialect.default_schema_name
meta = self.metadata
parent = Table('parent', meta,
Column('pid', Integer, primary_key=True),
schema=schema
)
child = Table('child', meta,
Column('cid', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('%s.parent.pid' % schema)),
schema=schema
)
meta.create_all()
parent.insert().execute({'pid': 1})
child.insert().execute({'cid': 1, 'pid': 1})
eq_(child.select().execute().fetchall(), [(1, 1)])
def test_reflect_alt_table_owner_local_synonym(self):
meta = MetaData(testing.db)
parent = Table('test_schema_ptable', meta, autoload=True,
oracle_resolve_synonyms=True)
self.assert_compile(parent.select(),
"SELECT test_schema_ptable.id, "
"test_schema_ptable.data FROM test_schema_ptable")
select([parent]).execute().fetchall()
def test_reflect_alt_synonym_owner_local_table(self):
meta = MetaData(testing.db)
parent = Table('local_table', meta, autoload=True,
oracle_resolve_synonyms=True, schema="test_schema")
self.assert_compile(parent.select(),
"SELECT test_schema.local_table.id, "
"test_schema.local_table.data FROM test_schema.local_table")
select([parent]).execute().fetchall()
@testing.provide_metadata
def test_create_same_names_implicit_schema(self):
meta = self.metadata
parent = Table('parent', meta,
Column('pid', Integer, primary_key=True),
)
child = Table('child', meta,
Column('cid', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('parent.pid')),
)
meta.create_all()
parent.insert().execute({'pid': 1})
child.insert().execute({'cid': 1, 'pid': 1})
eq_(child.select().execute().fetchall(), [(1, 1)])
def test_reflect_alt_owner_explicit(self):
meta = MetaData(testing.db)
parent = Table('parent', meta, autoload=True, schema='test_schema')
child = Table('child', meta, autoload=True, schema='test_schema')
self.assert_compile(parent.join(child),
"test_schema.parent JOIN test_schema.child ON "
"test_schema.parent.id = test_schema.child.parent_id")
select([parent, child]).\
select_from(parent.join(child)).\
execute().fetchall()
def test_reflect_local_to_remote(self):
testing.db.execute('CREATE TABLE localtable (id INTEGER '
'PRIMARY KEY, parent_id INTEGER REFERENCES '
'test_schema.parent(id))')
try:
meta = MetaData(testing.db)
lcl = Table('localtable', meta, autoload=True)
parent = meta.tables['test_schema.parent']
self.assert_compile(parent.join(lcl),
'test_schema.parent JOIN localtable ON '
'test_schema.parent.id = '
'localtable.parent_id')
select([parent,
lcl]).select_from(parent.join(lcl)).execute().fetchall()
finally:
testing.db.execute('DROP TABLE localtable')
def test_reflect_alt_owner_implicit(self):
meta = MetaData(testing.db)
parent = Table('parent', meta, autoload=True,
schema='test_schema')
child = Table('child', meta, autoload=True, schema='test_schema'
)
self.assert_compile(parent.join(child),
'test_schema.parent JOIN test_schema.child '
'ON test_schema.parent.id = '
'test_schema.child.parent_id')
select([parent,
child]).select_from(parent.join(child)).execute().fetchall()
def test_reflect_alt_owner_synonyms(self):
testing.db.execute('CREATE TABLE localtable (id INTEGER '
'PRIMARY KEY, parent_id INTEGER REFERENCES '
'test_schema.ptable(id))')
try:
meta = MetaData(testing.db)
lcl = Table('localtable', meta, autoload=True,
oracle_resolve_synonyms=True)
parent = meta.tables['test_schema.ptable']
self.assert_compile(parent.join(lcl),
'test_schema.ptable JOIN localtable ON '
'test_schema.ptable.id = '
'localtable.parent_id')
select([parent,
lcl]).select_from(parent.join(lcl)).execute().fetchall()
finally:
testing.db.execute('DROP TABLE localtable')
def test_reflect_remote_synonyms(self):
meta = MetaData(testing.db)
parent = Table('ptable', meta, autoload=True,
schema='test_schema',
oracle_resolve_synonyms=True)
child = Table('ctable', meta, autoload=True,
schema='test_schema',
oracle_resolve_synonyms=True)
self.assert_compile(parent.join(child),
'test_schema.ptable JOIN '
'test_schema.ctable ON test_schema.ptable.i'
'd = test_schema.ctable.parent_id')
select([parent,
child]).select_from(parent.join(child)).execute().fetchall()
class ConstraintTest(fixtures.TablesTest):
__only_on__ = 'oracle'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata, Column('id', Integer, primary_key=True))
def test_oracle_has_no_on_update_cascade(self):
bar = Table('bar', self.metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer,
ForeignKey('foo.id', onupdate='CASCADE')))
assert_raises(exc.SAWarning, bar.create)
bat = Table('bat', self.metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer),
ForeignKeyConstraint(['foo_id'], ['foo.id'],
onupdate='CASCADE'))
assert_raises(exc.SAWarning, bat.create)
class TwoPhaseTest(fixtures.TablesTest):
"""test cx_oracle two phase, which remains in a semi-broken state
so requires a carefully written test."""
__only_on__ = 'oracle+cx_oracle'
@classmethod
def define_tables(cls, metadata):
Table('datatable', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50))
)
def _connection(self):
conn = testing.db.connect()
conn.detach()
return conn
def _assert_data(self, rows):
eq_(
testing.db.scalar("select count(*) from datatable"),
rows
)
def test_twophase_prepare_false(self):
conn = self._connection()
for i in range(2):
trans = conn.begin_twophase()
conn.execute("select 1 from dual")
trans.prepare()
trans.commit()
conn.close()
self._assert_data(0)
def test_twophase_prepare_true(self):
conn = self._connection()
for i in range(2):
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % i)
trans.prepare()
trans.commit()
conn.close()
self._assert_data(2)
def test_twophase_rollback(self):
conn = self._connection()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.rollback()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.prepare()
trans.commit()
conn.close()
self._assert_data(1)
def test_not_prepared(self):
conn = self._connection()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.commit()
conn.close()
self._assert_data(1)
class DialectTypesTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = oracle.OracleDialect()
def test_no_clobs_for_string_params(self):
"""test that simple string params get a DBAPI type of
VARCHAR, not CLOB. This is to prevent setinputsizes
from setting up cx_oracle.CLOBs on
string-based bind params [ticket:793]."""
class FakeDBAPI(object):
def __getattr__(self, attr):
return attr
dialect = oracle.OracleDialect()
dbapi = FakeDBAPI()
b = bindparam("foo", "hello world!")
eq_(
b.type.dialect_impl(dialect).get_dbapi_type(dbapi),
'STRING'
)
b = bindparam("foo", "hello world!")
eq_(
b.type.dialect_impl(dialect).get_dbapi_type(dbapi),
'STRING'
)
def test_long(self):
self.assert_compile(oracle.LONG(), "LONG")
def test_type_adapt(self):
dialect = cx_oracle.dialect()
for start, test in [
(Date(), cx_oracle._OracleDate),
(oracle.OracleRaw(), cx_oracle._OracleRaw),
(String(), String),
(VARCHAR(), cx_oracle._OracleString),
(DATE(), cx_oracle._OracleDate),
(oracle.DATE(), oracle.DATE),
(String(50), cx_oracle._OracleString),
(Unicode(), cx_oracle._OracleNVarChar),
(Text(), cx_oracle._OracleText),
(UnicodeText(), cx_oracle._OracleUnicodeText),
(NCHAR(), cx_oracle._OracleNVarChar),
(oracle.RAW(50), cx_oracle._OracleRaw),
]:
assert isinstance(start.dialect_impl(dialect), test), \
"wanted %r got %r" % (test, start.dialect_impl(dialect))
def test_raw_compile(self):
self.assert_compile(oracle.RAW(), "RAW")
self.assert_compile(oracle.RAW(35), "RAW(35)")
def test_char_length(self):
self.assert_compile(VARCHAR(50), "VARCHAR(50 CHAR)")
oracle8dialect = oracle.dialect()
oracle8dialect.server_version_info = (8, 0)
self.assert_compile(VARCHAR(50), "VARCHAR(50)", dialect=oracle8dialect)
self.assert_compile(NVARCHAR(50), "NVARCHAR2(50)")
self.assert_compile(CHAR(50), "CHAR(50)")
def test_varchar_types(self):
dialect = oracle.dialect()
for typ, exp in [
(String(50), "VARCHAR2(50 CHAR)"),
(Unicode(50), "NVARCHAR2(50)"),
(NVARCHAR(50), "NVARCHAR2(50)"),
(VARCHAR(50), "VARCHAR(50 CHAR)"),
(oracle.NVARCHAR2(50), "NVARCHAR2(50)"),
(oracle.VARCHAR2(50), "VARCHAR2(50 CHAR)"),
(String(), "VARCHAR2"),
(Unicode(), "NVARCHAR2"),
(NVARCHAR(), "NVARCHAR2"),
(VARCHAR(), "VARCHAR"),
(oracle.NVARCHAR2(), "NVARCHAR2"),
(oracle.VARCHAR2(), "VARCHAR2"),
]:
self.assert_compile(typ, exp, dialect=dialect)
def test_interval(self):
for type_, expected in [(oracle.INTERVAL(),
'INTERVAL DAY TO SECOND'),
(oracle.INTERVAL(day_precision=3),
'INTERVAL DAY(3) TO SECOND'),
(oracle.INTERVAL(second_precision=5),
'INTERVAL DAY TO SECOND(5)'),
(oracle.INTERVAL(day_precision=2,
second_precision=5),
'INTERVAL DAY(2) TO SECOND(5)')]:
self.assert_compile(type_, expected)
class TypesTest(fixtures.TestBase):
__only_on__ = 'oracle'
__dialect__ = oracle.OracleDialect()
@testing.fails_on('+zxjdbc', 'zxjdbc lacks the FIXED_CHAR dbapi type')
def test_fixed_char(self):
m = MetaData(testing.db)
t = Table('t1', m,
Column('id', Integer, primary_key=True),
Column('data', CHAR(30), nullable=False)
)
t.create()
try:
t.insert().execute(
dict(id=1, data="value 1"),
dict(id=2, data="value 2"),
dict(id=3, data="value 3")
)
eq_(
t.select().where(t.c.data == 'value 2').execute().fetchall(),
[(2, 'value 2 ')]
)
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
assert type(t2.c.data.type) is CHAR
eq_(
t2.select().where(t2.c.data == 'value 2').execute().fetchall(),
[(2, 'value 2 ')]
)
finally:
t.drop()
@testing.requires.returning
@testing.provide_metadata
def test_int_not_float(self):
m = self.metadata
t1 = Table('t1', m, Column('foo', Integer))
t1.create()
r = t1.insert().values(foo=5).returning(t1.c.foo).execute()
x = r.scalar()
assert x == 5
assert isinstance(x, int)
x = t1.select().scalar()
assert x == 5
assert isinstance(x, int)
@testing.provide_metadata
def test_rowid(self):
metadata = self.metadata
t = Table('t1', metadata,
Column('x', Integer)
)
t.create()
t.insert().execute(x=5)
s1 = select([t])
s2 = select([column('rowid')]).select_from(s1)
rowid = s2.scalar()
# the ROWID type is not really needed here,
# as cx_oracle just treats it as a string,
# but we want to make sure the ROWID works...
rowid_col = column('rowid', oracle.ROWID)
s3 = select([t.c.x, rowid_col]).\
where(rowid_col == cast(rowid, oracle.ROWID))
eq_(s3.select().execute().fetchall(),
[(5, rowid)]
)
@testing.fails_on('+zxjdbc',
'Not yet known how to pass values of the '
'INTERVAL type')
@testing.provide_metadata
def test_interval(self):
metadata = self.metadata
interval_table = Table('intervaltable', metadata, Column('id',
Integer, primary_key=True,
test_needs_autoincrement=True),
Column('day_interval',
oracle.INTERVAL(day_precision=3)))
metadata.create_all()
interval_table.insert().\
execute(day_interval=datetime.timedelta(days=35,
seconds=5743))
row = interval_table.select().execute().first()
eq_(row['day_interval'], datetime.timedelta(days=35,
seconds=5743))
@testing.provide_metadata
def test_numerics(self):
m = self.metadata
t1 = Table('t1', m,
Column('intcol', Integer),
Column('numericcol', Numeric(precision=9, scale=2)),
Column('floatcol1', Float()),
Column('floatcol2', FLOAT()),
Column('doubleprec', oracle.DOUBLE_PRECISION),
Column('numbercol1', oracle.NUMBER(9)),
Column('numbercol2', oracle.NUMBER(9, 3)),
Column('numbercol3', oracle.NUMBER),
)
t1.create()
t1.insert().execute(
intcol=1,
numericcol=5.2,
floatcol1=6.5,
floatcol2=8.5,
doubleprec=9.5,
numbercol1=12,
numbercol2=14.85,
numbercol3=15.76
)
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
for row in (
t1.select().execute().first(),
t2.select().execute().first()
):
for i, (val, type_) in enumerate((
(1, int),
(decimal.Decimal("5.2"), decimal.Decimal),
(6.5, float),
(8.5, float),
(9.5, float),
(12, int),
(decimal.Decimal("14.85"), decimal.Decimal),
(15.76, float),
)):
eq_(row[i], val)
assert isinstance(row[i], type_), '%r is not %r' \
% (row[i], type_)
def test_numeric_no_decimal_mode(self):
engine = testing_engine(options=dict(coerce_to_decimal=False))
value = engine.scalar("SELECT 5.66 FROM DUAL")
assert isinstance(value, float)
value = testing.db.scalar("SELECT 5.66 FROM DUAL")
assert isinstance(value, decimal.Decimal)
@testing.only_on("oracle+cx_oracle", "cx_oracle-specific feature")
@testing.fails_if(
testing.requires.python3,
"cx_oracle always returns unicode on py3k")
def test_coerce_to_unicode(self):
engine = testing_engine(options=dict(coerce_to_unicode=True))
value = engine.scalar("SELECT 'hello' FROM DUAL")
assert isinstance(value, util.text_type)
value = testing.db.scalar("SELECT 'hello' FROM DUAL")
assert isinstance(value, util.binary_type)
@testing.provide_metadata
def test_numerics_broken_inspection(self):
"""Numeric scenarios where Oracle type info is 'broken',
returning us precision, scale of the form (0, 0) or (0, -127).
We convert to Decimal and let int()/float() processors take over.
"""
metadata = self.metadata
# this test requires cx_oracle 5
foo = Table('foo', metadata,
Column('idata', Integer),
Column('ndata', Numeric(20, 2)),
Column('ndata2', Numeric(20, 2)),
Column('nidata', Numeric(5, 0)),
Column('fdata', Float()),
)
foo.create()
foo.insert().execute({
'idata': 5,
'ndata': decimal.Decimal("45.6"),
'ndata2': decimal.Decimal("45.0"),
'nidata': decimal.Decimal('53'),
'fdata': 45.68392
})
stmt = "SELECT idata, ndata, ndata2, nidata, fdata FROM foo"
row = testing.db.execute(stmt).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, decimal.Decimal, int, float]
)
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'),
53, 45.683920000000001)
)
        # with a nested subquery, both Numeric values that don't have decimal
        # places, regardless of their originating type, come back as ints with
        # no useful typing information beyond "numeric", so the native handler
        # must convert to int. This means our Decimal converters need to run
        # no matter what. Totally sucks.
stmt = """
SELECT
(SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
(SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
AS ndata,
(SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
AS ndata2,
(SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0)) FROM DUAL)
AS nidata,
(SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata
FROM dual
"""
row = testing.db.execute(stmt).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, int, int, decimal.Decimal]
)
eq_(
row,
(5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392'))
)
row = testing.db.execute(text(stmt,
typemap={
'idata': Integer(),
'ndata': Numeric(20, 2),
'ndata2': Numeric(20, 2),
'nidata': Numeric(5, 0),
'fdata': Float()
})).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float]
)
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'),
decimal.Decimal('53'), 45.683920000000001)
)
stmt = """
SELECT
anon_1.idata AS anon_1_idata,
anon_1.ndata AS anon_1_ndata,
anon_1.ndata2 AS anon_1_ndata2,
anon_1.nidata AS anon_1_nidata,
anon_1.fdata AS anon_1_fdata
FROM (SELECT idata, ndata, ndata2, nidata, fdata
FROM (
SELECT
(SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
(SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2))
FROM DUAL) AS ndata,
(SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2))
FROM DUAL) AS ndata2,
(SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0))
FROM DUAL) AS nidata,
(SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL)
AS fdata
FROM dual
)
WHERE ROWNUM >= 0) anon_1
"""
row = testing.db.execute(stmt).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, int, int, decimal.Decimal]
)
eq_(
row,
(5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392'))
)
row = testing.db.execute(text(stmt,
typemap={
'anon_1_idata': Integer(),
'anon_1_ndata': Numeric(20, 2),
'anon_1_ndata2': Numeric(20, 2),
'anon_1_nidata': Numeric(5, 0),
'anon_1_fdata': Float()
})).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float]
)
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'),
decimal.Decimal('53'), 45.683920000000001)
)
row = testing.db.execute(text(stmt,
typemap={
'anon_1_idata': Integer(),
'anon_1_ndata': Numeric(20, 2, asdecimal=False),
'anon_1_ndata2': Numeric(20, 2, asdecimal=False),
'anon_1_nidata': Numeric(5, 0, asdecimal=False),
'anon_1_fdata': Float(asdecimal=True)
})).fetchall()[0]
eq_(
[type(x) for x in row],
[int, float, float, float, decimal.Decimal]
)
eq_(
row,
(5, 45.6, 45, 53, decimal.Decimal('45.68392'))
)
@testing.provide_metadata
def test_reflect_dates(self):
metadata = self.metadata
Table(
"date_types", metadata,
Column('d1', sqltypes.DATE),
Column('d2', oracle.DATE),
Column('d3', TIMESTAMP),
Column('d4', TIMESTAMP(timezone=True)),
Column('d5', oracle.INTERVAL(second_precision=5)),
)
metadata.create_all()
m = MetaData(testing.db)
t1 = Table(
"date_types", m,
autoload=True)
assert isinstance(t1.c.d1.type, oracle.DATE)
assert isinstance(t1.c.d1.type, DateTime)
assert isinstance(t1.c.d2.type, oracle.DATE)
assert isinstance(t1.c.d2.type, DateTime)
assert isinstance(t1.c.d3.type, TIMESTAMP)
assert not t1.c.d3.type.timezone
assert isinstance(t1.c.d4.type, TIMESTAMP)
assert t1.c.d4.type.timezone
assert isinstance(t1.c.d5.type, oracle.INTERVAL)
def test_reflect_all_types_schema(self):
types_table = Table('all_types', MetaData(testing.db),
Column('owner', String(30), primary_key=True),
Column('type_name', String(30), primary_key=True),
autoload=True, oracle_resolve_synonyms=True
)
for row in types_table.select().execute().fetchall():
[row[k] for k in row.keys()]
@testing.provide_metadata
def test_raw_roundtrip(self):
metadata = self.metadata
raw_table = Table('raw', metadata,
Column('id', Integer, primary_key=True),
Column('data', oracle.RAW(35))
)
metadata.create_all()
testing.db.execute(raw_table.insert(), id=1, data=b("ABCDEF"))
eq_(
testing.db.execute(raw_table.select()).first(),
(1, b("ABCDEF"))
)
@testing.provide_metadata
def test_reflect_nvarchar(self):
metadata = self.metadata
Table('t', metadata,
Column('data', sqltypes.NVARCHAR(255))
)
metadata.create_all()
m2 = MetaData(testing.db)
t2 = Table('t', m2, autoload=True)
assert isinstance(t2.c.data.type, sqltypes.NVARCHAR)
if testing.against('oracle+cx_oracle'):
# nvarchar returns unicode natively. cx_oracle
# _OracleNVarChar type should be at play here.
assert isinstance(
t2.c.data.type.dialect_impl(testing.db.dialect),
cx_oracle._OracleNVarChar)
data = u('m’a réveillé.')
t2.insert().execute(data=data)
res = t2.select().execute().first()['data']
eq_(res, data)
assert isinstance(res, util.text_type)
@testing.provide_metadata
def test_char_length(self):
metadata = self.metadata
t1 = Table('t1', metadata,
Column("c1", VARCHAR(50)),
Column("c2", NVARCHAR(250)),
Column("c3", CHAR(200))
)
t1.create()
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
eq_(t2.c.c1.type.length, 50)
eq_(t2.c.c2.type.length, 250)
eq_(t2.c.c3.type.length, 200)
@testing.provide_metadata
def test_long_type(self):
metadata = self.metadata
t = Table('t', metadata,
Column('data', oracle.LONG)
)
metadata.create_all(testing.db)
testing.db.execute(t.insert(), data='xyz')
eq_(
testing.db.scalar(select([t.c.data])),
"xyz"
)
def test_longstring(self):
metadata = MetaData(testing.db)
testing.db.execute("""
CREATE TABLE Z_TEST
(
ID NUMERIC(22) PRIMARY KEY,
ADD_USER VARCHAR2(20) NOT NULL
)
""")
try:
t = Table("z_test", metadata, autoload=True)
t.insert().execute(id=1.0, add_user='foobar')
assert t.select().execute().fetchall() == [(1, 'foobar')]
finally:
testing.db.execute("DROP TABLE Z_TEST")
@testing.fails_on('+zxjdbc', 'auto_convert_lobs not applicable')
def test_lobs_without_convert(self):
engine = testing_engine(options=dict(auto_convert_lobs=False))
metadata = MetaData()
t = Table("z_test", metadata, Column('id', Integer, primary_key=True),
Column('data', Text), Column('bindata', LargeBinary))
t.create(engine)
try:
engine.execute(t.insert(), id=1,
data='this is text',
bindata=b('this is binary'))
row = engine.execute(t.select()).first()
eq_(row['data'].read(), 'this is text')
eq_(row['bindata'].read(), b('this is binary'))
finally:
t.drop(engine)
class EuroNumericTest(fixtures.TestBase):
"""test the numeric output_type_handler when using non-US locale for NLS_LANG."""
__only_on__ = 'oracle+cx_oracle'
def setup(self):
self.old_nls_lang = os.environ.get('NLS_LANG', False)
os.environ['NLS_LANG'] = "GERMAN"
self.engine = testing_engine()
def teardown(self):
if self.old_nls_lang is not False:
os.environ['NLS_LANG'] = self.old_nls_lang
else:
del os.environ['NLS_LANG']
self.engine.dispose()
def test_output_type_handler(self):
for stmt, exp, kw in [
("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}),
("SELECT 15 FROM DUAL", 15, {}),
("SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL",
decimal.Decimal("15"), {}),
("SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL",
decimal.Decimal("0.1"), {}),
("SELECT :num FROM DUAL", decimal.Decimal("2.5"),
{'num': decimal.Decimal("2.5")})
]:
test_exp = self.engine.scalar(stmt, **kw)
eq_(
test_exp,
exp
)
assert type(test_exp) is type(exp)
class DontReflectIOTTest(fixtures.TestBase):
"""test that index overflow tables aren't included in
table_names."""
__only_on__ = 'oracle'
def setup(self):
testing.db.execute("""
CREATE TABLE admin_docindex(
token char(20),
doc_id NUMBER,
token_frequency NUMBER,
token_offsets VARCHAR2(2000),
CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
ORGANIZATION INDEX
TABLESPACE users
PCTTHRESHOLD 20
OVERFLOW TABLESPACE users
""")
def teardown(self):
testing.db.execute("drop table admin_docindex")
def test_reflect_all(self):
m = MetaData(testing.db)
m.reflect()
eq_(
set(t.name for t in m.tables.values()),
set(['admin_docindex'])
)
class BufferedColumnTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
global binary_table, stream, meta
meta = MetaData(testing.db)
binary_table = Table('binary_table', meta,
Column('id', Integer, primary_key=True),
Column('data', LargeBinary)
)
meta.create_all()
stream = os.path.join(
os.path.dirname(__file__), "..",
'binary_data_one.dat')
with open(stream, "rb") as file_:
stream = file_.read(12000)
for i in range(1, 11):
binary_table.insert().execute(id=i, data=stream)
@classmethod
def teardown_class(cls):
meta.drop_all()
def test_fetch(self):
result = binary_table.select().order_by(binary_table.c.id).\
execute().fetchall()
eq_(result, [(i, stream) for i in range(1, 11)])
@testing.fails_on('+zxjdbc', 'FIXME: zxjdbc should support this')
def test_fetch_single_arraysize(self):
eng = testing_engine(options={'arraysize': 1})
result = eng.execute(binary_table.select().
order_by(binary_table.c.id)).fetchall()
eq_(result, [(i, stream) for i in range(1, 11)])
class UnsupportedIndexReflectTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.emits_warning("No column names")
@testing.provide_metadata
def test_reflect_functional_index(self):
metadata = self.metadata
Table('test_index_reflect', metadata,
Column('data', String(20), primary_key=True)
)
metadata.create_all()
testing.db.execute('CREATE INDEX DATA_IDX ON '
'TEST_INDEX_REFLECT (UPPER(DATA))')
m2 = MetaData(testing.db)
Table('test_index_reflect', m2, autoload=True)
class RoundTripIndexTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_basic(self):
metadata = self.metadata
table = Table("sometable", metadata,
Column("id_a", Unicode(255), primary_key=True),
Column("id_b", Unicode(255), primary_key=True, unique=True),
Column("group", Unicode(255), primary_key=True),
Column("col", Unicode(255)),
UniqueConstraint('col', 'group'),
)
# "group" is a keyword, so lower case
normalind = Index('tableind', table.c.id_b, table.c.group)
metadata.create_all()
mirror = MetaData(testing.db)
mirror.reflect()
metadata.drop_all()
mirror.create_all()
inspect = MetaData(testing.db)
inspect.reflect()
def obj_definition(obj):
return obj.__class__, tuple([c.name for c in
obj.columns]), getattr(obj, 'unique', None)
        # find what the primary key constraint name should be
primaryconsname = testing.db.execute(
text("""SELECT constraint_name
FROM all_constraints
WHERE table_name = :table_name
AND owner = :owner
AND constraint_type = 'P' """),
table_name=table.name.upper(),
owner=testing.db.url.username.upper()).fetchall()[0][0]
reflectedtable = inspect.tables[table.name]
# make a dictionary of the reflected objects:
reflected = dict([(obj_definition(i), i) for i in
reflectedtable.indexes
| reflectedtable.constraints])
        # assert we got the primary key constraint and its name;
        # a KeyError here means it wasn't reflected
assert reflected[(PrimaryKeyConstraint, ('id_a', 'id_b',
'group'), None)].name.upper() \
== primaryconsname.upper()
        # a KeyError here likewise means the index wasn't reflected
eq_(
reflected[(Index, ('id_b', 'group'), False)].name,
normalind.name
)
assert (Index, ('id_b', ), True) in reflected
assert (Index, ('col', 'group'), True) in reflected
eq_(len(reflectedtable.constraints), 1)
eq_(len(reflectedtable.indexes), 3)
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
def test_basic(self):
seq = Sequence('my_seq_no_schema')
dialect = oracle.OracleDialect()
assert dialect.identifier_preparer.format_sequence(seq) \
== 'my_seq_no_schema'
seq = Sequence('my_seq', schema='some_schema')
assert dialect.identifier_preparer.format_sequence(seq) \
== 'some_schema.my_seq'
seq = Sequence('My_Seq', schema='Some_Schema')
assert dialect.identifier_preparer.format_sequence(seq) \
== '"Some_Schema"."My_Seq"'
class ExecuteTest(fixtures.TestBase):
__only_on__ = 'oracle'
def test_basic(self):
eq_(testing.db.execute('/*+ this is a comment */ SELECT 1 FROM '
'DUAL').fetchall(), [(1, )])
def test_sequences_are_integers(self):
seq = Sequence('foo_seq')
seq.create(testing.db)
try:
val = testing.db.execute(seq)
eq_(val, 1)
assert type(val) is int
finally:
seq.drop(testing.db)
@testing.provide_metadata
def test_limit_offset_for_update(self):
metadata = self.metadata
# oracle can't actually do the ROWNUM thing with FOR UPDATE
# very well.
t = Table('t1', metadata, Column('id', Integer, primary_key=True),
Column('data', Integer)
)
metadata.create_all()
t.insert().execute(
{'id': 1, 'data': 1},
{'id': 2, 'data': 7},
{'id': 3, 'data': 12},
{'id': 4, 'data': 15},
{'id': 5, 'data': 32},
)
# here, we can't use ORDER BY.
eq_(
t.select(for_update=True).limit(2).execute().fetchall(),
[(1, 1),
(2, 7)]
)
        # here, it's impossible. But we'd prefer it to raise ORA-02014
# instead of issuing a syntax error.
assert_raises_message(
exc.DatabaseError,
"ORA-02014",
t.select(for_update=True).limit(2).offset(3).execute
)
class UnicodeSchemaTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_quoted_column_non_unicode(self):
metadata = self.metadata
table = Table("atable", metadata,
Column("_underscorecolumn", Unicode(255), primary_key=True),
)
metadata.create_all()
table.insert().execute(
{'_underscorecolumn': u('’é')},
)
result = testing.db.execute(
table.select().where(table.c._underscorecolumn == u('’é'))
).scalar()
eq_(result, u('’é'))
@testing.provide_metadata
def test_quoted_column_unicode(self):
metadata = self.metadata
table = Table("atable", metadata,
Column(u("méil"), Unicode(255), primary_key=True),
)
metadata.create_all()
table.insert().execute(
{u('méil'): u('’é')},
)
result = testing.db.execute(
table.select().where(table.c[u('méil')] == u('’é'))
).scalar()
eq_(result, u('’é'))
class DBLinkReflectionTest(fixtures.TestBase):
__requires__ = 'oracle_test_dblink',
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
from sqlalchemy.testing import config
cls.dblink = config.file_config.get('sqla_testing', 'oracle_db_link')
with testing.db.connect() as conn:
conn.execute(
"create table test_table "
"(id integer primary key, data varchar2(50))")
conn.execute("create synonym test_table_syn "
"for test_table@%s" % cls.dblink)
@classmethod
def teardown_class(cls):
with testing.db.connect() as conn:
conn.execute("drop synonym test_table_syn")
conn.execute("drop table test_table")
def test_hello_world(self):
"""test that the synonym/dblink is functional."""
testing.db.execute("insert into test_table_syn (id, data) "
"values (1, 'some data')")
eq_(
testing.db.execute("select * from test_table_syn").first(),
(1, 'some data')
)
def test_reflection(self):
"""test the resolution of the synonym/dblink. """
m = MetaData()
t = Table('test_table_syn', m, autoload=True,
autoload_with=testing.db, oracle_resolve_synonyms=True)
eq_(list(t.c.keys()), ['id', 'data'])
eq_(list(t.primary_key), [t.c.id])
|
{
"content_hash": "2c564adeaa2bb80210b4f3cd42b3cae7",
"timestamp": "",
"source": "github",
"line_count": 1875,
"max_line_length": 89,
"avg_line_length": 38.616,
"alnum_prop": 0.5185139147848905,
"repo_name": "michaelBenin/sqlalchemy",
"id": "758ae089db4c9830858c8dbe76be17bf11f93c2c",
"size": "72430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/dialect/test_oracle.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
Node object representing each node in the binary tree
'''
class Node:
left = None
right = None
val = None
def __init__(self, value, left, right):
self.val = value
self.left = left
self.right = right
# BFS algorithm to traverse the tree and return the nodes at the given level
def bfs(root, level):
l = 1
nodes = [root]
while l != level:
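        # len(nodes) is captured once by range(), so this pass pops exactly
        # the nodes of the current level and queues up their children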
for i in range(len(nodes)):
n = nodes.pop(0)
if n.left != None:
nodes.append(n.left)
if n.right != None:
nodes.append(n.right)
if len(nodes):
l = l + 1
else:
break
return nodes
# construct the tree; the full two-sided version is kept commented out for reference
#root = Node(0, Node(1, Node(2, None, None), Node(3, None, None)), Node(4, Node(5, None, None), Node(6, None, None)))
root = Node(0, Node(1, Node(2, None, None), Node(3, None, None)), None)  # right subtree disabled
nodes = bfs(root, 3)
for n in nodes:
print n.val,
'''
# print the entire binary tree
l = 1
while 1:
nodes = bfs(root, l)
if len(nodes):
for n in nodes:
print n.val,
print ''
else:
        break
l = l + 1
'''
|
{
"content_hash": "69b801c9c8ba036f56111fcd41e05c88",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 122,
"avg_line_length": 20.9,
"alnum_prop": 0.6181818181818182,
"repo_name": "viveksubbarao/algorithms",
"id": "01a163fe943e89d3c01f621d1ad12c4fdbc65c06",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bfs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1782"
},
{
"name": "Java",
"bytes": "19779"
},
{
"name": "Python",
"bytes": "4947"
}
],
"symlink_target": ""
}
|
"""
Build ag and run tests.
Run as: ./scripts/runtests.py (because I'm lazy and it's easier to
hard-code such things).
It'll use a temporary directory ../ag_tests during tests.
To make it easy and compact to write additional tests:
- test descriptions are in files in tests directory
- a single file describes multiple tests
- for each test, it says:
- what files exist and their content
  - command to run (presumably ag with appropriate flags)
- expected result of the command
- tests stop when the output is different from the expected result. When that
  happens, the ../ag_tests directory contains the data for the test so that a developer
can investigate the problem
"""
import sys, os, shutil, codecs
import util
# set to True only when testing, speeds up test cycle by not compiling
# the executable if it already exists
NO_BUILD_IF_EXE_EXISTS = True
def fatal(msg):
print(msg); sys.exit(1)
@util.memoize
def top_level_dir():
# auto-detect either being in top-level or inside scripts
if os.path.exists("runtests.py"):
dir = ".."
else:
dir = "."
path = os.path.realpath(dir)
os.chdir(path)
return path
@util.memoize
def ag_tests_dir():
return os.path.realpath(os.path.join(top_level_dir(), "..", "ag_tests"))
def delete_dir(path):
if os.path.exists(path):
shutil.rmtree(path, True)
def delete_ag_tests_dir():
delete_dir(ag_tests_dir())
def recreate_ag_tests_dir():
delete_ag_tests_dir()
path = ag_tests_dir()
os.mkdir(path)
def is_win():
return sys.platform.startswith("win")
def is_mac():
return sys.platform == "darwin"
@util.memoize
def ag_exe_path_win():
return os.path.join(top_level_dir(), "rel", "ag.exe")
@util.memoize
def ag_exe_path():
if is_win():
return ag_exe_path_win()
else:
# mac and unix
return os.path.realpath(os.path.join(top_level_dir(), "ag"))
def verify_started_in_right_directory():
path = os.path.join(top_level_dir(), "scripts", "runtests.py")
if not os.path.exists(path):
fatal("Must be in top level of source directory and run as ./scripts/runtests.py.\npath=%s" % path)
# just to be safe, if the ../ag_tests directory exists we won't run tests and
# will ask the user to delete it manually. This is either a conflict with a
# directory that a user created or a left-over from a previous failed run.
def verify_ag_tests_dir_doesnt_exist():
if not os.path.exists(ag_tests_dir()): return
print("ag_tests directory ('%s') directory from previous run exist" % ag_tests_dir())
fatal("Please delete it manually.")
def print_error(out, err, errcode):
print("Failed with error code %d" % errcode)
if len(out) > 0: print("Stdout:\n%s" % out)
if len(err) > 0: print("Stderr:\n%s" % err)
def build_mac():
(out, err, errcode) = util.run_cmd("./build.sh")
if errcode != 0:
print_error(out, err, errcode)
# trying to be helpful and tell user how to resolve specific problems
# TODO: also detect lack of pcre
if "No package 'liblzma' found" in err:
fatal("\nIf you're using homebrew, you need to install xz package to get liblzma\nRun: brew install xz")
sys.exit(1)
def build_win():
util.run_cmd_throw("premake4", "vs2010")
curr_dir = os.getcwd()
os.chdir("vs-premake")
util.kill_msbuild()
util.run_cmd_throw("devenv", "ag.sln", "/Build", "Release", "/Project", "ag.vcxproj")
assert os.path.exists(ag_exe_path_win()), "%s doesn't exist" % ag_exe_path_win()
os.chdir(curr_dir)
# TODO: support unix?
def build():
if NO_BUILD_IF_EXE_EXISTS and os.path.exists(ag_exe_path()):
return
if is_win():
build_win()
elif is_mac():
build_mac()
else:
fatal("Don't know how to build on this platform. sys.platform=%s, os.name=%s" % (sys.platform, os.name))
def path_unix_to_native(path):
parts = path.split("/")
return os.path.join(*parts)
def create_dir_for_file(path):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
# here path is always in unix format
def write_to_file(path, content):
create_dir_for_file(path)
with codecs.open(path, "wb", "utf8") as fo:
fo.write(content)
class FileInfo(object):
def __init__(self, path):
#print("Constructing FileInfo(%s)" % path)
self.path = path
self.data = None
def write(self, dir):
#print("Writing %s with data:'%s'" % (str(self.path), str(self.data)))
p = os.path.join(dir, path_unix_to_native(self.path))
write_to_file(p, self.data)
class CmdInfo(object):
def __init__(self, cmd, expected):
self.cmd = cmd
self.expected = expected
class TestInfo(object):
def __init__(self):
self.files = [] # of FileInfo
# TODO: should test_name be part of CmdInfo?
self.test_name = "" # optional
self.cmds = [] # of CmdInfo
# context for parsing functions
class Tests(object):
def __init__(self, test_file):
self.test_file = test_file # for debugging
self.lines = []
with codecs.open(test_file, "rb", "utf8") as fo:
self.lines = fo.readlines()
self.lines = [line.rstrip() for line in self.lines]
self.curr_line = 0
self.curr_test_info = None
self.test_infos = []
def start_new_test(self):
t = TestInfo()
self.curr_test_info = t
self.test_infos.append(t)
def unget_line(self):
self.curr_line -= 1
return self.lines[self.curr_line]
def next_line(self):
if self.curr_line >= len(self.lines):
return None
l = self.lines[self.curr_line]
self.curr_line += 1
#print(":%s" % l)
return l
def get_curr_line(self):
return self.lines[self.curr_line]
def raise_error(self):
print("Invalid format of tests file '%s'" % self.test_file)
curr_line = self.unget_line()
print("Curr line no %d:\n'%s'" % (self.curr_line, curr_line))
raise "Error"
TEST_START = "test:"
CMD_START = "cmd:"
FILE_START = "file:"
NEW_TEST = "--"
# helper function that chooses next parsing function based on current line
def select_parsing_function(tests, *valid):
curr_line = tests.get_curr_line()
if curr_line.startswith(TEST_START):
if TEST_START in valid: return parse_test_line
elif curr_line.startswith(CMD_START):
if CMD_START in valid: return parse_cmd_line
elif curr_line.startswith(FILE_START):
if FILE_START in valid: return parse_file_line
elif curr_line == NEW_TEST:
if NEW_TEST in valid:
tests.next_line()
return parse_new_test
# TODO: more cases?
    raise Exception("Invalid state, curr_line=`%s`, valid=%s" % (curr_line, str(valid)))
def read_file_cmd_content(tests):
data_lines = []
while True:
line = tests.next_line()
#print(":%s" % line)
for valid in [CMD_START, FILE_START]:
if line.startswith(valid):
#print("*'%s'" % valid)
tests.unget_line()
return "\n".join(data_lines)
data_lines.append(line)
# file: ...
# ... # file data
def parse_file_line(tests):
line = tests.next_line()
parts = line.split(":", 2)
path = parts[1].strip()
fileInfo = FileInfo(path)
#print("File: %s" % path)
fileInfo.data = read_file_cmd_content(tests)
tests.curr_test_info.files.append(fileInfo)
#print("File data: \n'%s'\n" % fileInfo.data)
return select_parsing_function(tests, CMD_START, FILE_START)
# cmd: ...
# ... # output of the command
def parse_cmd_line(tests):
line = tests.next_line()
parts = line.split(":", 2)
cmd = parts[1].strip()
expected_lines = []
while True:
line = tests.next_line()
        if line is None or line == NEW_TEST or line.startswith(CMD_START):
break
expected_lines.append(line)
cmd_info = CmdInfo(cmd, "\n".join(expected_lines))
tests.curr_test_info.cmds.append(cmd_info)
    if line is None: return None
    if line == NEW_TEST: return parse_new_test
assert line.startswith(CMD_START)
tests.unget_line()
return parse_cmd_line
# test: ...
def parse_test_line(tests):
    assert tests.curr_test_info.test_name == ""  # shouldn't be called twice
line = tests.next_line()
parts = line.split(":", 2)
tests.curr_test_info.test_name = parts[1].strip()
return select_parsing_function(tests, NEW_TEST, CMD_START, FILE_START)
def parse_new_test(tests):
tests.start_new_test()
return select_parsing_function(tests, TEST_START, CMD_START, FILE_START)
def parse_at_file_start(tests):
while True:
line = tests.next_line()
        if line is None:
return None
# skip comments at the top of the file
if line.startswith("#"):
continue
if line == NEW_TEST:
return parse_new_test(tests)
tests.raise_error()
# returns a Tests object; its test_infos attribute holds the parsed TestInfo objects
def parse_test_file(test_file):
tests = Tests(test_file)
parse_func = parse_at_file_start
    while parse_func is not None:
parse_func = parse_func(tests)
return tests
def run_ag_and_verify_results(cmd_info):
args = [ag_exe_path()] + cmd_info.cmd.split()
(stdout, stderr, errcmd) = util.run_cmd(*args)
if errcmd != 0:
fatal("Error %d. Stdout:\n'%s'\n Stderr:\n'%s'\n" % (errcmd, stdout, stderr))
if stderr != "":
fatal("Non-empty stderr. Stdout:\n'%s'\n Stderr:\n'%s'\n" % (stdout, stderr))
    # TODO: don't know why there's a NUL byte at the end of stdout, so strip it
if len(stdout) > 0 and stdout[-1] == chr(0):
stdout = stdout[:-1]
result = util.normalize_str(stdout)
if len(result) > 0 and result[-1] == '\n':
result = result[:-1]
expected = util.normalize_str(cmd_info.expected)
if result != expected:
fatal("Unexpected value. Stdout:\n'%s'\nExpected:\n'%s'\n" % (result, expected))
def run_one_test(test_info, test_no):
recreate_ag_tests_dir()
for file_info in test_info.files:
file_info.write(ag_tests_dir())
subtests = len(test_info.cmds)
name = str(test_info.test_name)
print("Running test %d (%s), %d subtests" % (test_no, name, subtests))
dir = os.getcwd()
os.chdir(ag_tests_dir())
    for cmd_info in test_info.cmds:
        run_ag_and_verify_results(cmd_info)
os.chdir(dir)
def run_tests_in_file(test_file):
print(test_file)
tests = parse_test_file(test_file)
print("%d tests in %s" % (len(tests.test_infos), test_file))
test_no = 1
for test_info in tests.test_infos:
run_one_test(test_info, test_no)
test_no += 1
def run_tests():
ag_cmd = ag_exe_path()
if not os.path.exists(ag_cmd):
fatal("Didn't find ag executable. Expected: '%s'" % ag_cmd)
    test_files = [os.path.join("tests", f) for f in os.listdir("tests")]
    for test_file in test_files:
        run_tests_in_file(test_file)
# if everything went ok, delete the temporary tests directory
delete_ag_tests_dir()
def verify_ag_exe_exists():
(out, err) = util.run_cmd_throw(ag_exe_path(), "--version")
print(out)
def clean_win():
delete_ag_tests_dir()
delete_dir(os.path.join(top_level_dir(), "rel"))
delete_dir(os.path.join(top_level_dir(), "vs-premake"))
def clean_mac():
delete_ag_tests_dir()
assert False, "TODO: finish implementing clean_mac()"
def clean():
print("Doing a clean rebuild")
if is_win():
clean_win()
elif is_mac():
clean_mac()
else:
assert False, "Unknown platform (not win or mac)"
def main():
#print("top_level_dir = %s\nag_tests_dir = %s\n" % (top_level_dir(), ag_tests_dir()))
verify_started_in_right_directory()
if "-clean" in sys.argv:
clean()
verify_ag_tests_dir_doesnt_exist()
build()
verify_ag_exe_exists()
run_tests()
if __name__ == "__main__":
main()
|
{
"content_hash": "6c4f4db0c3b974cd3efd6af6e6f3fe9e",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 107,
"avg_line_length": 26.076190476190476,
"alnum_prop": 0.6741234477720964,
"repo_name": "leeonix/the_silver_searcher",
"id": "db75c027324be57cff55f6d6b8a15a4be4e8efd7",
"size": "10975",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/runtests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "628"
},
{
"name": "C",
"bytes": "3232373"
},
{
"name": "C++",
"bytes": "49940"
},
{
"name": "Lua",
"bytes": "6895"
},
{
"name": "Objective-C",
"bytes": "30562"
},
{
"name": "Perl",
"bytes": "15050"
},
{
"name": "Perl6",
"bytes": "1342"
},
{
"name": "Python",
"bytes": "14867"
},
{
"name": "Shell",
"bytes": "3791"
}
],
"symlink_target": ""
}
|
__author__ = "Phil Hendren"
__copyright__ = "Copyright 2014, Mind Candy"
__credits__ = ["Phil Hendren"]
__license__ = "MIT"
__version__ = "1.0"
from app import db
import json
import cPickle
from app.models.schema import Dashboard, Graph, Users
def get_all_dashboards():
return Dashboard.query.all()
def get_dashboard(id):
return Dashboard.query.filter_by(id=id).first()
def get_all_dashboards_for_user(username):
return Dashboard.query.filter_by(owner=username).all()
def get_dashboard_for_user(username):
return Dashboard.query.filter_by(owner=username).first()
def create_dashboard(dashboard, owner):
new = Dashboard(dashboard, owner)
new.layout = cPickle.dumps({})
db.session.add(new)
db.session.commit()
return True
def delete_dashboard(id):
d = get_dashboard(id)
db.session.delete(d)
db.session.commit()
    # Clean up any graphs orphaned by the dashboard deletion, committing once.
    graphs = Graph.query.all()
    for record in graphs:
        if record.dashboard_id is None:
            db.session.delete(record)
    db.session.commit()
    return True
def get_url(id):
return Graph.query.filter_by(id=id).first()
def add_url(url, name, id):
d = get_dashboard(id)
g = Graph(url, name, d)
db.session.add(g)
db.session.commit()
return True
def delete_url(id):
u = get_url(id)
db.session.delete(u)
db.session.commit()
return True
def update_graph(id, name, url):
update = get_url(id)
update.url = url
update.name = name
db.session.commit()
return True
def update_layout(json_obj):
layout = json_obj[json_obj.keys()[0]]
# JSON doesn't support integer keys. Grumble grumble.
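    # e.g. (illustrative): {u'2': [0, 1]} becomes {2: [0, 1]} before pickling.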
layout = dict(map(lambda (key, value): (int(key), value), layout.items()))
pickled = cPickle.dumps(layout)
dashboard = get_dashboard(json_obj.keys()[0])
dashboard.layout = pickled
db.session.commit()
return pickled
|
{
"content_hash": "ffc467c662960ec753aae678475b2f99",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.652542372881356,
"repo_name": "mindcandy/graphite-boards",
"id": "5b81c6dc1455c7494b9d1b77854a269d86de202d",
"size": "1888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/modules/gendash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14517"
},
{
"name": "JavaScript",
"bytes": "279398"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "16187"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/pet/shared_falumpaset_hue.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "771d1775435e02d5c45aeca96386d160",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 21.692307692307693,
"alnum_prop": 0.6808510638297872,
"repo_name": "obi-two/Rebelion",
"id": "2b4f1f0b80343d4da502f14fb8745ea1f05f149b",
"size": "427",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/intangible/pet/shared_falumpaset_hue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from setuptools.command.develop import develop
import glob
import os
class DownloadPolyglotData(develop):
def run(self):
os.system('polyglot download embeddings2.no ner2.no')
os.system('polyglot download embeddings2.en ner2.en')
data_files = []
for folder, _, filenames in os.walk('gransk/web'):
if 'node_modules' in folder:
continue
for filename in filenames:
data_files.append((folder, [os.path.join(folder, filename)]))
data_files.append(('utils', [x for x in glob.glob('utils/*') if os.path.isfile(x)]))
data_files.append('config.yml')
setup(
name='gransk',
author='Petter Christian Bjelland',
version='0.3',
author_email='petter.bjelland@gmail.com',
description='',
license='',
packages=find_packages('.', exclude=['*.py', '*.tests']),
data_files=data_files,
cmdclass={
'download': DownloadPolyglotData
}
)
|
{
"content_hash": "eda891bc6d1884e7fa9b202e93428ed4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 84,
"avg_line_length": 25.973684210526315,
"alnum_prop": 0.6433637284701115,
"repo_name": "pcbje/gransk",
"id": "f997059f28f885375f79fbc3525efbb60f5aca5f",
"size": "1010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10819"
},
{
"name": "HTML",
"bytes": "21524"
},
{
"name": "JavaScript",
"bytes": "88057"
},
{
"name": "PowerShell",
"bytes": "317"
},
{
"name": "Python",
"bytes": "142629"
},
{
"name": "Shell",
"bytes": "4522"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('../')
import unittest
from suds.sax.date import Timezone as Tz
from suds.xsd.sxbuiltin import *
from unittest import TestCase
from tests import *
setup_logging()
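# Lightweight stubs: the __init__ overrides skip the schema-aware base-class
# constructors so translate() can be exercised in isolation.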
class Date(XDate):
def __init__(self):
pass
class Time(XTime):
def __init__(self):
pass
class DateTime(XDateTime):
def __init__(self):
pass
class DateTest(TestCase):
def testSimple(self):
ref = dt.date(1941, 12, 7)
s = '%.4d-%.2d-%.2d' % (ref.year, ref.month, ref.day)
xdate = Date()
d = xdate.translate(s)
self.assertEqual(d, ref)
def testNegativeTimezone(self):
self.equalsTimezone(-6)
def testPositiveTimezone(self):
self.equalsTimezone(6)
def testUtcTimezone(self):
Timezone.LOCAL = 0
ref = dt.date(1941, 12, 7)
s = '%.4d-%.2d-%.2dZ' % (ref.year, ref.month, ref.day)
xdate = Date()
d = xdate.translate(s)
self.assertEqual(d, ref)
def equalsTimezone(self, tz):
Timezone.LOCAL = tz
ref = dt.date(1941, 12, 7)
s = '%.4d-%.2d-%.2d%+.2d:00' % (ref.year, ref.month, ref.day, tz)
xdate = Date()
d = xdate.translate(s)
self.assertEqual(d, ref)
class TimeTest(TestCase):
def testSimple(self):
ref = dt.time(10, 30, 22)
s = '%.2d:%.2d:%.2d' % (ref.hour, ref.minute, ref.second)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(t, ref)
def testSimpleWithShortMicrosecond(self):
ref = dt.time(10, 30, 22, 34)
s = '%.2d:%.2d:%.2d.%4.d' % (ref.hour, ref.minute, ref.second, ref.microsecond)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(t, ref)
def testSimpleWithMicrosecond(self):
ref = dt.time(10, 30, 22, 999999)
s = '%.2d:%.2d:%.2d.%4.d' % (ref.hour, ref.minute, ref.second, ref.microsecond)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(t, ref)
def testSimpleWithLongMicrosecond(self):
ref = dt.time(10, 30, 22, 999999)
s = '%.2d:%.2d:%.2d.%4.d' % (ref.hour, ref.minute, ref.second, int('999999999'))
xtime = Time()
t = xtime.translate(s)
self.assertEqual(t, ref)
def testPositiveTimezone(self):
self.equalsTimezone(6)
def testNegativeTimezone(self):
self.equalsTimezone(-6)
def testUtcTimezone(self):
Timezone.LOCAL = 0
ref = dt.time(10, 30, 22)
s = '%.2d:%.2d:%.2dZ' % (ref.hour, ref.minute, ref.second)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(t, ref)
def equalsTimezone(self, tz):
Timezone.LOCAL = tz
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, tz)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(t, ref)
def testConvertNegativeToGreaterNegative(self):
Timezone.LOCAL = -6
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, -5)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour-1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToLesserNegative(self):
Timezone.LOCAL = -5
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, -6)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour+1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToGreaterPositive(self):
Timezone.LOCAL = 3
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, 2)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour+1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToLesserPositive(self):
Timezone.LOCAL = 2
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, 3)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour-1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToNegative(self):
Timezone.LOCAL = -6
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, 3)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour-9, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToPositive(self):
Timezone.LOCAL = 3
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, -6)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour+9, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToUtc(self):
Timezone.LOCAL = 0
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, -6)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour+6, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToUtc(self):
Timezone.LOCAL = 0
ref = dt.time(10, 30, 22)
s = self.strTime(ref.hour, ref.minute, ref.second, 3)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour-3, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertUtcToPositive(self):
Timezone.LOCAL = 3
ref = dt.time(10, 30, 22)
s = '%.2d:%.2d:%.2dZ' % (ref.hour, ref.minute, ref.second)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour+3, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertUtcToNegative(self):
Timezone.LOCAL = -6
ref = dt.time(10, 30, 22)
s = '%.2d:%.2d:%.2dZ' % (ref.hour, ref.minute, ref.second)
xtime = Time()
t = xtime.translate(s)
self.assertEqual(ref.hour-6, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def strTime(self, h, m, s, offset):
return '%.2d:%.2d:%.2d%+.2d:00' % (h, m, s, offset)
class DateTimeTest(TestCase):
def testSimple(self):
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2d' \
% (ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(t, ref)
def testOverflow(self):
ref = dt.datetime(1, 1, 1, 0, 0, 0)
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2dZ' \
% (ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(t, ref)
def testSimpleWithMicrosecond(self):
ref = dt.datetime(1941, 12, 7, 10, 30, 22, 454)
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2d.%.4d' \
% (ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
ref.microsecond)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(t, ref)
def testPositiveTimezone(self):
self.equalsTimezone(6)
def testNegativeTimezone(self):
self.equalsTimezone(-6)
def testUtcTimezone(self):
Timezone.LOCAL = 0
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2d' \
% (ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(t, ref)
def equalsTimezone(self, tz):
Timezone.LOCAL = tz
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
tz)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(t, ref)
def testConvertNegativeToGreaterNegative(self):
Timezone.LOCAL = -6
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
-5)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour-1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToLesserNegative(self):
Timezone.LOCAL = -5
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
-6)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour+1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToGreaterPositive(self):
Timezone.LOCAL = 3
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
2)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour+1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToLesserPositive(self):
Timezone.LOCAL = 2
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
3)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour-1, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToNegative(self):
Timezone.LOCAL = -6
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
3)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour-9, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToPositive(self):
Timezone.LOCAL = 3
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
-6)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour+9, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToUtc(self):
Timezone.LOCAL = 0
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
-6)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour+6, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertPositiveToUtc(self):
Timezone.LOCAL = 0
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
3)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour-3, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertUtcToPositive(self):
Timezone.LOCAL = 3
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2dZ' \
% (ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour+3, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertUtcToNegative(self):
Timezone.LOCAL = -6
ref = dt.datetime(1941, 12, 7, 10, 30, 22)
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2dZ' \
% (ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(ref.day, t.day)
self.assertEqual(ref.hour-6, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToGreaterNegativeAndPreviousDay(self):
Timezone.LOCAL = -6
ref = dt.datetime(1941, 12, 7, 0, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
-5)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(6, t.day)
self.assertEqual(23, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def testConvertNegativeToLesserNegativeAndNextDay(self):
Timezone.LOCAL = -5
ref = dt.datetime(1941, 12, 7, 23, 30, 22)
s = self.strDateTime(
ref.year,
ref.month,
ref.day,
ref.hour,
ref.minute,
ref.second,
-6)
xdt = DateTime()
t = xdt.translate(s)
self.assertEqual(ref.year, t.year)
self.assertEqual(ref.month, t.month)
self.assertEqual(8, t.day)
self.assertEqual(0, t.hour)
self.assertEqual(ref.minute, t.minute)
self.assertEqual(ref.second, t.second)
def strDateTime(self, Y, M, D, h, m, s, offset):
s = '%.4d-%.2d-%.2dT%.2d:%.2d:%.2d%+.2d:00' \
% (Y, M, D, h, m, s, offset)
return s
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "13f4a0acbe46e2e51e36cfde77d9182e",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 88,
"avg_line_length": 32.32645403377111,
"alnum_prop": 0.5174114915844458,
"repo_name": "TinajaLabs/tinajagate",
"id": "ca6c2e6ca8621ae883c6785d69abb392179c4783",
"size": "18063",
"binary": false,
"copies": "64",
"ref": "refs/heads/master",
"path": "downloads/python-suds-0.4/tests/builtin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "89288"
},
{
"name": "Python",
"bytes": "1072165"
},
{
"name": "Shell",
"bytes": "4906"
}
],
"symlink_target": ""
}
|
def cell_array_slicer(data):
""" Iterate over cell components on a vtk cell array
VTK stores the associated point index for each cell in a one
dimensional array based on the following template::
[n, id0, id1, id2, ..., idn, m, id0, ...]
The iterator takes a cell array and returns the point indices for
each cell.
"""
count = 0
collection = []
for value in data:
if count == 0:
collection = []
count = value
else:
collection.append(value)
count -= 1
if count == 0:
yield collection
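# A minimal usage sketch (hypothetical data): two cells, a triangle over
# points 0-2 followed by a line over points 3-4.
#
#   >>> list(cell_array_slicer([3, 0, 1, 2, 2, 3, 4]))
#   [[0, 1, 2], [3, 4]]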
|
{
"content_hash": "cb0c8aea8355a3a90aaec3ce03ef92fb",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 26.956521739130434,
"alnum_prop": 0.5564516129032258,
"repo_name": "simphony/simphony-mayavi",
"id": "09518f34b46610f58c437af88e362efe018a3448",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simphony_mayavi/core/cell_array_tools.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "382041"
}
],
"symlink_target": ""
}
|
class Solution:
def numRescueBoats(self, people: List[int], limit: int) -> int:
people.sort()
low = total = 0
high = len(people) - 1
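        # Greedy pairing: each boat takes the heaviest remaining person, plus
        # the lightest remaining person if the two fit within the weight limit.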
while low <= high:
total += 1
if people[low] + people[high] <= limit:
low += 1
high -= 1
return total
|
{
"content_hash": "c130f81b4e16f4aad9c52cd245d7ed8f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 30.09090909090909,
"alnum_prop": 0.46827794561933533,
"repo_name": "jiadaizhao/LeetCode",
"id": "c05a548502e169c07a32edaa4563e973714e1fbd",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0801-0900/0881-Boats to Save People/0881-Boats to Save People.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
}
|
import subprocess
import sys
from numba.tests.support import TestCase
import unittest
class TestNumbaImport(TestCase):
"""
Test behaviour of importing Numba.
"""
def test_laziness(self):
"""
Importing top-level numba features should not import too many modules.
"""
# A heuristic set of modules that shouldn't be imported immediately
blacklist = [
'cffi',
'distutils',
'numba.cuda',
'numba.cpython.mathimpl',
'numba.cpython.randomimpl',
'numba.tests',
'numba.core.typing.collections',
'numba.core.typing.listdecl',
'numba.core.typing.npdatetime',
]
# Sanity check the modules still exist...
for mod in blacklist:
if mod not in ('cffi',):
__import__(mod)
code = """if 1:
from numba import jit, vectorize
from numba.core import types
import sys
print(list(sys.modules))
"""
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: stderr follows\n%s\n"
% (popen.returncode, err.decode()))
        modlist = set(eval(out.strip()))
        unexpected = set(blacklist) & modlist
        self.assertFalse(unexpected,
                         "some modules unexpectedly imported: %s"
                         % ", ".join(sorted(unexpected)))
|
{
"content_hash": "94134298133850515090383ee388e359",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 84,
"avg_line_length": 32.1,
"alnum_prop": 0.5526479750778817,
"repo_name": "sklam/numba",
"id": "2d8fff741e75f2eb19bdadd97e2fc8e3fc6de3ae",
"size": "1605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/tests/test_import.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
}
|
"""Auto-generated file, do not edit by hand. 808 metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_808 = PhoneMetadata(id='001', country_code=808, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='\\d{8}', possible_length=(8,)),
shared_cost=PhoneNumberDesc(national_number_pattern='\\d{8}', example_number='12345678', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2')],
leading_zero_possible=True)
|
{
"content_hash": "3de6e9fcbe125f5cf0a99ea7d5081a51",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 115,
"avg_line_length": 67.875,
"alnum_prop": 0.7200736648250461,
"repo_name": "samdowd/drumm-farm",
"id": "a60a2a439c68fb04e60f9acd996d4498facb8fa2",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drumm_env/lib/python2.7/site-packages/phonenumbers/data/region_808.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "436732"
},
{
"name": "HTML",
"bytes": "578902"
},
{
"name": "JavaScript",
"bytes": "2356286"
},
{
"name": "Python",
"bytes": "33148901"
},
{
"name": "Roff",
"bytes": "28"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import sorl.thumbnail.fields
from django.conf import settings
import cambiaahora.utils
import ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Testimonios',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('titulo', models.CharField(max_length=250, verbose_name='Nombre')),
('slug', models.SlugField(editable=False)),
('foto', sorl.thumbnail.fields.ImageField(upload_to=cambiaahora.utils.get_file_path, null=True, verbose_name='Foto principal', blank=True)),
('fecha', models.DateField(verbose_name='Fecha')),
('texto', ckeditor.fields.RichTextField(verbose_name='Texto')),
('aprobacion', models.IntegerField(default=b'1', verbose_name='Aprobaci\xf3n', choices=[(1, 'Borrador'), (2, 'Aprobado')])),
('idioma', models.IntegerField(default=b'1', verbose_name='Idioma', choices=[(1, 'Espa\xf1ol'), (2, 'English')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Testimonio',
'verbose_name_plural': 'Testimonios',
},
),
]
|
{
"content_hash": "de94f2c19e12da16e78b1136be2adcb5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 156,
"avg_line_length": 43.2,
"alnum_prop": 0.6044973544973545,
"repo_name": "shiminasai/plataforma_fadcanic",
"id": "d9c0394af60aaaafca6ce075a2728da491802c08",
"size": "1536",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cambiaahora/testimonios/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266182"
},
{
"name": "HTML",
"bytes": "465685"
},
{
"name": "JavaScript",
"bytes": "1047064"
},
{
"name": "Python",
"bytes": "370885"
}
],
"symlink_target": ""
}
|
import collections.abc
import difflib
import io
import mmap
import platform
from typing import BinaryIO, Callable, Collection, Sequence, TypeVar, Union
import numpy as np
import torch
from torchvision._utils import sequence_to_str
__all__ = [
"add_suggestion",
"fromfile",
"ReadOnlyTensorBuffer",
]
def add_suggestion(
msg: str,
*,
word: str,
possibilities: Collection[str],
close_match_hint: Callable[[str], str] = lambda close_match: f"Did you mean '{close_match}'?",
alternative_hint: Callable[
[Sequence[str]], str
] = lambda possibilities: f"Can be {sequence_to_str(possibilities, separate_last='or ')}.",
) -> str:
if not isinstance(possibilities, collections.abc.Sequence):
possibilities = sorted(possibilities)
suggestions = difflib.get_close_matches(word, possibilities, 1)
hint = close_match_hint(suggestions[0]) if suggestions else alternative_hint(possibilities)
if not hint:
return msg
return f"{msg.strip()} {hint}"
D = TypeVar("D")
def _read_mutable_buffer_fallback(file: BinaryIO, count: int, item_size: int) -> bytearray:
# A plain file.read() will give a read-only bytes, so we convert it to bytearray to make it mutable
return bytearray(file.read(-1 if count == -1 else count * item_size))
def fromfile(
file: BinaryIO,
*,
dtype: torch.dtype,
byte_order: str,
count: int = -1,
) -> torch.Tensor:
"""Construct a tensor from a binary file.
.. note::
This function is similar to :func:`numpy.fromfile` with two notable differences:
1. This function only accepts an open binary file, but not a path to it.
2. This function has an additional ``byte_order`` parameter, since PyTorch's ``dtype``'s do not support that
concept.
.. note::
If the ``file`` was opened in update mode, i.e. "r+b" or "w+b", reading data is much faster. Be aware that as
long as the file is still open, inplace operations on the returned tensor will reflect back to the file.
Args:
file (IO): Open binary file.
dtype (torch.dtype): Data type of the underlying data as well as of the returned tensor.
byte_order (str): Byte order of the data. Can be "little" or "big" endian.
count (int): Number of values of the returned tensor. If ``-1`` (default), will read the complete file.
"""
byte_order = "<" if byte_order == "little" else ">"
char = "f" if dtype.is_floating_point else ("i" if dtype.is_signed else "u")
item_size = (torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits // 8
np_dtype = byte_order + char + str(item_size)
buffer: Union[memoryview, bytearray]
if platform.system() != "Windows":
# PyTorch does not support tensors with underlying read-only memory. In case
# - the file has a .fileno(),
# - the file was opened for updating, i.e. 'r+b' or 'w+b',
# - the file is seekable
# we can avoid copying the data for performance. Otherwise we fall back to simply .read() the data and copy it
# to a mutable location afterwards.
try:
buffer = memoryview(mmap.mmap(file.fileno(), 0))[file.tell() :]
# Reading from the memoryview does not advance the file cursor, so we have to do it manually.
file.seek(*(0, io.SEEK_END) if count == -1 else (count * item_size, io.SEEK_CUR))
except (AttributeError, PermissionError, io.UnsupportedOperation):
buffer = _read_mutable_buffer_fallback(file, count, item_size)
else:
# On Windows just trying to call mmap.mmap() on a file that does not support it, may corrupt the internal state
# so no data can be read afterwards. Thus, we simply ignore the possible speed-up.
buffer = _read_mutable_buffer_fallback(file, count, item_size)
# We cannot use torch.frombuffer() directly, since it only supports the native byte order of the system. Thus, we
# read the data with np.frombuffer() with the correct byte order and convert it to the native one with the
# successive .astype() call.
return torch.from_numpy(np.frombuffer(buffer, dtype=np_dtype, count=count).astype(np_dtype[1:], copy=False))
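# A minimal usage sketch (hypothetical file name and contents): read four
# little-endian float32 values from an open binary file.
#
#   with open("data.bin", "r+b") as f:  # update mode enables the mmap fast path
#       t = fromfile(f, dtype=torch.float32, byte_order="little", count=4)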
class ReadOnlyTensorBuffer:
def __init__(self, tensor: torch.Tensor) -> None:
self._memory = memoryview(tensor.numpy())
self._cursor: int = 0
def tell(self) -> int:
return self._cursor
def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
if whence == io.SEEK_SET:
self._cursor = offset
elif whence == io.SEEK_CUR:
self._cursor += offset
elif whence == io.SEEK_END:
self._cursor = len(self._memory) + offset
else:
raise ValueError(
f"'whence' should be ``{io.SEEK_SET}``, ``{io.SEEK_CUR}``, or ``{io.SEEK_END}``, "
f"but got {repr(whence)} instead"
)
return self.tell()
def read(self, size: int = -1) -> bytes:
cursor = self.tell()
offset, whence = (0, io.SEEK_END) if size == -1 else (size, io.SEEK_CUR)
return self._memory[slice(cursor, self.seek(offset, whence))].tobytes()
|
{
"content_hash": "2ae03eedb842fd3ed31be20d3b7cf6c5",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 119,
"avg_line_length": 41.70634920634921,
"alnum_prop": 0.6390104662226451,
"repo_name": "pytorch/vision",
"id": "3dee4b59a7aa8d1046504d8320728bb31a04942c",
"size": "5255",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "torchvision/prototype/utils/_internal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "20242"
},
{
"name": "C",
"bytes": "930"
},
{
"name": "C++",
"bytes": "366825"
},
{
"name": "CMake",
"bytes": "18266"
},
{
"name": "Cuda",
"bytes": "90174"
},
{
"name": "Dockerfile",
"bytes": "1608"
},
{
"name": "Java",
"bytes": "21833"
},
{
"name": "Objective-C",
"bytes": "2715"
},
{
"name": "Objective-C++",
"bytes": "3284"
},
{
"name": "PowerShell",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "3952070"
},
{
"name": "Ruby",
"bytes": "1086"
},
{
"name": "Shell",
"bytes": "35660"
}
],
"symlink_target": ""
}
|
from java.sql import Connection
from com.ziclix.python.sql import zxJDBC
from datetime import datetime
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.base.introspection import BaseDatabaseIntrospection, FieldInfo, TableInfo
from django.db.backends.base.client import BaseDatabaseClient
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
__all__ = (
'JDBCBaseDatabaseWrapper',
'JDBCBaseDatabaseFeatures',
'JDBCBaseDatabaseOperations',
'JDBCBaseDatabaseIntrospection',
'JDBCBaseDatabaseClient',
'JDBCBaseDatabaseValidation',
'JDBCBaseDatabaseCreation',
'JDBCFieldInfo',
'JDBCTableInfo',
'JDBCBaseDatabaseSchemaEditor',
'JDBCCursorWrapper',
'JDBCConnection',
)
class JDBCBaseDatabaseWrapper(BaseDatabaseWrapper):
"""
Represents a database connection using zxJDBC.
"""
jdbc_default_host = None
jdbc_default_port = None
jdbc_default_name = None
jdbc_driver_class_name = None
jdbc_connection_url_pattern = None
Database = zxJDBC
Error = Database.Error
NotSupportedError = Database.NotSupportedError
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
ProgrammingError = Database.ProgrammingError
def __init__(self, *args, **kwargs):
super(JDBCBaseDatabaseWrapper, self).__init__(*args, **kwargs)
def get_jdbc_settings(self):
settings_dict = dict(self.settings_dict) # copy instead of reference
if not settings_dict.get('HOST', None):
settings_dict['HOST'] = self.jdbc_default_host
if not settings_dict.get('PORT', None):
settings_dict['PORT'] = self.jdbc_default_port
if not settings_dict.get('NAME', None):
settings_dict['NAME'] = self.jdbc_default_name
return settings_dict
def get_jdbc_driver_class_name(self):
return self.jdbc_driver_class_name
def get_jdbc_connection_url(self):
return self.jdbc_connection_url_pattern % self.get_jdbc_settings()
def get_new_jndi_connection(self):
"""
        Returns a zxJDBC Connection object obtained from a JNDI data source if
        the settings dictionary contains a JNDI_NAME entry in its OPTIONS
        dictionary, or None if it doesn't.
:return: zxJDBC Connection
"""
settings_dict = dict(self.settings_dict)
if 'OPTIONS' not in settings_dict:
return None
if 'JNDI_NAME' not in settings_dict['OPTIONS']:
return None
name = settings_dict['OPTIONS']['JNDI_NAME']
props = settings_dict['OPTIONS'].get('JNDI_CONTEXT_OPTIONS', {})
return zxJDBC.lookup(name, keywords=props)
def get_connection_params(self):
settings_dict = dict(self.settings_dict)
        # NAME may be None (the backend default is used below), but an empty
        # string is a configuration error.
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
settings_dict['NAME'] = settings_dict['NAME'] or self.jdbc_default_name
return settings_dict
def get_new_connection(self, conn_params):
connection = self.get_new_jndi_connection()
if not connection:
connection = zxJDBC.connect(self.get_jdbc_connection_url(),
conn_params['USER'],
conn_params['PASSWORD'],
self.jdbc_driver_class_name,
**conn_params['OPTIONS'])
self._set_default_isolation_level(connection)
return connection
def create_cursor(self):
return JDBCCursorWrapper(self.connection.cursor())
def _set_autocommit(self, autocommit):
self.connection.autocommit = autocommit
@staticmethod
def _set_default_isolation_level(connection):
"""
Make transactions transparent to all cursors. Must be called by zxJDBC backends
after instantiating a connection.
:param connection: zxJDBC connection
"""
jdbc_connection = connection.__connection__
jdbc_connection.setTransactionIsolation(JDBCConnection.TRANSACTION_READ_COMMITTED)
class JDBCBaseDatabaseOperations(BaseDatabaseOperations):
"""
zxJDBC supports dates, times, datetimes and decimal directly, so we
override the convert methods of django here.
"""
def value_to_db_date(self, value):
return value
def value_to_db_datetime(self, value):
return value
def value_to_db_time(self, value):
return value
def value_to_db_decimal(self, value, max_digits, decimal_places):
return value
def year_lookup_bounds(self, value):
first = datetime(value, 1, 1)
second = datetime(value, 12, 31, 23, 59, 59, 999999)
return [first, second]
class JDBCCursorWrapper(object):
"""
A simple wrapper to do the "%s" -> "?" replacement before running zxJDBC's
execute or executemany.
"""
def __init__(self, cursor):
self.cursor = cursor
def __get_arraysize(self):
return self.cursor.arraysize
def __set_arraysize(self, size):
self.cursor.arraysize = size
def __get_rowcount(self):
if self.cursor.updatecount > self.cursor.rowcount:
return self.cursor.updatecount
return self.cursor.rowcount
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.next, None)
def execute(self, sql, params=None):
if not params:
params = tuple()
sql = sql % (('?',) * len(params))
self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
if len(param_list) > 0:
sql = sql % (('?',) * len(param_list[0]))
self.cursor.executemany(sql, param_list)
def callproc(self, procname, parameters=None):
return self.cursor.callproc(procname, parameters)
def close(self):
return self.cursor.close()
def fetchone(self):
try:
return self.cursor.fetchone()
except JDBCBaseDatabaseWrapper.DatabaseError:
return None
def fetchmany(self, size=None):
if not size:
size = self.cursor.arraysize
        # `fetchmany` may raise an IndexError if the result set is
        # smaller than the size parameter. We fall back to `fetchall`
        # in that case.
try:
return self.cursor.fetchmany(size)
except (IndexError, JDBCBaseDatabaseWrapper.DatabaseError):
return self.cursor.fetchall()
def fetchall(self):
try:
return self.cursor.fetchall()
except (IndexError, JDBCBaseDatabaseWrapper.DatabaseError):
return []
def nextset(self):
return self.cursor.nextset()
def setinputsizes(self, sizes):
return self.cursor.setinputsizes(sizes)
def setoutputsize(self, sizes, column=None):
return self.cursor.setoutputsize(sizes, column)
arraysize = property(fget=__get_arraysize, fset=__set_arraysize)
rowcount = property(fget=__get_rowcount)
class JDBCBaseDatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
class JDBCBaseDatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
zxJDBC.BIGINT: 'BigIntegerField',
zxJDBC.BINARY: 'BinaryField',
zxJDBC.BIT: 'BooleanField',
zxJDBC.BLOB: 'BinaryField',
zxJDBC.BOOLEAN: 'BooleanField',
zxJDBC.CHAR: 'CharField',
zxJDBC.CLOB: 'TextField',
zxJDBC.DATE: 'DateField',
zxJDBC.DATETIME: 'DateTimeField',
zxJDBC.DECIMAL: 'DecimalField',
zxJDBC.DOUBLE: 'FloatField',
zxJDBC.FLOAT: 'FloatField',
zxJDBC.INTEGER: 'IntegerField',
zxJDBC.LONGNVARCHAR: 'TextField',
zxJDBC.LONGVARBINARY: 'BinaryField',
zxJDBC.LONGVARCHAR: 'TextField',
zxJDBC.NCHAR: 'CharField',
zxJDBC.NCLOB: 'TextField',
zxJDBC.NUMBER: 'IntegerField',
zxJDBC.NVARCHAR: 'CharField',
zxJDBC.REAL: 'FloatField',
zxJDBC.SMALLINT: 'SmallIntegerField',
zxJDBC.STRING: 'TextField',
zxJDBC.TIME: 'TimeField',
zxJDBC.TIMESTAMP: 'DateTimeField',
zxJDBC.TINYINT: 'SmallIntegerField',
zxJDBC.VARBINARY: 'BinaryField',
zxJDBC.VARCHAR: 'CharField',
}
class JDBCBaseDatabaseClient(BaseDatabaseClient):
pass
class JDBCBaseDatabaseValidation(BaseDatabaseValidation):
pass
class JDBCBaseDatabaseCreation(BaseDatabaseCreation):
pass
class JDBCFieldInfo(FieldInfo):
pass
class JDBCTableInfo(TableInfo):
pass
class JDBCBaseDatabaseSchemaEditor(BaseDatabaseSchemaEditor):
pass
class JDBCConnection(Connection):
pass
|
{
"content_hash": "9ddb18d36109ef2af9ec686087400094",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 97,
"avg_line_length": 31.589830508474577,
"alnum_prop": 0.6538255177594162,
"repo_name": "beachmachine/django-jython",
"id": "f66cb987d5f1635ee39f968e03aa898086921ccc",
"size": "9344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doj/db/backends/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "281227"
}
],
"symlink_target": ""
}
|
""" Preprocessing script for the PANACEA corpus. """
import sys, os, re, argparse, codecs, unicodedata
from xml.dom.minidom import parseString
from unidecode import unidecode
import treetaggerwrapper as ttw
def replace_oelig(string):
""" Replace oe ligatures (which are not present in Latin-1). """
string = re.sub(ur"\u0153", "oe", string)
string = re.sub(ur"\u0152", "OE", string)
return string
def replace_quotes(string):
""" Replace quotes that are absent from Latin-1 with Latin-1 equivalents. """
string = re.sub(ur"[\u2018\u2019]", "'", string)
string = re.sub(ur"[\u201C\u201D]", '"', string)
return string
def normalize_chars(string, lang):
"""
Normalize characters in string.
For English, we simply map all characters to an ASCII character
using unidecode.
For French, we start by getting the normal form (C) of the unicode
string, then replace characters that are not present in the
Latin-1 encoding but that we want to keep (o+e ligatures, certains
quotation marks). Finally, we convert the string to Latin-1,
discarding characters that are not in Latin-1, and convert back to
unicode.
"""
if lang.lower()=='en':
return unidecode(string)
elif lang.lower()=='fr':
string = unicodedata.normalize('NFC', string)
string = replace_oelig(string)
string = replace_quotes(string)
string = string.encode('latin-1', errors='ignore')
string = string.decode('latin-1')
return string
else:
print 'ERROR: "{}" not a recognized language.'.format(lang)
return None
def lemma_is_uncertain(string):
"""
Check if a string contains a pipe, which is used by TreeTagger to
indicate that it is unsure of the lemma of some token.
"""
pattern = ".+\|.+"
if re.search(pattern,string):
return True
else:
return False
def get_treetagger_triple(string):
"""
Split a single line from TreeTagger's output to obtain a
(word,pos,lemma) triple.
"""
elems = string.split('\t')
# Lines that don't contain exactly 2 tabs are ignored. These are
# usually lines containing a single <repdns> or <repurl> element
# which TreeTagger uses to indicate that it has replaced the token
# in the previous (token, pos, lemma) triple with a special symbol
# (e.g. dns-remplacé). The replaced text is in the "text"
# attribute of the repdns or repurl element.
if len(elems) == 3:
return elems
else:
return None
def lemmatize(tagger, string):
"""
Use TreeTagger to tokenize a string (at sentence and word level) and lemmatize.
Arguments:
tagger -- an instance of treetaggerwrapper.TreeTagger
string -- a string
"""
sentences = []
current_sent = []
lines = tagger.TagText(string)
for line in lines:
triple = get_treetagger_triple(line)
if triple:
word, pos, lemma = triple
if lemma == u'<unknown>' or lemma_is_uncertain(lemma):
token = word.lower()
else:
token = lemma.lower()
# Replace o+e ligatures
token = replace_oelig(token)
if pos == 'SENT':
if len(current_sent):
current_sent.append(token)
sentences.append(current_sent)
current_sent = []
else:
# The sentence is empty. Ignore the punctuation mark.
pass
else:
current_sent.append(token)
    # If the current sentence is non-empty, add it to the output.
if len(current_sent):
sentences.append(current_sent)
return sentences
def sent_tokenize(tagger, string):
"""
Use treetagger to tokenize a string at sentence and word level.
Arguments:
tagger -- an instance of treetaggerwrapper.TreeTagger
string -- a string
"""
sentences = []
current_sent = []
lines = tagger.TagText(string)
for line in lines:
triple = get_treetagger_triple(line)
if triple:
word, pos, lemma = triple
token = word.lower()
# Replace o+e ligatures
token = replace_oelig(token)
if pos == 'SENT':
if len(current_sent):
current_sent.append(token)
sentences.append(current_sent)
current_sent = []
else:
# The sentence is empty. Ignore the punctuation mark.
pass
else:
current_sent.append(token)
    # If the current sentence is non-empty, add it to the output.
if len(current_sent):
sentences.append(current_sent)
return sentences
if __name__ == "__main__":
    dsc = (u'Preprocessing of the PANACEA corpus (French or English):'
           u' extraction of the textual content, character normalization,'
           u' segmentation and (optional) lemmatization.')
parser = argparse.ArgumentParser(description=dsc)
    parser.add_argument('-l', '--lemmatize', action="store_true", required=False,
                        help=(u"Lemmatize using TreeTagger. Otherwise, only "
                              u"normalization and segmentation are applied."))
parser.add_argument('lang', choices=['EN', 'FR'])
    parser.add_argument('input', help=u'Path of the directory containing the XML files.')
    parser.add_argument('output', help=u'Path of the output file.')
args = parser.parse_args()
# Process args
corpus_dir = args.input
if corpus_dir[-1] != '/':
corpus_dir += '/'
if os.path.isfile(args.output):
        sys.exit(u'ERROR: The file {} already exists.'.format(args.output))
# Get paths of XML files in corpus
filenames = [x for x in os.listdir(corpus_dir) if x[-3:] == 'xml']
# Initialize tagger
tagger = ttw.TreeTagger(TAGLANG=args.lang.lower(), TAGDIR='/usr/local/TreeTagger',
TAGINENC='utf-8', TAGOUTENC='utf-8')
# Apply preprocessing
MIN_TOKENS_PER_DOC = 50
for i in range(len(filenames)):
# Load file
docname = filenames[i]
docpath = corpus_dir + docname
with codecs.open(docpath, 'r', encoding='utf-8') as f:
doctext = f.read()
xmldoc = parseString(doctext.encode('utf-8'))
paras = []
processed_paras = []
nb_tokens = 0
# Get title
title_stmt = xmldoc.getElementsByTagName('titleStmt')[0]
title_node = title_stmt.getElementsByTagName('title')[0].firstChild
if title_node:
title = title_node.data
paras.append(title)
# Get paragraphs
for p_elem in xmldoc.getElementsByTagName('p'):
p_txt = p_elem.firstChild.data
p_type = p_elem.getAttribute('crawlinfo')
if p_type not in ['boilerplate', 'ooi-lang', 'ooi-length']:
paras.append(p_txt)
# Lemmatize or simply tokenize paragraphs (including title)
for p in paras:
p = normalize_chars(p, args.lang)
if args.lemmatize:
sents = lemmatize(tagger, p)
else:
sents = sent_tokenize(tagger, p)
for s in sents:
nb_tokens += len(s)
processed_paras.append(sents)
# Write doc if it is long enough
if nb_tokens >= MIN_TOKENS_PER_DOC:
with codecs.open(args.output, 'a', encoding='utf-8') as f:
for para in processed_paras:
for sent in para:
f.write(' '.join(sent)+'\n')
f.write('\n')
else:
            print u'WARNING: The file {} was excluded (too short).'.format(docname)
if i % 10 == 0:
            print u'{} files processed...'.format(i)
    print u'{} files processed. Done.\n'.format(len(filenames))
|
{
"content_hash": "6d6f71f2b4cc27a23828a551d138eb94",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 90,
"avg_line_length": 35.59911894273128,
"alnum_prop": 0.5851998515035268,
"repo_name": "gbcolborne/exp_phd",
"id": "8a55d9731323ff522712b37921b9b8e3dc9fee9c",
"size": "8118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocess_PANACEA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52823"
}
],
"symlink_target": ""
}
|
from .score_cache import ScoreCache
class Empty(ScoreCache):
def lookup(self, *args, **kwargs):
raise KeyError()
def store(self, *args, **kwargs):
return None
|
{
"content_hash": "ba861a5f849f4935898a0ffb20f4d0a4",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 18.7,
"alnum_prop": 0.6310160427807486,
"repo_name": "aetilley/ores",
"id": "875a73b9e15be9c060246a868712c6b4fa08d518",
"size": "187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ores/score_caches/empty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43517"
},
{
"name": "R",
"bytes": "9229"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
}
|
import base64
import gzip
import hashlib
import io
import logging
import zlib
from metrics import Metric
from telemetry.page import page_test
# All network metrics are Chrome only for now.
from telemetry.core.backends.chrome_inspector import inspector_network
from telemetry.value import scalar
class NetworkMetricException(page_test.MeasurementFailure):
pass
class HTTPResponse(object):
""" Represents an HTTP response from a timeline event."""
def __init__(self, event):
self._response = (
inspector_network.InspectorNetworkResponseData.FromTimelineEvent(event))
self._remote_port = None
if 'response' in event.args and 'remotePort' in event.args['response']:
self._remote_port = event.args['response']['remotePort']
self._content_length = None
@property
def response(self):
return self._response
@property
def remote_port(self):
return self._remote_port
@property
def url_signature(self):
return hashlib.md5(self.response.url).hexdigest()
@property
def content_length(self):
if self._content_length is None:
self._content_length = self.GetContentLength()
return self._content_length
@property
def has_original_content_length(self):
return 'X-Original-Content-Length' in self.response.headers
@property
def original_content_length(self):
if self.has_original_content_length:
return int(self.response.GetHeader('X-Original-Content-Length'))
return 0
@property
def data_saving_rate(self):
if (self.response.served_from_cache or
not self.has_original_content_length or
self.original_content_length <= 0):
return 0.0
return (float(self.original_content_length - self.content_length) /
self.original_content_length)
def GetContentLengthFromBody(self):
resp = self.response
body, base64_encoded = resp.GetBody()
if not body:
return 0
    # Binary data (images, etc.) is base64-encoded. Decode it to get
    # the actual content length.
if base64_encoded:
decoded = base64.b64decode(body)
return len(decoded)
encoding = resp.GetHeader('Content-Encoding')
if not encoding:
return len(body)
# The response body returned from a timeline event is always decompressed.
# So, we need to compress it to get the actual content length if headers
# say so.
encoding = encoding.lower()
if encoding == 'gzip':
      return self.GetGzippedBodyLength(body)
elif encoding == 'deflate':
return len(zlib.compress(body, 9))
else:
raise NetworkMetricException, (
'Unknown Content-Encoding %s for %s' % (encoding, resp.url))
def GetContentLength(self):
cl = 0
try:
cl = self.GetContentLengthFromBody()
except Exception, e:
resp = self.response
logging.warning('Fail to get content length for %s from body: %s',
resp.url[:100], e)
cl_header = resp.GetHeader('Content-Length')
if cl_header:
cl = int(cl_header)
else:
body, _ = resp.GetBody()
if body:
cl = len(body)
return cl
@staticmethod
  def GetGzippedBodyLength(body):
if not body:
return 0
bio = io.BytesIO()
try:
with gzip.GzipFile(fileobj=bio, mode="wb", compresslevel=9) as f:
f.write(body.encode('utf-8'))
except Exception, e:
logging.warning('Fail to gzip response body: %s', e)
raise e
return len(bio.getvalue())
class NetworkMetric(Metric):
"""A network metric based on timeline events."""
def __init__(self):
super(NetworkMetric, self).__init__()
# Whether to add detailed result for each sub-resource in a page.
self.add_result_for_resource = False
self.compute_data_saving = False
self._events = None
def Start(self, page, tab):
self._events = None
tab.StartTimelineRecording()
def Stop(self, page, tab):
assert self._events is None
tab.StopTimelineRecording()
def IterResponses(self, tab):
if self._events is None:
self._events = tab.timeline_model.GetAllEventsOfName('HTTPResponse')
if len(self._events) == 0:
return
for e in self._events:
yield self.ResponseFromEvent(e)
def ResponseFromEvent(self, event):
return HTTPResponse(event)
def AddResults(self, tab, results):
content_length = 0
original_content_length = 0
for resp in self.IterResponses(tab):
# Ignore content length calculation for cache hit.
if resp.response.served_from_cache:
continue
resource = resp.response.url
resource_signature = resp.url_signature
cl = resp.content_length
if resp.has_original_content_length:
ocl = resp.original_content_length
if ocl < cl:
          logging.warning('original content length (%d) is less than content '
                          'length (%d) for resource %s', ocl, cl, resource)
if self.add_result_for_resource:
results.AddValue(scalar.ScalarValue(
results.current_page,
'resource_data_saving_' + resource_signature, 'percent',
resp.data_saving_rate * 100))
results.AddValue(scalar.ScalarValue(
results.current_page,
'resource_original_content_length_' + resource_signature, 'bytes',
ocl))
original_content_length += ocl
else:
original_content_length += cl
if self.add_result_for_resource:
results.AddValue(scalar.ScalarValue(
results.current_page,
'resource_content_length_' + resource_signature, 'bytes', cl))
content_length += cl
results.AddValue(scalar.ScalarValue(
results.current_page, 'content_length', 'bytes', content_length))
results.AddValue(scalar.ScalarValue(
results.current_page, 'original_content_length', 'bytes',
original_content_length))
if self.compute_data_saving:
if (original_content_length > 0 and
original_content_length >= content_length):
saving = (float(original_content_length-content_length) * 100 /
original_content_length)
results.AddValue(scalar.ScalarValue(
results.current_page, 'data_saving', 'percent', saving))
else:
results.AddValue(scalar.ScalarValue(
results.current_page, 'data_saving', 'percent', 0.0))
|
{
"content_hash": "fb18043c7108889868c58aaee4a3c6d0",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 80,
"avg_line_length": 31.668316831683168,
"alnum_prop": 0.65655776145068,
"repo_name": "hefen1/chromium",
"id": "9c41dcce37d83164f4e227a54418ff52e1ed40a6",
"size": "6560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/chrome_proxy/integration_tests/network_metrics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "C",
"bytes": "4050888"
},
{
"name": "C++",
"bytes": "227355953"
},
{
"name": "CSS",
"bytes": "970407"
},
{
"name": "HTML",
"bytes": "28896884"
},
{
"name": "Java",
"bytes": "8494381"
},
{
"name": "JavaScript",
"bytes": "19110753"
},
{
"name": "Makefile",
"bytes": "37978"
},
{
"name": "Objective-C",
"bytes": "1276474"
},
{
"name": "Objective-C++",
"bytes": "7755220"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "264470"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "423501"
},
{
"name": "Python",
"bytes": "7622149"
},
{
"name": "Shell",
"bytes": "478642"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python3 -i
#
# Copyright (c) 2013-2016 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,re,sys
from collections import namedtuple
import xml.etree.ElementTree as etree
def write( *args, **kwargs ):
    file = kwargs.pop('file', sys.stdout)
    end = kwargs.pop('end', '\n')
file.write( ' '.join([str(arg) for arg in args]) )
file.write( end )
# noneStr - returns string argument, or "" if argument is None.
# Used in converting etree Elements into text.
# str - string to convert
def noneStr(str):
if (str):
return str
else:
return ""
# enquote - returns string argument with surrounding quotes,
# for serialization into Python code.
def enquote(str):
if (str):
return "'" + str + "'"
else:
return None
# Primary sort key for regSortFeatures.
# Sorts by category of the feature name string:
# Core API features (those defined with a <feature> tag)
# ARB/KHR/OES (Khronos extensions)
# other (EXT/vendor extensions)
# This will need changing for Vulkan!
def regSortCategoryKey(feature):
if (feature.elem.tag == 'feature'):
return 0
elif (feature.category == 'ARB' or
feature.category == 'KHR' or
feature.category == 'OES'):
return 1
else:
return 2
# Alternate sort key for regSortFeatures.
# Sorts by extension name. Not used by the default sort procedure below.
def regSortNameKey(feature):
return feature.name
# Secondary sort key for regSortFeatures.
# Sorts by feature version. <extension> elements all have version number "0"
def regSortFeatureVersionKey(feature):
return float(feature.version)
# Tertiary sort key for regSortFeatures.
# Sorts by extension number. <feature> elements all have extension number 0.
def regSortExtensionNumberKey(feature):
return int(feature.number)
# regSortFeatures - default sort procedure for features.
# Sorts by primary key of feature category ('feature' or 'extension')
# then by version number (for features)
# then by extension number (for extensions)
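# Relies on Python's stable sort: sorting by the least significant key first
# and the most significant key last yields a multi-key ordering.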
def regSortFeatures(featureList):
featureList.sort(key = regSortExtensionNumberKey)
featureList.sort(key = regSortFeatureVersionKey)
featureList.sort(key = regSortCategoryKey)
# GeneratorOptions - base class for options used during header production
# These options are target language independent, and used by
# Registry.apiGen() and by base OutputGenerator objects.
#
# Members
# filename - name of file to generate, or None to write to stdout.
# apiname - string matching <api> 'apiname' attribute, e.g. 'gl'.
# profile - string specifying API profile , e.g. 'core', or None.
# versions - regex matching API versions to process interfaces for.
# Normally '.*' or '[0-9]\.[0-9]' to match all defined versions.
# emitversions - regex matching API versions to actually emit
# interfaces for (though all requested versions are considered
# when deciding which interfaces to generate). For GL 4.3 glext.h,
# this might be '1\.[2-5]|[2-4]\.[0-9]'.
# defaultExtensions - If not None, a string which must in its
# entirety match the pattern in the "supported" attribute of
# the <extension>. Defaults to None. Usually the same as apiname.
# addExtensions - regex matching names of additional extensions
# to include. Defaults to None.
# removeExtensions - regex matching names of extensions to
# remove (after defaultExtensions and addExtensions). Defaults
# to None.
# sortProcedure - takes a list of FeatureInfo objects and sorts
# them in place to a preferred order in the generated output.
# Default is core API versions, ARB/KHR/OES extensions, all
# other extensions, alphabetically within each group.
# The regex patterns can be None or empty, in which case they match
# nothing.
class GeneratorOptions:
"""Represents options during header production from an API registry"""
def __init__(self,
filename = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
sortProcedure = regSortFeatures):
self.filename = filename
self.apiname = apiname
self.profile = profile
self.versions = self.emptyRegex(versions)
self.emitversions = self.emptyRegex(emitversions)
self.defaultExtensions = defaultExtensions
self.addExtensions = self.emptyRegex(addExtensions)
self.removeExtensions = self.emptyRegex(removeExtensions)
self.sortProcedure = sortProcedure
#
# Substitute a regular expression which matches no version
# or extension names for None or the empty string.
def emptyRegex(self,pat):
if (pat == None or pat == ''):
return '_nomatch_^'
else:
return pat
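# Illustrative sketch (not called anywhere in this module): emptyRegex()
# substitutes a pattern that can never match, so downstream code can call
# re.match() on these options without first checking for None.
def _exampleEmptyRegex():
    opts = GeneratorOptions(addExtensions=None)
    # '_nomatch_^' cannot match any name, so this is always None:
    return re.match(opts.addExtensions, 'VK_KHR_surface')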
# CGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by COutputGenerator objects during C language header
# generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class CGeneratorOptions(GeneratorOptions):
"""Represents options during C interface generation for headers"""
def __init__(self,
filename = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
genFuncPointers = True,
protectFile = True,
protectFeature = True,
protectProto = None,
protectProtoStr = None,
apicall = '',
apientry = '',
apientryp = '',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0):
GeneratorOptions.__init__(self, filename, apiname, profile,
versions, emitversions, defaultExtensions,
addExtensions, removeExtensions, sortProcedure)
self.prefixText = prefixText
self.genFuncPointers = genFuncPointers
self.protectFile = protectFile
self.protectFeature = protectFeature
self.protectProto = protectProto
self.protectProtoStr = protectProtoStr
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
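# Illustrative sketch (assumed driver usage; the genvk-style driver script
# is not part of this module, and the option values below are examples):
# constructing options for C header output, to be handed to
# Registry.apiGen() together with a COutputGenerator.
def _exampleCGeneratorOptions():
    return CGeneratorOptions(filename='vulkan.h',
                             apiname='vulkan',
                             defaultExtensions='vulkan',
                             prefixText=['/* Example prefix comment */'],
                             protectFile=True,
                             apientry='VKAPI_CALL ',
                             apientryp='VKAPI_PTR *',
                             alignFuncParam=48)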
# DocGeneratorOptions - subclass of GeneratorOptions.
#
# Shares many members with CGeneratorOptions, since
# both are writing C-style declarations:
#
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# genDirectory - directory into which to generate include files
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
#
# Additional members:
# expandEnumerants - True if BEGIN_RANGE / END_RANGE / RANGE_SIZE /
# MAX_ENUM padding tokens should be added to generated enumerated types.
#
class DocGeneratorOptions(GeneratorOptions):
"""Represents options during C interface generation for Asciidoc"""
def __init__(self,
filename = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
apicall = '',
apientry = '',
apientryp = '',
genDirectory = 'gen',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0,
expandEnumerants = True):
GeneratorOptions.__init__(self, filename, apiname, profile,
versions, emitversions, defaultExtensions,
addExtensions, removeExtensions, sortProcedure)
self.prefixText = prefixText
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.genDirectory = genDirectory
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
self.expandEnumerants = expandEnumerants
# ThreadGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by thread-safety validation output generators during
# source generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - True if #ifdef..#endif protection should be
# generated around prototype declarations
# protectProtoStr - #ifdef symbol to use around prototype
# declarations, if protected
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class ThreadGeneratorOptions(GeneratorOptions):
"""Represents options during C interface generation for headers"""
def __init__(self,
filename = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
genFuncPointers = True,
protectFile = True,
protectFeature = True,
protectProto = True,
protectProtoStr = True,
apicall = '',
apientry = '',
apientryp = '',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0,
genDirectory = None):
GeneratorOptions.__init__(self, filename, apiname, profile,
versions, emitversions, defaultExtensions,
addExtensions, removeExtensions, sortProcedure)
self.prefixText = prefixText
self.genFuncPointers = genFuncPointers
self.protectFile = protectFile
self.protectFeature = protectFeature
self.protectProto = protectProto
self.protectProtoStr = protectProtoStr
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
self.genDirectory = genDirectory
# ParamCheckerGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by ParamCheckerOutputGenerator objects during parameter validation
# generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class ParamCheckerGeneratorOptions(GeneratorOptions):
"""Represents options during C interface generation for headers"""
def __init__(self,
filename = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
genFuncPointers = True,
protectFile = True,
protectFeature = True,
protectProto = None,
protectProtoStr = None,
apicall = '',
apientry = '',
apientryp = '',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0,
genDirectory = None):
GeneratorOptions.__init__(self, filename, apiname, profile,
versions, emitversions, defaultExtensions,
addExtensions, removeExtensions, sortProcedure)
self.prefixText = prefixText
self.genFuncPointers = genFuncPointers
self.protectFile = protectFile
self.protectFeature = protectFeature
self.protectProto = protectProto
self.protectProtoStr = protectProtoStr
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
self.genDirectory = genDirectory
# OutputGenerator - base class for generating API interfaces.
# Manages basic logic, logging, and output file control
# Derived classes actually generate formatted output.
#
# ---- methods ----
# OutputGenerator(errFile, warnFile, diagFile)
# errFile, warnFile, diagFile - file handles to write errors,
# warnings, diagnostics to. May be None to not write.
# logMsg(level, *args) - log messages of different categories
# level - 'error', 'warn', or 'diag'. 'error' will also
# raise a UserWarning exception
# *args - print()-style arguments
# setExtMap(map) - specify a dictionary map from extension names to
# numbers, used in creating values for extension enumerants.
# beginFile(genOpts) - start a new interface file
# genOpts - GeneratorOptions controlling what's generated and how
# endFile() - finish an interface file, closing it when done
# beginFeature(interface, emit) - write interface for a feature
# and tag generated features as having been done.
# interface - element for the <version> / <extension> to generate
# emit - actually write to the header only when True
# endFeature() - finish an interface.
# genType(typeinfo,name) - generate interface for a type
# typeinfo - TypeInfo for a type
# genStruct(typeinfo,name) - generate interface for a C "struct" type.
# typeinfo - TypeInfo for a type interpreted as a struct
# genGroup(groupinfo,name) - generate interface for a group of enums (C "enum")
# groupinfo - GroupInfo for a group
# genEnum(enuminfo, name) - generate interface for an enum (constant)
# enuminfo - EnumInfo for an enum
# name - enum name
# genCmd(cmdinfo) - generate interface for a command
# cmdinfo - CmdInfo for a command
# makeCDecls(cmd) - return C prototype and function pointer typedef for a
# <command> Element, as a list of two strings
# cmd - Element for the <command>
# newline() - print a newline to the output file (utility function)
#
class OutputGenerator:
"""Generate specified API interfaces in a specific style, such as a C header"""
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
self.outFile = None
self.errFile = errFile
self.warnFile = warnFile
self.diagFile = diagFile
# Internal state
self.featureName = None
self.genOpts = None
self.registry = None
# Used for extension enum value generation
self.extBase = 1000000000
self.extBlockSize = 1000
#
# logMsg - write a message of different categories to different
# destinations.
# level -
# 'diag' (diagnostic, voluminous)
# 'warn' (warning)
# 'error' (fatal error - raises exception after logging)
# *args - print()-style arguments to direct to corresponding log
def logMsg(self, level, *args):
"""Log a message at the given level. Can be ignored or log to a file"""
if (level == 'error'):
strfile = io.StringIO()
write('ERROR:', *args, file=strfile)
if (self.errFile != None):
write(strfile.getvalue(), file=self.errFile)
raise UserWarning(strfile.getvalue())
elif (level == 'warn'):
if (self.warnFile != None):
write('WARNING:', *args, file=self.warnFile)
elif (level == 'diag'):
if (self.diagFile != None):
write('DIAG:', *args, file=self.diagFile)
else:
raise UserWarning(
'*** FATAL ERROR in Generator.logMsg: unknown level:' + level)
#
# enumToValue - parses and converts an <enum> tag into a value.
# Returns a list
# first element - integer representation of the value, or None
# if needsNum is False. The value must be a legal number
# if needsNum is True.
# second element - string representation of the value
# There are several possible representations of values.
# A 'value' attribute simply contains the value.
# A 'bitpos' attribute defines a value by specifying the bit
# position which is set in that value.
    # An 'offset','extnumber','extends' triplet specifies a value
    # as an offset into the block of enumerant values reserved for
    # the extension with the given 'extnumber', extending the
    # enumerated type named by 'extends'. This requires probing
    # the registry database, and embeds knowledge of the
    # Vulkan extension enum scheme in this function.
def enumToValue(self, elem, needsNum):
name = elem.get('name')
numVal = None
if ('value' in elem.keys()):
value = elem.get('value')
# print('About to translate value =', value, 'type =', type(value))
if (needsNum):
numVal = int(value, 0)
# If there's a non-integer, numeric 'type' attribute (e.g. 'u' or
# 'ull'), append it to the string value.
# t = enuminfo.elem.get('type')
# if (t != None and t != '' and t != 'i' and t != 's'):
# value += enuminfo.type
self.logMsg('diag', 'Enum', name, '-> value [', numVal, ',', value, ']')
return [numVal, value]
if ('bitpos' in elem.keys()):
value = elem.get('bitpos')
numVal = int(value, 0)
numVal = 1 << numVal
value = '0x%08x' % numVal
self.logMsg('diag', 'Enum', name, '-> bitpos [', numVal, ',', value, ']')
return [numVal, value]
if ('offset' in elem.keys()):
# Obtain values in the mapping from the attributes
enumNegative = False
offset = int(elem.get('offset'),0)
extnumber = int(elem.get('extnumber'),0)
extends = elem.get('extends')
if ('dir' in elem.keys()):
enumNegative = True
self.logMsg('diag', 'Enum', name, 'offset =', offset,
'extnumber =', extnumber, 'extends =', extends,
'enumNegative =', enumNegative)
# Now determine the actual enumerant value, as defined
# in the "Layers and Extensions" appendix of the spec.
numVal = self.extBase + (extnumber - 1) * self.extBlockSize + offset
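            # Worked example (illustrative): offset 3 in extension number 2
            # gives 1000000000 + (2 - 1) * 1000 + 3 = 1000001003; a 'dir'
            # attribute flips the sign below.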
if (enumNegative):
numVal = -numVal
value = '%d' % numVal
# More logic needed!
self.logMsg('diag', 'Enum', name, '-> offset [', numVal, ',', value, ']')
return [numVal, value]
return [None, None]
#
def beginFile(self, genOpts):
self.genOpts = genOpts
#
# Open specified output file. Not done in constructor since a
# Generator can be used without writing to a file.
if (self.genOpts.filename != None):
if (self.genOpts.genDirectory != None):
self.outFile = open(os.path.join(self.genOpts.genDirectory, self.genOpts.filename), 'w')
else:
self.outFile = open(self.genOpts.filename, 'w')
else:
self.outFile = sys.stdout
def endFile(self):
self.errFile and self.errFile.flush()
self.warnFile and self.warnFile.flush()
self.diagFile and self.diagFile.flush()
self.outFile.flush()
if (self.outFile != sys.stdout and self.outFile != sys.stderr):
self.outFile.close()
self.genOpts = None
#
def beginFeature(self, interface, emit):
self.emit = emit
self.featureName = interface.get('name')
# If there's an additional 'protect' attribute in the feature, save it
self.featureExtraProtect = interface.get('protect')
def endFeature(self):
# Derived classes responsible for emitting feature
self.featureName = None
self.featureExtraProtect = None
# Utility method to validate we're generating something only inside a
# <feature> tag
def validateFeature(self, featureType, featureName):
if (self.featureName == None):
            raise UserWarning('Attempt to generate', featureType,
                              featureName, 'when not in feature')
#
# Type generation
def genType(self, typeinfo, name):
self.validateFeature('type', name)
#
# Struct (e.g. C "struct" type) generation
def genStruct(self, typeinfo, name):
self.validateFeature('struct', name)
#
# Group (e.g. C "enum" type) generation
def genGroup(self, groupinfo, name):
self.validateFeature('group', name)
#
# Enumerant (really, constant) generation
def genEnum(self, enuminfo, name):
self.validateFeature('enum', name)
#
# Command generation
def genCmd(self, cmd, name):
self.validateFeature('command', name)
#
# Utility functions - turn a <proto> <name> into C-language prototype
# and typedef declarations for that name.
# name - contents of <name> tag
# tail - whatever text follows that tag in the Element
def makeProtoName(self, name, tail):
return self.genOpts.apientry + name + tail
def makeTypedefName(self, name, tail):
return '(' + self.genOpts.apientryp + 'PFN_' + name + tail + ')'
#
# makeCParamDecl - return a string which is an indented, formatted
# declaration for a <param> or <member> block (e.g. function parameter
# or structure/union member).
# param - Element (<param> or <member>) to format
# aligncol - if non-zero, attempt to align the nested <name> element
# at this column
def makeCParamDecl(self, param, aligncol):
paramdecl = ' ' + noneStr(param.text)
for elem in param:
text = noneStr(elem.text)
tail = noneStr(elem.tail)
if (elem.tag == 'name' and aligncol > 0):
self.logMsg('diag', 'Aligning parameter', elem.text, 'to column', self.genOpts.alignFuncParam)
# Align at specified column, if possible
paramdecl = paramdecl.rstrip()
oldLen = len(paramdecl)
paramdecl = paramdecl.ljust(aligncol)
newLen = len(paramdecl)
self.logMsg('diag', 'Adjust length of parameter decl from', oldLen, 'to', newLen, ':', paramdecl)
paramdecl += text + tail
return paramdecl
#
    # getCParamTypeLength - return the length of the type field in an indented, formatted
# declaration for a <param> or <member> block (e.g. function parameter
# or structure/union member).
# param - Element (<param> or <member>) to identify
def getCParamTypeLength(self, param):
paramdecl = ' ' + noneStr(param.text)
        newLen = 0
        for elem in param:
text = noneStr(elem.text)
tail = noneStr(elem.tail)
if (elem.tag == 'name'):
# Align at specified column, if possible
newLen = len(paramdecl.rstrip())
self.logMsg('diag', 'Identifying length of', elem.text, 'as', newLen)
paramdecl += text + tail
return newLen
#
# makeCDecls - return C prototype and function pointer typedef for a
# command, as a two-element list of strings.
# cmd - Element containing a <command> tag
def makeCDecls(self, cmd):
"""Generate C function pointer typedef for <command> Element"""
proto = cmd.find('proto')
params = cmd.findall('param')
# Begin accumulating prototype and typedef strings
pdecl = self.genOpts.apicall
tdecl = 'typedef '
#
# Insert the function return type/name.
# For prototypes, add APIENTRY macro before the name
# For typedefs, add (APIENTRY *<name>) around the name and
# use the PFN_cmdnameproc naming convention.
# Done by walking the tree for <proto> element by element.
# etree has elem.text followed by (elem[i], elem[i].tail)
# for each child element and any following text
# Leading text
pdecl += noneStr(proto.text)
tdecl += noneStr(proto.text)
# For each child element, if it's a <name> wrap in appropriate
# declaration. Otherwise append its contents and tail contents.
for elem in proto:
text = noneStr(elem.text)
tail = noneStr(elem.tail)
if (elem.tag == 'name'):
pdecl += self.makeProtoName(text, tail)
tdecl += self.makeTypedefName(text, tail)
else:
pdecl += text + tail
tdecl += text + tail
# Now add the parameter declaration list, which is identical
# for prototypes and typedefs. Concatenate all the text from
# a <param> node without the tags. No tree walking required
# since all tags are ignored.
# Uses: self.indentFuncProto
# self.indentFuncPointer
# self.alignFuncParam
# Might be able to doubly-nest the joins, e.g.
# ','.join(('_'.join([l[i] for i in range(0,len(l))])
n = len(params)
# Indented parameters
if n > 0:
indentdecl = '(\n'
for i in range(0,n):
paramdecl = self.makeCParamDecl(params[i], self.genOpts.alignFuncParam)
if (i < n - 1):
paramdecl += ',\n'
else:
paramdecl += ');'
indentdecl += paramdecl
else:
indentdecl = '(void);'
# Non-indented parameters
paramdecl = '('
if n > 0:
for i in range(0,n):
paramdecl += ''.join([t for t in params[i].itertext()])
if (i < n - 1):
paramdecl += ', '
else:
paramdecl += 'void'
paramdecl += ");";
return [ pdecl + indentdecl, tdecl + paramdecl ]
#
def newline(self):
write('', file=self.outFile)
def setRegistry(self, registry):
self.registry = registry
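# Illustrative sketch (not called anywhere in this module): exercising
# makeCDecls() on a minimal, hypothetical <command> element. 'gen' must be
# an OutputGenerator whose genOpts (apicall, apientry, apientryp,
# alignFuncParam) have already been set via beginFile().
def _exampleMakeCDecls(gen):
    cmd = etree.fromstring(
        '<command>'
        '<proto><type>void</type> <name>vkExampleCommand</name></proto>'
        '<param><type>int</type> <name>value</name></param>'
        '</command>')
    # Returns [prototype, function pointer typedef]; with empty calling
    # convention macros this is roughly
    #   'void vkExampleCommand(\n    int value);'
    #   'typedef void (PFN_vkExampleCommand)(int value);'
    return gen.makeCDecls(cmd)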
#
# COutputGenerator - subclass of OutputGenerator.
# Generates C-language API interfaces.
#
# ---- methods ----
# COutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class COutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
# This is an ordered list of sections in the header file.
TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
'group', 'bitmask', 'funcpointer', 'struct']
ALL_SECTIONS = TYPE_SECTIONS + ['commandPointer', 'command']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
# Internal state - accumulators for different inner block text
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
#
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# C-specific
#
# Multiple inclusion protection & C++ wrappers.
if (genOpts.protectFile and self.genOpts.filename):
            headerSym = re.sub(r'\.h', '_h_',
os.path.basename(self.genOpts.filename)).upper()
write('#ifndef', headerSym, file=self.outFile)
write('#define', headerSym, '1', file=self.outFile)
self.newline()
write('#ifdef __cplusplus', file=self.outFile)
write('extern "C" {', file=self.outFile)
write('#endif', file=self.outFile)
self.newline()
#
# User-supplied prefix text, if any (list of strings)
if (genOpts.prefixText):
for s in genOpts.prefixText:
write(s, file=self.outFile)
#
# Some boilerplate describing what was generated - this
# will probably be removed later since the extensions
# pattern may be very long.
# write('/* Generated C header for:', file=self.outFile)
# write(' * API:', genOpts.apiname, file=self.outFile)
# if (genOpts.profile):
# write(' * Profile:', genOpts.profile, file=self.outFile)
# write(' * Versions considered:', genOpts.versions, file=self.outFile)
# write(' * Versions emitted:', genOpts.emitversions, file=self.outFile)
# write(' * Default extensions included:', genOpts.defaultExtensions, file=self.outFile)
# write(' * Additional extensions included:', genOpts.addExtensions, file=self.outFile)
# write(' * Extensions removed:', genOpts.removeExtensions, file=self.outFile)
# write(' */', file=self.outFile)
def endFile(self):
# C-specific
# Finish C++ wrapper and multiple inclusion protection
self.newline()
write('#ifdef __cplusplus', file=self.outFile)
write('}', file=self.outFile)
write('#endif', file=self.outFile)
if (self.genOpts.protectFile and self.genOpts.filename):
self.newline()
write('#endif', file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# C-specific
# Accumulate includes, defines, types, enums, function pointer typedefs,
        # and function prototypes separately for this feature. They're only
# printed in endFeature().
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
def endFeature(self):
# C-specific
# Actually write the interface to the output file.
if (self.emit):
self.newline()
if (self.genOpts.protectFeature):
write('#ifndef', self.featureName, file=self.outFile)
# If type declarations are needed by other features based on
# this one, it may be necessary to suppress the ExtraProtect,
# or move it below the 'for section...' loop.
if (self.featureExtraProtect != None):
write('#ifdef', self.featureExtraProtect, file=self.outFile)
write('#define', self.featureName, '1', file=self.outFile)
for section in self.TYPE_SECTIONS:
contents = self.sections[section]
if contents:
write('\n'.join(contents), file=self.outFile)
self.newline()
if (self.genOpts.genFuncPointers and self.sections['commandPointer']):
write('\n'.join(self.sections['commandPointer']), file=self.outFile)
self.newline()
if (self.sections['command']):
if (self.genOpts.protectProto):
write(self.genOpts.protectProto,
self.genOpts.protectProtoStr, file=self.outFile)
write('\n'.join(self.sections['command']), end='', file=self.outFile)
if (self.genOpts.protectProto):
write('#endif', file=self.outFile)
else:
self.newline()
if (self.featureExtraProtect != None):
write('#endif /*', self.featureExtraProtect, '*/', file=self.outFile)
if (self.genOpts.protectFeature):
write('#endif /*', self.featureName, '*/', file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFeature(self)
#
# Append a definition to the specified section
def appendSection(self, section, text):
# self.sections[section].append('SECTION: ' + section + '\n')
self.sections[section].append(text)
#
# Type generation
def genType(self, typeinfo, name):
OutputGenerator.genType(self, typeinfo, name)
typeElem = typeinfo.elem
        # If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name)
else:
# Replace <apientry /> tags with an APIENTRY-style string
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, don't emit it.
s = noneStr(typeElem.text)
for elem in typeElem:
if (elem.tag == 'apientry'):
s += self.genOpts.apientry + noneStr(elem.tail)
else:
s += noneStr(elem.text) + noneStr(elem.tail)
if s:
# Add extra newline after multi-line entries.
if '\n' in s:
s += '\n'
self.appendSection(category, s)
#
# Struct (e.g. C "struct" type) generation.
# This is a special case of the <type> tag where the contents are
    # interpreted as a set of <member> tags instead of freeform C
    # type declarations. The <member> tags are just like <param>
# tags - they are a declaration of a struct or union member.
# Only simple member declarations are supported (no nested
# structs etc.)
def genStruct(self, typeinfo, typeName):
OutputGenerator.genStruct(self, typeinfo, typeName)
body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
# paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
        targetLen = 0
for member in typeinfo.elem.findall('.//member'):
targetLen = max(targetLen, self.getCParamTypeLength(member))
for member in typeinfo.elem.findall('.//member'):
body += self.makeCParamDecl(member, targetLen + 4)
body += ';\n'
body += '} ' + typeName + ';\n'
self.appendSection('struct', body)
#
# Group (e.g. C "enum" type) generation.
# These are concatenated together with other types.
def genGroup(self, groupinfo, groupName):
OutputGenerator.genGroup(self, groupinfo, groupName)
groupElem = groupinfo.elem
expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper()
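        # e.g. (illustrative) groupName 'VkStructureType' expands to
        # 'VK_STRUCTURE_TYPE'; a trailing 'KHR'-style suffix is split off so
        # range tokens read like VK_..._BEGIN_RANGE_KHR below.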
expandPrefix = expandName
expandSuffix = ''
expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName)
if expandSuffixMatch:
expandSuffix = '_' + expandSuffixMatch.group()
# Strip off the suffix from the prefix
expandPrefix = expandName.rsplit(expandSuffix, 1)[0]
# Prefix
body = "\ntypedef enum " + groupName + " {\n"
isEnum = ('FLAG_BITS' not in expandPrefix)
# Loop over the nested 'enum' tags. Keep track of the minimum and
# maximum numeric values, if they can be determined; but only for
# core API enumerants, not extension enumerants. This is inferred
# by looking for 'extends' attributes.
minName = None
for elem in groupElem.findall('enum'):
# Convert the value to an integer and use that to track min/max.
# Values of form -(number) are accepted but nothing more complex.
# Should catch exceptions here for more complex constructs. Not yet.
(numVal,strVal) = self.enumToValue(elem, True)
name = elem.get('name')
# Extension enumerants are only included if they are requested
# in addExtensions or match defaultExtensions.
if (elem.get('extname') is None or
re.match(self.genOpts.addExtensions,elem.get('extname')) is not None or
self.genOpts.defaultExtensions == elem.get('supported')):
body += " " + name + " = " + strVal + ",\n"
if (isEnum and elem.get('extends') is None):
if (minName == None):
minName = maxName = name
minValue = maxValue = numVal
elif (numVal < minValue):
minName = name
minValue = numVal
elif (numVal > maxValue):
maxName = name
maxValue = numVal
# Generate min/max value tokens and a range-padding enum. Need some
# additional padding to generate correct names...
if isEnum:
body += " " + expandPrefix + "_BEGIN_RANGE" + expandSuffix + " = " + minName + ",\n"
body += " " + expandPrefix + "_END_RANGE" + expandSuffix + " = " + maxName + ",\n"
body += " " + expandPrefix + "_RANGE_SIZE" + expandSuffix + " = (" + maxName + " - " + minName + " + 1),\n"
body += " " + expandPrefix + "_MAX_ENUM" + expandSuffix + " = 0x7FFFFFFF\n"
# Postfix
body += "} " + groupName + ";"
if groupElem.get('type') == 'bitmask':
section = 'bitmask'
else:
section = 'group'
self.appendSection(section, body)
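    # Illustrative example of the text genGroup() accumulates for a small,
    # hypothetical core enum (extension enumerants omitted):
    #   typedef enum VkExampleEnum {
    #       VK_EXAMPLE_ENUM_A = 0,
    #       VK_EXAMPLE_ENUM_B = 1,
    #       VK_EXAMPLE_ENUM_BEGIN_RANGE = VK_EXAMPLE_ENUM_A,
    #       VK_EXAMPLE_ENUM_END_RANGE = VK_EXAMPLE_ENUM_B,
    #       VK_EXAMPLE_ENUM_RANGE_SIZE = (VK_EXAMPLE_ENUM_B - VK_EXAMPLE_ENUM_A + 1),
    #       VK_EXAMPLE_ENUM_MAX_ENUM = 0x7FFFFFFF
    #   } VkExampleEnum;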
# Enumerant generation
# <enum> tags may specify their values in several ways, but are usually
# just integers.
def genEnum(self, enuminfo, name):
OutputGenerator.genEnum(self, enuminfo, name)
(numVal,strVal) = self.enumToValue(enuminfo.elem, False)
body = '#define ' + name.ljust(33) + ' ' + strVal
self.appendSection('enum', body)
#
# Command generation
def genCmd(self, cmdinfo, name):
OutputGenerator.genCmd(self, cmdinfo, name)
#
decls = self.makeCDecls(cmdinfo.elem)
self.appendSection('command', decls[0] + '\n')
if (self.genOpts.genFuncPointers):
self.appendSection('commandPointer', decls[1])
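# Illustrative sketch (assumed wiring; Registry lives in the companion
# reg.py module, which this file does not import, and the method names on
# it are assumptions based on the Registry.apiGen() interface mentioned
# above): how a driver would combine a registry, a COutputGenerator and
# CGeneratorOptions to emit a header.
def _exampleGenerateHeader(registry):
    gen = COutputGenerator(errFile=sys.stderr,
                           warnFile=sys.stderr,
                           diagFile=None)
    registry.setGenerator(gen)
    registry.apiGen(_exampleCGeneratorOptions())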
# DocOutputGenerator - subclass of OutputGenerator.
# Generates AsciiDoc includes with C-language API interfaces, for reference
# pages and the Vulkan specification. Similar to COutputGenerator, but
# each interface is written into a different file as determined by the
# options, only actual C types are emitted, and none of the boilerplate
# preprocessor code is emitted.
#
# ---- methods ----
# DocOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class DocOutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
#
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
def endFile(self):
OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
#
# Generate an include file
#
# directory - subdirectory to put file in
# basename - base name of the file
# contents - contents of the file (Asciidoc boilerplate aside)
def writeInclude(self, directory, basename, contents):
# Create file
        filename = os.path.join(self.genOpts.genDirectory, directory, basename + '.txt')
self.logMsg('diag', '# Generating include file:', filename)
fp = open(filename, 'w')
# Asciidoc anchor
write('// WARNING: DO NOT MODIFY! This file is automatically generated from the vk.xml registry', file=fp)
write('ifndef::doctype-manpage[]', file=fp)
write('[[{0},{0}]]'.format(basename), file=fp)
write('["source","{basebackend@docbook:c++:cpp}",title=""]', file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('["source","{basebackend@docbook:c++:cpp}"]', file=fp)
write('endif::doctype-manpage[]', file=fp)
write('------------------------------------------------------------------------------', file=fp)
write(contents, file=fp)
write('------------------------------------------------------------------------------', file=fp)
fp.close()
#
# Type generation
def genType(self, typeinfo, name):
OutputGenerator.genType(self, typeinfo, name)
typeElem = typeinfo.elem
        # If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name)
else:
# Replace <apientry /> tags with an APIENTRY-style string
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, don't emit it.
s = noneStr(typeElem.text)
for elem in typeElem:
if (elem.tag == 'apientry'):
s += self.genOpts.apientry + noneStr(elem.tail)
else:
s += noneStr(elem.text) + noneStr(elem.tail)
if (len(s) > 0):
if (category == 'bitmask'):
self.writeInclude('flags', name, s + '\n')
elif (category == 'enum'):
self.writeInclude('enums', name, s + '\n')
elif (category == 'funcpointer'):
                    self.writeInclude('funcpointers', name, s + '\n')
else:
self.logMsg('diag', '# NOT writing include file for type:',
name, 'category: ', category)
else:
self.logMsg('diag', '# NOT writing empty include file for type', name)
#
# Struct (e.g. C "struct" type) generation.
# This is a special case of the <type> tag where the contents are
    # interpreted as a set of <member> tags instead of freeform C
    # type declarations. The <member> tags are just like <param>
# tags - they are a declaration of a struct or union member.
# Only simple member declarations are supported (no nested
# structs etc.)
def genStruct(self, typeinfo, typeName):
OutputGenerator.genStruct(self, typeinfo, typeName)
s = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
# paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
        targetLen = 0
for member in typeinfo.elem.findall('.//member'):
targetLen = max(targetLen, self.getCParamTypeLength(member))
for member in typeinfo.elem.findall('.//member'):
s += self.makeCParamDecl(member, targetLen + 4)
s += ';\n'
s += '} ' + typeName + ';'
self.writeInclude('structs', typeName, s)
#
# Group (e.g. C "enum" type) generation.
# These are concatenated together with other types.
def genGroup(self, groupinfo, groupName):
OutputGenerator.genGroup(self, groupinfo, groupName)
groupElem = groupinfo.elem
# See if we need min/max/num/padding at end
expand = self.genOpts.expandEnumerants
if expand:
expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper()
isEnum = ('FLAG_BITS' not in expandName)
expandPrefix = expandName
expandSuffix = ''
# Look for a suffix
expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName)
if expandSuffixMatch:
expandSuffix = '_' + expandSuffixMatch.group()
# Strip off the suffix from the prefix
expandPrefix = expandName.rsplit(expandSuffix, 1)[0]
# Prefix
s = "typedef enum " + groupName + " {\n"
# Loop over the nested 'enum' tags. Keep track of the minimum and
# maximum numeric values, if they can be determined.
minName = None
for elem in groupElem.findall('enum'):
# Convert the value to an integer and use that to track min/max.
# Values of form -(number) are accepted but nothing more complex.
# Should catch exceptions here for more complex constructs. Not yet.
(numVal,strVal) = self.enumToValue(elem, True)
name = elem.get('name')
# Extension enumerants are only included if they are requested
# in addExtensions or match defaultExtensions.
if (elem.get('extname') is None or
re.match(self.genOpts.addExtensions,elem.get('extname')) is not None or
self.genOpts.defaultExtensions == elem.get('supported')):
s += " " + name + " = " + strVal + ",\n"
if (expand and isEnum and elem.get('extends') is None):
if (minName == None):
minName = maxName = name
minValue = maxValue = numVal
elif (numVal < minValue):
minName = name
minValue = numVal
elif (numVal > maxValue):
maxName = name
maxValue = numVal
# Generate min/max value tokens and a range-padding enum. Need some
# additional padding to generate correct names...
if (expand):
s += "\n"
if isEnum:
s += " " + expandPrefix + "_BEGIN_RANGE" + expandSuffix + " = " + minName + ",\n"
s += " " + expandPrefix + "_END_RANGE" + expandSuffix + " = " + maxName + ",\n"
s += " " + expandPrefix + "_RANGE_SIZE" + expandSuffix + " = (" + maxName + " - " + minName + " + 1),\n"
s += " " + expandPrefix + "_MAX_ENUM" + expandSuffix + " = 0x7FFFFFFF\n"
# Postfix
s += "} " + groupName + ";"
self.writeInclude('enums', groupName, s)
# Enumerant generation
# <enum> tags may specify their values in several ways, but are usually
# just integers.
def genEnum(self, enuminfo, name):
OutputGenerator.genEnum(self, enuminfo, name)
(numVal,strVal) = self.enumToValue(enuminfo.elem, False)
s = '#define ' + name.ljust(33) + ' ' + strVal
self.logMsg('diag', '# NOT writing compile-time constant', name)
# self.writeInclude('consts', name, s)
#
# Command generation
def genCmd(self, cmdinfo, name):
OutputGenerator.genCmd(self, cmdinfo, name)
#
decls = self.makeCDecls(cmdinfo.elem)
self.writeInclude('protos', name, decls[0])
# PyOutputGenerator - subclass of OutputGenerator.
# Generates Python data structures describing API names.
# Similar to DocOutputGenerator, but writes a single
# file.
#
# ---- methods ----
# PyOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class PyOutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
#
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
        for dict in [ 'flags', 'enums', 'structs', 'consts',
                      'protos', 'funcpointers' ]:
write(dict, '= {}', file=self.outFile)
def endFile(self):
OutputGenerator.endFile(self)
#
# Add a name from the interface
#
# dict - type of name (see beginFile above)
# name - name to add
# value - A serializable Python value for the name
def addName(self, dict, name, value=None):
write(dict + "['" + name + "'] = ", value, file=self.outFile)
#
# Type generation
# For 'struct' or 'union' types, defer to genStruct() to
# add to the dictionary.
# For 'bitmask' types, add the type name to the 'flags' dictionary,
# with the value being the corresponding 'enums' name defining
# the acceptable flag bits.
# For 'enum' types, add the type name to the 'enums' dictionary,
# with the value being '@STOPHERE@' (because this case seems
# never to happen).
# For 'funcpointer' types, add the type name to the 'funcpointers'
# dictionary.
# For 'handle' and 'define' types, add the handle or #define name
# to the 'struct' dictionary, because that's how the spec sources
# tag these types even though they aren't structs.
def genType(self, typeinfo, name):
OutputGenerator.genType(self, typeinfo, name)
typeElem = typeinfo.elem
        # If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name)
else:
            # Not a struct or union: count the tag's total text content to
            # decide whether the type carries a declaration to classify.
            # Types with no text content are reported as unprocessed.
count = len(noneStr(typeElem.text))
for elem in typeElem:
count += len(noneStr(elem.text)) + len(noneStr(elem.tail))
if (count > 0):
if (category == 'bitmask'):
requiredEnum = typeElem.get('requires')
self.addName('flags', name, enquote(requiredEnum))
elif (category == 'enum'):
# This case never seems to come up!
# @enums C 'enum' name Dictionary of enumerant names
self.addName('enums', name, enquote('@STOPHERE@'))
elif (category == 'funcpointer'):
self.addName('funcpointers', name, None)
elif (category == 'handle' or category == 'define'):
self.addName('structs', name, None)
else:
write('# Unprocessed type:', name, 'category:', category, file=self.outFile)
else:
write('# Unprocessed type:', name, file=self.outFile)
#
# Struct (e.g. C "struct" type) generation.
#
# Add the struct name to the 'structs' dictionary, with the
# value being an ordered list of the struct member names.
def genStruct(self, typeinfo, typeName):
OutputGenerator.genStruct(self, typeinfo, typeName)
members = [member.text for member in typeinfo.elem.findall('.//member/name')]
self.addName('structs', typeName, members)
#
# Group (e.g. C "enum" type) generation.
# These are concatenated together with other types.
#
# Add the enum type name to the 'enums' dictionary, with
# the value being an ordered list of the enumerant names.
# Add each enumerant name to the 'consts' dictionary, with
# the value being the enum type the enumerant is part of.
def genGroup(self, groupinfo, groupName):
OutputGenerator.genGroup(self, groupinfo, groupName)
groupElem = groupinfo.elem
# @enums C 'enum' name Dictionary of enumerant names
# @consts C enumerant/const name Name of corresponding 'enums' key
# Loop over the nested 'enum' tags. Keep track of the minimum and
# maximum numeric values, if they can be determined.
enumerants = [elem.get('name') for elem in groupElem.findall('enum')]
for name in enumerants:
self.addName('consts', name, enquote(groupName))
self.addName('enums', groupName, enumerants)
# Enumerant generation (compile-time constants)
#
# Add the constant name to the 'consts' dictionary, with the
# value being None to indicate that the constant isn't
# an enumeration value.
def genEnum(self, enuminfo, name):
OutputGenerator.genEnum(self, enuminfo, name)
# @consts C enumerant/const name Name of corresponding 'enums' key
self.addName('consts', name, None)
#
# Command generation
#
# Add the command name to the 'protos' dictionary, with the
# value being an ordered list of the parameter names.
def genCmd(self, cmdinfo, name):
OutputGenerator.genCmd(self, cmdinfo, name)
params = [param.text for param in cmdinfo.elem.findall('param/name')]
self.addName('protos', name, params)
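# Illustrative example of the Python source the generator above emits
# (entries shown for real Vulkan names; the exact set depends on the
# registry and options):
#   structs['VkExtent2D'] = ['width', 'height']
#   consts['VK_SUCCESS'] = 'VkResult'
#   protos['vkCreateInstance'] = ['pCreateInfo', 'pAllocator', 'pInstance']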
# ValidityOutputGenerator - subclass of OutputGenerator.
# Generates AsciiDoc includes of valid usage information, for reference
# pages and the Vulkan specification. Similar to DocOutputGenerator.
#
# ---- methods ----
# ValidityOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genCmd(cmdinfo)
class ValidityOutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
def endFile(self):
OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
def makeParameterName(self, name):
return 'pname:' + name
def makeStructName(self, name):
return 'sname:' + name
def makeBaseTypeName(self, name):
return 'basetype:' + name
def makeEnumerationName(self, name):
return 'elink:' + name
def makeEnumerantName(self, name):
return 'ename:' + name
def makeFLink(self, name):
return 'flink:' + name
#
# Generate an include file
#
# directory - subdirectory to put file in
# basename - base name of the file
    # validity, threadsafety, commandpropertiesentry, successcodes,
    # errorcodes - individual sections of the include file; any may be None
def writeInclude(self, directory, basename, validity, threadsafety, commandpropertiesentry, successcodes, errorcodes):
# Create file
        filename = os.path.join(self.genOpts.genDirectory, directory, basename + '.txt')
self.logMsg('diag', '# Generating include file:', filename)
fp = open(filename, 'w')
# Asciidoc anchor
write('// WARNING: DO NOT MODIFY! This file is automatically generated from the vk.xml registry', file=fp)
# Valid Usage
if validity is not None:
write('ifndef::doctype-manpage[]', file=fp)
write('.Valid Usage', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('Valid Usage', file=fp)
write('-----------', file=fp)
write('endif::doctype-manpage[]', file=fp)
write(validity, file=fp, end='')
write('ifndef::doctype-manpage[]', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('', file=fp)
# Host Synchronization
if threadsafety is not None:
write('ifndef::doctype-manpage[]', file=fp)
write('.Host Synchronization', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('Host Synchronization', file=fp)
write('--------------------', file=fp)
write('endif::doctype-manpage[]', file=fp)
write(threadsafety, file=fp, end='')
write('ifndef::doctype-manpage[]', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('', file=fp)
# Command Properties - contained within a block, to avoid table numbering
if commandpropertiesentry is not None:
write('ifndef::doctype-manpage[]', file=fp)
write('.Command Properties', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('Command Properties', file=fp)
write('------------------', file=fp)
write('endif::doctype-manpage[]', file=fp)
write('[options="header", width="100%"]', file=fp)
write('|=====================', file=fp)
write('|Command Buffer Levels|Render Pass Scope|Supported Queue Types', file=fp)
write(commandpropertiesentry, file=fp)
write('|=====================', file=fp)
write('ifndef::doctype-manpage[]', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('', file=fp)
# Success Codes - contained within a block, to avoid table numbering
if successcodes is not None or errorcodes is not None:
write('ifndef::doctype-manpage[]', file=fp)
write('.Return Codes', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('Return Codes', file=fp)
write('------------', file=fp)
write('endif::doctype-manpage[]', file=fp)
if successcodes is not None:
write('ifndef::doctype-manpage[]', file=fp)
write('<<fundamentals-successcodes,Success>>::', file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('On success, this command returns::', file=fp)
write('endif::doctype-manpage[]', file=fp)
write(successcodes, file=fp)
if errorcodes is not None:
write('ifndef::doctype-manpage[]', file=fp)
write('<<fundamentals-errorcodes,Failure>>::', file=fp)
write('endif::doctype-manpage[]', file=fp)
write('ifdef::doctype-manpage[]', file=fp)
write('On failure, this command returns::', file=fp)
write('endif::doctype-manpage[]', file=fp)
write(errorcodes, file=fp)
write('ifndef::doctype-manpage[]', file=fp)
write('*' * 80, file=fp)
write('endif::doctype-manpage[]', file=fp)
write('', file=fp)
fp.close()
#
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = False
paramtype = param.find('type')
if paramtype.tail is not None and '*' in paramtype.tail:
ispointer = True
return ispointer
#
# Check if the parameter passed in is a static array
def paramIsStaticArray(self, param):
        tail = param.find('name').tail
        if tail is not None and tail[0] == '[':
            return True
        return False
#
# Get the length of a parameter that's been identified as a static array
def staticArrayLength(self, param):
paramname = param.find('name')
paramenumsize = param.find('enum')
if paramenumsize is not None:
return paramenumsize.text
else:
return paramname.tail[1:-1]
#
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
return param.attrib.get('len') is not None
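    # Example (illustrative): a <param> whose 'len' attribute is
    # "createInfoCount" is an array; one whose <name> tail begins with '['
    # (e.g. '[4]') is a static array; a '*' in the <type> tail marks a
    # pointer.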
#
# Get the parent of a handle object
def getHandleParent(self, typename):
types = self.registry.findall("types/type")
for elem in types:
if (elem.find("name") is not None and elem.find('name').text == typename) or elem.attrib.get('name') == typename:
return elem.attrib.get('parent')
#
# Check if a parent object is dispatchable or not
def isHandleTypeDispatchable(self, handlename):
handle = self.registry.find("types/type/[name='" + handlename + "'][@category='handle']")
if handle is not None and handle.find('type').text == 'VK_DEFINE_HANDLE':
return True
else:
return False
def isHandleOptional(self, param, params):
# See if the handle is optional
isOptional = False
# Simple, if it's optional, return true
if param.attrib.get('optional') is not None:
return True
# If no validity is being generated, it usually means that validity is complex and not absolute, so let's say yes.
if param.attrib.get('noautovalidity') is not None:
return True
# If the parameter is an array and we haven't already returned, find out if any of the len parameters are optional
if self.paramIsArray(param):
lengths = param.attrib.get('len').split(',')
for length in lengths:
                if length != 'null-terminated' and length != '1':
for otherparam in params:
if otherparam.find('name').text == length:
if otherparam.attrib.get('optional') is not None:
return True
return False
#
# Get the category of a type
def getTypeCategory(self, typename):
types = self.registry.findall("types/type")
for elem in types:
if (elem.find("name") is not None and elem.find('name').text == typename) or elem.attrib.get('name') == typename:
return elem.attrib.get('category')
#
    # Make the introductory chunk of an asciidoc validation line for a
    # parameter, covering optionality and array-length conditions
def makeAsciiDocPreChunk(self, param, params):
paramname = param.find('name')
paramtype = param.find('type')
# General pre-amble. Check optionality and add stuff.
asciidoc = '* '
if self.paramIsStaticArray(param):
asciidoc += 'Any given element of '
elif self.paramIsArray(param):
lengths = param.attrib.get('len').split(',')
# Find all the parameters that are called out as optional, so we can document that they might be zero, and the array may be ignored
optionallengths = []
for length in lengths:
                if length != 'null-terminated' and length != '1':
for otherparam in params:
if otherparam.find('name').text == length:
if otherparam.attrib.get('optional') is not None:
if self.paramIsPointer(otherparam):
optionallengths.append('the value referenced by ' + self.makeParameterName(length))
else:
optionallengths.append(self.makeParameterName(length))
# Document that these arrays may be ignored if any of the length values are 0
if len(optionallengths) != 0 or param.attrib.get('optional') is not None:
asciidoc += 'If '
if len(optionallengths) != 0:
if len(optionallengths) == 1:
asciidoc += optionallengths[0]
asciidoc += ' is '
else:
asciidoc += ' or '.join(optionallengths)
asciidoc += ' are '
asciidoc += 'not `0`, '
if len(optionallengths) != 0 and param.attrib.get('optional') is not None:
asciidoc += 'and '
if param.attrib.get('optional') is not None:
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' is not `NULL`, '
elif param.attrib.get('optional') is not None:
# Don't generate this stub for bitflags
if self.getTypeCategory(paramtype.text) != 'bitmask':
if param.attrib.get('optional').split(',')[0] == 'true':
asciidoc += 'If '
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' is not '
if self.paramIsArray(param) or self.paramIsPointer(param) or self.isHandleTypeDispatchable(paramtype.text):
asciidoc += '`NULL`'
elif self.getTypeCategory(paramtype.text) == 'handle':
asciidoc += 'sname:VK_NULL_HANDLE'
else:
asciidoc += '`0`'
asciidoc += ', '
return asciidoc
#
# Make the generic asciidoc line chunk portion used for all parameters.
# May return an empty string if nothing to validate.
def createValidationLineForParameterIntroChunk(self, param, params, typetext):
asciidoc = ''
paramname = param.find('name')
paramtype = param.find('type')
asciidoc += self.makeAsciiDocPreChunk(param, params)
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' must: be '
if self.paramIsArray(param):
# Arrays. These are hard to get right, apparently
lengths = param.attrib.get('len').split(',')
            if lengths[0] == 'null-terminated':
                asciidoc += 'a null-terminated '
            elif lengths[0] == '1':
asciidoc += 'a pointer to '
else:
asciidoc += 'a pointer to an array of '
# Handle equations, which are currently denoted with latex
if 'latexmath:' in lengths[0]:
asciidoc += lengths[0]
else:
asciidoc += self.makeParameterName(lengths[0])
asciidoc += ' '
for length in lengths[1:]:
                if length == 'null-terminated': # This should always be the last thing. If it ever isn't for some bizarre reason, then this will need some massaging.
                    asciidoc += 'null-terminated '
                elif length == '1':
asciidoc += 'pointers to '
else:
asciidoc += 'pointers to arrays of '
# Handle equations, which are currently denoted with latex
                if 'latexmath:' in length:
asciidoc += length
else:
asciidoc += self.makeParameterName(length)
asciidoc += ' '
# Void pointers don't actually point at anything - remove the word "to"
if paramtype.text == 'void':
if lengths[-1] == '1':
if len(lengths) > 1:
asciidoc = asciidoc[:-5] # Take care of the extra s added by the post array chunk function. #HACK#
else:
asciidoc = asciidoc[:-4]
else:
# An array of void values is a byte array.
asciidoc += 'byte'
elif paramtype.text == 'char':
# A null terminated array of chars is a string
if lengths[-1] == 'null-terminated':
asciidoc += 'string'
else:
# Else it's just a bunch of chars
asciidoc += 'char value'
elif param.text is not None:
# If a value is "const" that means it won't get modified, so it must be valid going into the function.
if 'const' in param.text:
typecategory = self.getTypeCategory(paramtype.text)
if (typecategory != 'struct' and typecategory != 'union' and typecategory != 'basetype' and typecategory is not None) or not self.isStructAlwaysValid(paramtype.text):
asciidoc += 'valid '
asciidoc += typetext
# pluralize
if len(lengths) > 1 or (lengths[0] != '1' and lengths[0] != 'null-terminated'):
asciidoc += 's'
elif self.paramIsPointer(param):
# Handle pointers - which are really special case arrays (i.e. they don't have a length)
pointercount = paramtype.tail.count('*')
# Could be multi-level pointers (e.g. ppData - pointer to a pointer). Handle that.
for i in range(0, pointercount):
asciidoc += 'a pointer to '
if paramtype.text == 'void':
# If there's only one pointer, it's optional, and it doesn't point at anything in particular - we don't need any language.
if pointercount == 1 and param.attrib.get('optional') is not None:
return '' # early return
else:
# Pointer to nothing in particular - delete the " to " portion
asciidoc = asciidoc[:-4]
else:
# Add an article for English semantic win
asciidoc += 'a '
# If a value is "const" that means it won't get modified, so it must be valid going into the function.
if param.text is not None and paramtype.text != 'void':
if 'const' in param.text:
asciidoc += 'valid '
asciidoc += typetext
else:
# Non-pointer, non-optional things must be valid
asciidoc += 'a valid '
asciidoc += typetext
if asciidoc != '':
asciidoc += '\n'
# Add additional line for non-optional bitmasks
if self.getTypeCategory(paramtype.text) == 'bitmask':
if param.attrib.get('optional') is None:
asciidoc += '* '
if self.paramIsArray(param):
asciidoc += 'Each element of '
asciidoc += 'pname:'
asciidoc += paramname.text
asciidoc += ' mustnot: be `0`'
asciidoc += '\n'
return asciidoc
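    # Illustrative result (hypothetical parameter, not taken from vk.xml): a
    # parameter declared as 'const char* const* ppNames' with
    # len="nameCount,null-terminated" walks the array branch above and yields:
    #   pname:ppNames must: be a pointer to an array of pname:nameCount null-terminated strings
    # with the trailing 's' supplied by the pluralization step.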
def makeAsciiDocLineForParameter(self, param, params, typetext):
if param.attrib.get('noautovalidity') is not None:
return ''
asciidoc = self.createValidationLineForParameterIntroChunk(param, params, typetext)
return asciidoc
    # Try to determine whether a structure is always considered valid (i.e. there are no rules governing its acceptance)
def isStructAlwaysValid(self, structname):
struct = self.registry.find("types/type[@name='" + structname + "']")
params = struct.findall('member')
validity = struct.find('validity')
if validity is not None:
return False
for param in params:
paramname = param.find('name')
paramtype = param.find('type')
typecategory = self.getTypeCategory(paramtype.text)
if paramname.text == 'pNext':
return False
if paramname.text == 'sType':
return False
if paramtype.text == 'void' or paramtype.text == 'char' or self.paramIsArray(param) or self.paramIsPointer(param):
if self.makeAsciiDocLineForParameter(param, params, '') != '':
return False
elif typecategory == 'handle' or typecategory == 'enum' or typecategory == 'bitmask' or param.attrib.get('returnedonly') == 'true':
return False
elif typecategory == 'struct' or typecategory == 'union':
if self.isStructAlwaysValid(paramtype.text) is False:
return False
return True
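    # Sketch of what this recursion treats as "always valid" (hypothetical
    # struct, not from vk.xml): a struct containing only plain scalar members,
    # e.g. 'struct VkWidgetLimits { uint32_t maxWidgets; uint32_t maxDepth; };',
    # has no sType/pNext members, no handles, enums or bitmasks, and no explicit
    # validity block, so no standalone validity line is generated for
    # parameters of that type.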
#
# Make an entire asciidoc line for a given parameter
def createValidationLineForParameter(self, param, params, typecategory):
asciidoc = ''
paramname = param.find('name')
paramtype = param.find('type')
if paramtype.text == 'void' or paramtype.text == 'char':
# Chars and void are special cases - needs care inside the generator functions
# A null-terminated char array is a string, else it's chars.
# An array of void values is a byte array, a void pointer is just a pointer to nothing in particular
asciidoc += self.makeAsciiDocLineForParameter(param, params, '')
elif typecategory == 'bitmask':
bitsname = paramtype.text.replace('Flags', 'FlagBits')
if self.registry.find("enums[@name='" + bitsname + "']") is None:
asciidoc += '* '
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' must: be `0`'
asciidoc += '\n'
else:
if self.paramIsArray(param):
asciidoc += self.makeAsciiDocLineForParameter(param, params, 'combinations of ' + self.makeEnumerationName(bitsname) + ' value')
else:
asciidoc += self.makeAsciiDocLineForParameter(param, params, 'combination of ' + self.makeEnumerationName(bitsname) + ' values')
elif typecategory == 'handle':
asciidoc += self.makeAsciiDocLineForParameter(param, params, self.makeStructName(paramtype.text) + ' handle')
elif typecategory == 'enum':
asciidoc += self.makeAsciiDocLineForParameter(param, params, self.makeEnumerationName(paramtype.text) + ' value')
elif typecategory == 'struct':
if (self.paramIsArray(param) or self.paramIsPointer(param)) or not self.isStructAlwaysValid(paramtype.text):
asciidoc += self.makeAsciiDocLineForParameter(param, params, self.makeStructName(paramtype.text) + ' structure')
elif typecategory == 'union':
if (self.paramIsArray(param) or self.paramIsPointer(param)) or not self.isStructAlwaysValid(paramtype.text):
asciidoc += self.makeAsciiDocLineForParameter(param, params, self.makeStructName(paramtype.text) + ' union')
elif self.paramIsArray(param) or self.paramIsPointer(param):
asciidoc += self.makeAsciiDocLineForParameter(param, params, self.makeBaseTypeName(paramtype.text) + ' value')
return asciidoc
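    # Illustrative dispatch (hypothetical parameter, not taken from vk.xml): a
    # bitmask parameter pname:flags of type VkWidgetCreateFlags, where the
    # VkWidgetCreateFlagBits enumeration exists in the registry, takes the
    # bitmask branch and produces roughly:
    #   pname:flags must: be a valid combination of VkWidgetCreateFlagBits values
    # whereas a Flags type with no corresponding FlagBits enum instead gets the
    # stricter 'must: be `0`' requirement.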
#
# Make an asciidoc validity entry for a handle's parent object
def makeAsciiDocHandleParent(self, param, params):
asciidoc = ''
paramname = param.find('name')
paramtype = param.find('type')
# Deal with handle parents
handleparent = self.getHandleParent(paramtype.text)
if handleparent is not None:
parentreference = None
for otherparam in params:
if otherparam.find('type').text == handleparent:
parentreference = otherparam.find('name').text
if parentreference is not None:
asciidoc += '* '
if self.isHandleOptional(param, params):
if self.paramIsArray(param):
asciidoc += 'Each element of '
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' that is a valid handle'
else:
asciidoc += 'If '
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' is a valid handle, it'
else:
if self.paramIsArray(param):
asciidoc += 'Each element of '
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' must: have been created, allocated or retrieved from '
asciidoc += self.makeParameterName(parentreference)
asciidoc += '\n'
return asciidoc
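    # Illustrative result (hypothetical parameters, not taken from vk.xml): for
    # a handle parameter pname:widget whose parent handle type appears in the
    # same command as pname:device, the generated bullet reads roughly:
    #   * pname:widget must: have been created, allocated or retrieved from pname:device
    # prefixed with an 'If ... is a valid handle' qualifier when the handle is optional.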
#
# Generate an asciidoc validity line for the sType value of a struct
def makeStructureType(self, blockname, param):
asciidoc = '* '
paramname = param.find('name')
paramtype = param.find('type')
asciidoc += self.makeParameterName(paramname.text)
asciidoc += ' must: be '
structuretype = ''
for elem in re.findall(r'(([A-Z][a-z]+)|([A-Z][A-Z]+))', blockname):
if elem[0] == 'Vk':
structuretype += 'VK_STRUCTURE_TYPE_'
else:
structuretype += elem[0].upper()
structuretype += '_'
asciidoc += self.makeEnumerantName(structuretype[:-1])
asciidoc += '\n'
return asciidoc
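    # Worked example of the conversion above: for blockname 'VkImageCreateInfo'
    # the regular expression splits the name into ['Vk', 'Image', 'Create',
    # 'Info']; 'Vk' becomes 'VK_STRUCTURE_TYPE_' and the remaining words are
    # upper-cased and joined with underscores, giving
    # VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO.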
#
# Generate an asciidoc validity line for the pNext value of a struct
def makeStructureExtensionPointer(self, param):
asciidoc = '* '
paramname = param.find('name')
paramtype = param.find('type')
asciidoc += self.makeParameterName(paramname.text)
validextensionstructs = param.attrib.get('validextensionstructs')
asciidoc += ' must: be `NULL`'
if validextensionstructs is not None:
extensionstructs = ['slink:' + x for x in validextensionstructs.split(',')]
asciidoc += ', or a pointer to a valid instance of '
if len(extensionstructs) == 1:
                asciidoc += extensionstructs[0]
else:
asciidoc += (', ').join(extensionstructs[:-1]) + ' or ' + extensionstructs[-1]
asciidoc += '\n'
return asciidoc
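    # Illustrative result (hypothetical extension structs): a pNext member with
    # validextensionstructs="VkFooInfoEXT,VkBarInfoEXT" produces:
    #   * pname:pNext must: be `NULL`, or a pointer to a valid instance of slink:VkFooInfoEXT or slink:VkBarInfoEXT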
#
# Generate all the valid usage information for a given struct or command
def makeValidUsageStatements(self, cmd, blockname, params, usages):
# Start the asciidoc block for this
asciidoc = ''
handles = []
anyparentedhandlesoptional = False
parentdictionary = {}
arraylengths = set()
for param in params:
paramname = param.find('name')
paramtype = param.find('type')
# Get the type's category
typecategory = self.getTypeCategory(paramtype.text)
# Generate language to independently validate a parameter
if paramtype.text == 'VkStructureType' and paramname.text == 'sType':
asciidoc += self.makeStructureType(blockname, param)
elif paramtype.text == 'void' and paramname.text == 'pNext':
asciidoc += self.makeStructureExtensionPointer(param)
else:
asciidoc += self.createValidationLineForParameter(param, params, typecategory)
# Ensure that any parenting is properly validated, and list that a handle was found
if typecategory == 'handle':
# Don't detect a parent for return values!
if not self.paramIsPointer(param) or (param.text is not None and 'const' in param.text):
parent = self.getHandleParent(paramtype.text)
if parent is not None:
handles.append(param)
# If any param is optional, it affects the output
if self.isHandleOptional(param, params):
anyparentedhandlesoptional = True
# Find the first dispatchable parent
ancestor = parent
while ancestor is not None and not self.isHandleTypeDispatchable(ancestor):
ancestor = self.getHandleParent(ancestor)
# If one was found, add this parameter to the parent dictionary
if ancestor is not None:
if ancestor not in parentdictionary:
parentdictionary[ancestor] = []
if self.paramIsArray(param):
parentdictionary[ancestor].append('the elements of ' + self.makeParameterName(paramname.text))
else:
parentdictionary[ancestor].append(self.makeParameterName(paramname.text))
# Get the array length for this parameter
arraylength = param.attrib.get('len')
if arraylength is not None:
for onelength in arraylength.split(','):
arraylengths.add(onelength)
# For any vkQueue* functions, there might be queue type data
if 'vkQueue' in blockname:
# The queue type must be valid
queuetypes = cmd.attrib.get('queues')
if queuetypes is not None:
queuebits = []
for queuetype in re.findall(r'([^,]+)', queuetypes):
queuebits.append(queuetype.replace('_',' '))
asciidoc += '* '
asciidoc += 'The pname:queue must: support '
if len(queuebits) == 1:
asciidoc += queuebits[0]
else:
asciidoc += (', ').join(queuebits[:-1])
asciidoc += ' or '
asciidoc += queuebits[-1]
asciidoc += ' operations'
asciidoc += '\n'
if 'vkCmd' in blockname:
# The commandBuffer parameter must be being recorded
asciidoc += '* '
asciidoc += 'pname:commandBuffer must: be in the recording state'
asciidoc += '\n'
# The queue type must be valid
queuetypes = cmd.attrib.get('queues')
queuebits = []
for queuetype in re.findall(r'([^,]+)', queuetypes):
queuebits.append(queuetype.replace('_',' '))
asciidoc += '* '
asciidoc += 'The sname:VkCommandPool that pname:commandBuffer was allocated from must: support '
if len(queuebits) == 1:
asciidoc += queuebits[0]
else:
asciidoc += (', ').join(queuebits[:-1])
asciidoc += ' or '
asciidoc += queuebits[-1]
asciidoc += ' operations'
asciidoc += '\n'
# Must be called inside/outside a renderpass appropriately
renderpass = cmd.attrib.get('renderpass')
if renderpass != 'both':
asciidoc += '* This command must: only be called '
asciidoc += renderpass
asciidoc += ' of a render pass instance'
asciidoc += '\n'
# Must be in the right level command buffer
cmdbufferlevel = cmd.attrib.get('cmdbufferlevel')
if cmdbufferlevel != 'primary,secondary':
asciidoc += '* pname:commandBuffer must: be a '
asciidoc += cmdbufferlevel
asciidoc += ' sname:VkCommandBuffer'
asciidoc += '\n'
# Any non-optional arraylengths should specify they must be greater than 0
for param in params:
paramname = param.find('name')
for arraylength in arraylengths:
if paramname.text == arraylength and param.attrib.get('optional') is None:
# Get all the array dependencies
                    arrays = cmd.findall("param/[@len='" + arraylength + "']")
# Get all the optional array dependencies, including those not generating validity for some reason
optionalarrays = cmd.findall("param/[@len='" + arraylength + "'][@optional='true']")
optionalarrays.extend(cmd.findall("param/[@len='" + arraylength + "'][@noautovalidity='true']"))
asciidoc += '* '
# Allow lengths to be arbitrary if all their dependents are optional
if len(optionalarrays) == len(arrays) and len(optionalarrays) != 0:
asciidoc += 'If '
if len(optionalarrays) > 1:
asciidoc += 'any of '
for array in optionalarrays[:-1]:
                            asciidoc += self.makeParameterName(array.find('name').text)
asciidoc += ', '
if len(optionalarrays) > 1:
asciidoc += 'and '
asciidoc += self.makeParameterName(optionalarrays[-1].find('name').text)
asciidoc += ' are '
else:
asciidoc += self.makeParameterName(optionalarrays[-1].find('name').text)
asciidoc += ' is '
asciidoc += 'not `NULL`, '
if self.paramIsPointer(param):
asciidoc += 'the value referenced by '
elif self.paramIsPointer(param):
asciidoc += 'The value referenced by '
asciidoc += self.makeParameterName(arraylength)
asciidoc += ' must: be greater than `0`'
asciidoc += '\n'
# Find the parents of all objects referenced in this command
for param in handles:
asciidoc += self.makeAsciiDocHandleParent(param, params)
# Find the common ancestors of objects
noancestorscount = 0
while noancestorscount < len(parentdictionary):
noancestorscount = 0
oldparentdictionary = parentdictionary.copy()
for parent in oldparentdictionary.items():
ancestor = self.getHandleParent(parent[0])
while ancestor is not None and ancestor not in parentdictionary:
ancestor = self.getHandleParent(ancestor)
if ancestor is not None:
parentdictionary[ancestor] += parentdictionary.pop(parent[0])
else:
# No ancestors possible - so count it up
noancestorscount += 1
# Add validation language about common ancestors
for parent in parentdictionary.items():
if len(parent[1]) > 1:
parentlanguage = '* '
parentlanguage += 'Each of '
parentlanguage += ", ".join(parent[1][:-1])
parentlanguage += ' and '
parentlanguage += parent[1][-1]
if anyparentedhandlesoptional is True:
parentlanguage += ' that are valid handles'
parentlanguage += ' must: have been created, allocated or retrieved from the same '
parentlanguage += self.makeStructName(parent[0])
parentlanguage += '\n'
# Capitalize and add to the main language
asciidoc += parentlanguage
# Add in any plain-text validation language that should be added
for usage in usages:
asciidoc += '* '
asciidoc += usage
asciidoc += '\n'
# In case there's nothing to report, return None
if asciidoc == '':
return None
# Delimit the asciidoc block
return asciidoc
def makeThreadSafetyBlock(self, cmd, paramtext):
"""Generate C function pointer typedef for <command> Element"""
paramdecl = ''
        # For any vkCmd* functions, the VkCommandPool that commandBuffer was allocated from is externally synchronized
        if cmd.find('proto/name') is not None and 'vkCmd' in cmd.find('proto/name').text:
            paramdecl += '* '
            paramdecl += 'The sname:VkCommandPool that pname:commandBuffer was allocated from'
paramdecl += '\n'
# Find and add any parameters that are thread unsafe
explicitexternsyncparams = cmd.findall(paramtext + "[@externsync]")
if (explicitexternsyncparams is not None):
for param in explicitexternsyncparams:
externsyncattribs = param.attrib.get('externsync')
paramname = param.find('name')
for externsyncattrib in externsyncattribs.split(','):
paramdecl += '* '
paramdecl += 'Host access to '
if externsyncattrib == 'true':
if self.paramIsArray(param):
paramdecl += 'each member of ' + self.makeParameterName(paramname.text)
elif self.paramIsPointer(param):
paramdecl += 'the object referenced by ' + self.makeParameterName(paramname.text)
else:
paramdecl += self.makeParameterName(paramname.text)
else:
paramdecl += 'pname:'
paramdecl += externsyncattrib
paramdecl += ' must: be externally synchronized\n'
# Find and add any "implicit" parameters that are thread unsafe
implicitexternsyncparams = cmd.find('implicitexternsyncparams')
if (implicitexternsyncparams is not None):
for elem in implicitexternsyncparams:
paramdecl += '* '
paramdecl += 'Host access to '
paramdecl += elem.text
paramdecl += ' must: be externally synchronized\n'
if (paramdecl == ''):
return None
else:
return paramdecl
def makeCommandPropertiesTableEntry(self, cmd, name):
if 'vkCmd' in name:
            # Extract the command buffer levels, render pass restriction and supported queue types
cmdbufferlevel = cmd.attrib.get('cmdbufferlevel')
cmdbufferlevel = (' + \n').join(cmdbufferlevel.title().split(','))
renderpass = cmd.attrib.get('renderpass')
renderpass = renderpass.capitalize()
queues = cmd.attrib.get('queues')
queues = (' + \n').join(queues.upper().split(','))
return '|' + cmdbufferlevel + '|' + renderpass + '|' + queues
elif 'vkQueue' in name:
            # Queue commands have no command buffer level or render pass restriction
queues = cmd.attrib.get('queues')
if queues is None:
queues = 'Any'
else:
queues = (' + \n').join(queues.upper().split(','))
return '|-|-|' + queues
return None
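    # Illustrative table rows (hypothetical attribute values): a vkCmd* command
    # with cmdbufferlevel="primary,secondary", renderpass="inside" and
    # queues="graphics,compute" yields the row
    #   '|Primary + \nSecondary|Inside|GRAPHICS + \nCOMPUTE'
    # while vkQueue* commands always report '-' for the first two columns.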
def makeSuccessCodes(self, cmd, name):
successcodes = cmd.attrib.get('successcodes')
if successcodes is not None:
            successcodes = successcodes.split(',')
return '* ename:' + '\n* ename:'.join(successcodes)
return None
def makeErrorCodes(self, cmd, name):
errorcodes = cmd.attrib.get('errorcodes')
if errorcodes is not None:
            errorcodes = errorcodes.split(',')
return '* ename:' + '\n* ename:'.join(errorcodes)
return None
#
# Command generation
def genCmd(self, cmdinfo, name):
OutputGenerator.genCmd(self, cmdinfo, name)
#
# Get all the parameters
params = cmdinfo.elem.findall('param')
usageelements = cmdinfo.elem.findall('validity/usage')
usages = []
for usage in usageelements:
usages.append(usage.text)
for usage in cmdinfo.additionalValidity:
usages.append(usage.text)
for usage in cmdinfo.removedValidity:
usages.remove(usage.text)
validity = self.makeValidUsageStatements(cmdinfo.elem, name, params, usages)
threadsafety = self.makeThreadSafetyBlock(cmdinfo.elem, 'param')
commandpropertiesentry = self.makeCommandPropertiesTableEntry(cmdinfo.elem, name)
successcodes = self.makeSuccessCodes(cmdinfo.elem, name)
errorcodes = self.makeErrorCodes(cmdinfo.elem, name)
self.writeInclude('validity/protos', name, validity, threadsafety, commandpropertiesentry, successcodes, errorcodes)
#
# Struct Generation
def genStruct(self, typeinfo, typename):
OutputGenerator.genStruct(self, typeinfo, typename)
# Anything that's only ever returned can't be set by the user, so shouldn't have any validity information.
if typeinfo.elem.attrib.get('returnedonly') is None:
params = typeinfo.elem.findall('member')
usageelements = typeinfo.elem.findall('validity/usage')
usages = []
for usage in usageelements:
usages.append(usage.text)
for usage in typeinfo.additionalValidity:
usages.append(usage.text)
for usage in typeinfo.removedValidity:
usages.remove(usage.text)
validity = self.makeValidUsageStatements(typeinfo.elem, typename, params, usages)
threadsafety = self.makeThreadSafetyBlock(typeinfo.elem, 'member')
self.writeInclude('validity/structs', typename, validity, threadsafety, None, None, None)
else:
# Still generate files for return only structs, in case this state changes later
self.writeInclude('validity/structs', typename, None, None, None, None, None)
#
# Type Generation
def genType(self, typeinfo, typename):
OutputGenerator.genType(self, typeinfo, typename)
category = typeinfo.elem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, typename)
# HostSynchronizationOutputGenerator - subclass of OutputGenerator.
# Generates AsciiDoc includes of the externsync parameter table for the
# fundamentals chapter of the Vulkan specification. Similar to
# DocOutputGenerator.
#
# ---- methods ----
# HostSynchronizationOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# genCmd(cmdinfo)
class HostSynchronizationOutputGenerator(OutputGenerator):
# Generate Host Synchronized Parameters in a table at the top of the spec
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
        self.threadsafety = {'parameters': '', 'parameterlists': '', 'implicit': ''}
def makeParameterName(self, name):
return 'pname:' + name
def makeFLink(self, name):
return 'flink:' + name
#
    # Write the accumulated externally synchronized parameter tables to their
    # include files (parameters.txt, parameterlists.txt and implicit.txt under
    # the target generation directory)
def writeInclude(self):
if self.threadsafety['parameters'] is not None:
# Create file
filename = self.genOpts.genDirectory + '/' + self.genOpts.filename + '/parameters.txt'
self.logMsg('diag', '# Generating include file:', filename)
fp = open(filename, 'w')
# Host Synchronization
write('// WARNING: DO NOT MODIFY! This file is automatically generated from the vk.xml registry', file=fp)
write('.Externally Synchronized Parameters', file=fp)
write('*' * 80, file=fp)
write(self.threadsafety['parameters'], file=fp, end='')
write('*' * 80, file=fp)
            write('', file=fp)
            fp.close()
if self.threadsafety['parameterlists'] is not None:
# Create file
filename = self.genOpts.genDirectory + '/' + self.genOpts.filename + '/parameterlists.txt'
self.logMsg('diag', '# Generating include file:', filename)
fp = open(filename, 'w')
# Host Synchronization
write('// WARNING: DO NOT MODIFY! This file is automatically generated from the vk.xml registry', file=fp)
write('.Externally Synchronized Parameter Lists', file=fp)
write('*' * 80, file=fp)
write(self.threadsafety['parameterlists'], file=fp, end='')
write('*' * 80, file=fp)
            write('', file=fp)
            fp.close()
if self.threadsafety['implicit'] is not None:
# Create file
filename = self.genOpts.genDirectory + '/' + self.genOpts.filename + '/implicit.txt'
self.logMsg('diag', '# Generating include file:', filename)
fp = open(filename, 'w')
# Host Synchronization
write('// WARNING: DO NOT MODIFY! This file is automatically generated from the vk.xml registry', file=fp)
write('.Implicit Externally Synchronized Parameters', file=fp)
write('*' * 80, file=fp)
write(self.threadsafety['implicit'], file=fp, end='')
write('*' * 80, file=fp)
            write('', file=fp)
            fp.close()
#
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
return param.attrib.get('len') is not None
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = False
paramtype = param.find('type')
if paramtype.tail is not None and '*' in paramtype.tail:
ispointer = True
return ispointer
# Turn the "name[].member[]" notation into plain English.
def makeThreadDereferenceHumanReadable(self, dereference):
matches = re.findall(r"[\w]+[^\w]*",dereference)
stringval = ''
for match in reversed(matches):
if '->' in match or '.' in match:
stringval += 'member of '
if '[]' in match:
stringval += 'each element of '
stringval += 'the '
stringval += self.makeParameterName(re.findall(r"[\w]+",match)[0])
stringval += ' '
stringval += 'parameter'
return stringval[0].upper() + stringval[1:]
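    # Worked example: the externsync expression 'pBindInfo[].memory' is split
    # into its components and rebuilt from the inside out, producing
    #   'The pname:memory member of each element of the pname:pBindInfo parameter'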
def makeThreadSafetyBlocks(self, cmd, paramtext):
protoname = cmd.find('proto/name').text
# Find and add any parameters that are thread unsafe
explicitexternsyncparams = cmd.findall(paramtext + "[@externsync]")
if (explicitexternsyncparams is not None):
for param in explicitexternsyncparams:
externsyncattribs = param.attrib.get('externsync')
paramname = param.find('name')
for externsyncattrib in externsyncattribs.split(','):
tempstring = '* '
if externsyncattrib == 'true':
if self.paramIsArray(param):
tempstring += 'Each element of the '
elif self.paramIsPointer(param):
tempstring += 'The object referenced by the '
else:
tempstring += 'The '
tempstring += self.makeParameterName(paramname.text)
tempstring += ' parameter'
else:
tempstring += self.makeThreadDereferenceHumanReadable(externsyncattrib)
tempstring += ' in '
tempstring += self.makeFLink(protoname)
tempstring += '\n'
if ' element of ' in tempstring:
self.threadsafety['parameterlists'] += tempstring
else:
self.threadsafety['parameters'] += tempstring
# Find and add any "implicit" parameters that are thread unsafe
implicitexternsyncparams = cmd.find('implicitexternsyncparams')
if (implicitexternsyncparams is not None):
for elem in implicitexternsyncparams:
self.threadsafety['implicit'] += '* '
self.threadsafety['implicit'] += elem.text[0].upper()
self.threadsafety['implicit'] += elem.text[1:]
self.threadsafety['implicit'] += ' in '
self.threadsafety['implicit'] += self.makeFLink(protoname)
self.threadsafety['implicit'] += '\n'
        # For any vkCmd* functions, the VkCommandPool that commandBuffer was allocated from is externally synchronized
if protoname is not None and 'vkCmd' in protoname:
self.threadsafety['implicit'] += '* '
self.threadsafety['implicit'] += 'The sname:VkCommandPool that pname:commandBuffer was allocated from, in '
self.threadsafety['implicit'] += self.makeFLink(protoname)
self.threadsafety['implicit'] += '\n'
#
# Command generation
def genCmd(self, cmdinfo, name):
OutputGenerator.genCmd(self, cmdinfo, name)
#
        # Get all the parameters
params = cmdinfo.elem.findall('param')
usages = cmdinfo.elem.findall('validity/usage')
self.makeThreadSafetyBlocks(cmdinfo.elem, 'param')
self.writeInclude()
# ThreadOutputGenerator - subclass of OutputGenerator.
# Generates Thread checking framework
#
# ---- methods ----
# ThreadOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class ThreadOutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
# This is an ordered list of sections in the header file.
TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
'group', 'bitmask', 'funcpointer', 'struct']
ALL_SECTIONS = TYPE_SECTIONS + ['command']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
# Internal state - accumulators for different inner block text
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
self.intercepts = []
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
return param.attrib.get('len') is not None
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = False
for elem in param:
#write('paramIsPointer '+elem.text, file=sys.stderr)
#write('elem.tag '+elem.tag, file=sys.stderr)
#if (elem.tail is None):
# write('elem.tail is None', file=sys.stderr)
#else:
# write('elem.tail '+elem.tail, file=sys.stderr)
            if (elem.tag != 'type') and (elem.tail is not None) and '*' in elem.tail:
ispointer = True
# write('is pointer', file=sys.stderr)
return ispointer
def makeThreadUseBlock(self, cmd, functionprefix):
"""Generate C function pointer typedef for <command> Element"""
paramdecl = ''
thread_check_dispatchable_objects = [
"VkCommandBuffer",
"VkDevice",
"VkInstance",
"VkQueue",
]
thread_check_nondispatchable_objects = [
"VkBuffer",
"VkBufferView",
"VkCommandPool",
"VkDescriptorPool",
"VkDescriptorSetLayout",
"VkDeviceMemory",
"VkEvent",
"VkFence",
"VkFramebuffer",
"VkImage",
"VkImageView",
"VkPipeline",
"VkPipelineCache",
"VkPipelineLayout",
"VkQueryPool",
"VkRenderPass",
"VkSampler",
"VkSemaphore",
"VkShaderModule",
]
# Find and add any parameters that are thread unsafe
params = cmd.findall('param')
for param in params:
paramname = param.find('name')
if False: # self.paramIsPointer(param):
paramdecl += ' // not watching use of pointer ' + paramname.text + '\n'
else:
externsync = param.attrib.get('externsync')
if externsync == 'true':
if self.paramIsArray(param):
paramdecl += ' for (uint32_t index=0;index<' + param.attrib.get('len') + ';index++) {\n'
paramdecl += ' ' + functionprefix + 'WriteObject(my_data, ' + paramname.text + '[index]);\n'
paramdecl += ' }\n'
else:
paramdecl += ' ' + functionprefix + 'WriteObject(my_data, ' + paramname.text + ');\n'
elif (param.attrib.get('externsync')):
if self.paramIsArray(param):
# Externsync can list pointers to arrays of members to synchronize
paramdecl += ' for (uint32_t index=0;index<' + param.attrib.get('len') + ';index++) {\n'
for member in externsync.split(","):
# Replace first empty [] in member name with index
element = member.replace('[]','[index]',1)
if '[]' in element:
# Replace any second empty [] in element name with
# inner array index based on mapping array names like
# "pSomeThings[]" to "someThingCount" array size.
# This could be more robust by mapping a param member
# name to a struct type and "len" attribute.
limit = element[0:element.find('s[]')] + 'Count'
dotp = limit.rfind('.p')
limit = limit[0:dotp+1] + limit[dotp+2:dotp+3].lower() + limit[dotp+3:]
paramdecl += ' for(uint32_t index2=0;index2<'+limit+';index2++)'
element = element.replace('[]','[index2]')
paramdecl += ' ' + functionprefix + 'WriteObject(my_data, ' + element + ');\n'
paramdecl += ' }\n'
else:
# externsync can list members to synchronize
for member in externsync.split(","):
paramdecl += ' ' + functionprefix + 'WriteObject(my_data, ' + member + ');\n'
else:
paramtype = param.find('type')
if paramtype is not None:
paramtype = paramtype.text
else:
paramtype = 'None'
if paramtype in thread_check_dispatchable_objects or paramtype in thread_check_nondispatchable_objects:
if self.paramIsArray(param) and ('pPipelines' != paramname.text):
paramdecl += ' for (uint32_t index=0;index<' + param.attrib.get('len') + ';index++) {\n'
paramdecl += ' ' + functionprefix + 'ReadObject(my_data, ' + paramname.text + '[index]);\n'
paramdecl += ' }\n'
elif not self.paramIsPointer(param):
# Pointer params are often being created.
# They are not being read from.
paramdecl += ' ' + functionprefix + 'ReadObject(my_data, ' + paramname.text + ');\n'
explicitexternsyncparams = cmd.findall("param[@externsync]")
if (explicitexternsyncparams is not None):
for param in explicitexternsyncparams:
externsyncattrib = param.attrib.get('externsync')
paramname = param.find('name')
paramdecl += '// Host access to '
if externsyncattrib == 'true':
if self.paramIsArray(param):
paramdecl += 'each member of ' + paramname.text
elif self.paramIsPointer(param):
paramdecl += 'the object referenced by ' + paramname.text
else:
paramdecl += paramname.text
else:
paramdecl += externsyncattrib
paramdecl += ' must be externally synchronized\n'
# Find and add any "implicit" parameters that are thread unsafe
implicitexternsyncparams = cmd.find('implicitexternsyncparams')
if (implicitexternsyncparams is not None):
for elem in implicitexternsyncparams:
paramdecl += ' // '
paramdecl += elem.text
paramdecl += ' must be externally synchronized between host accesses\n'
if (paramdecl == ''):
return None
else:
return paramdecl
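    # Illustrative generated body (hypothetical command): for a command whose
    # first parameter is an externsync'd VkCommandBuffer named commandBuffer,
    # makeThreadUseBlock(cmd, 'start') emits C code along the lines of
    #     startWriteObject(my_data, commandBuffer);
    # followed by '// Host access to ... must be externally synchronized'
    # comments; the matching 'finish' block releases the object after the
    # dispatch call returns.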
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# C-specific
#
# Multiple inclusion protection & C++ namespace.
if (genOpts.protectFile and self.genOpts.filename):
            headerSym = '__' + re.sub(r'\.h', '_h_', os.path.basename(self.genOpts.filename))
write('#ifndef', headerSym, file=self.outFile)
write('#define', headerSym, '1', file=self.outFile)
self.newline()
write('namespace threading {', file=self.outFile)
self.newline()
#
# User-supplied prefix text, if any (list of strings)
if (genOpts.prefixText):
for s in genOpts.prefixText:
write(s, file=self.outFile)
def endFile(self):
# C-specific
# Finish C++ namespace and multiple inclusion protection
self.newline()
# record intercepted procedures
write('// intercepts', file=self.outFile)
write('struct { const char* name; PFN_vkVoidFunction pFunc;} procmap[] = {', file=self.outFile)
write('\n'.join(self.intercepts), file=self.outFile)
write('};\n', file=self.outFile)
self.newline()
write('} // namespace threading', file=self.outFile)
if (self.genOpts.protectFile and self.genOpts.filename):
self.newline()
write('#endif', file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
#write('// starting beginFeature', file=self.outFile)
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# C-specific
# Accumulate includes, defines, types, enums, function pointer typedefs,
        # and function prototypes separately for this feature. They're only
# printed in endFeature().
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
#write('// ending beginFeature', file=self.outFile)
def endFeature(self):
# C-specific
# Actually write the interface to the output file.
#write('// starting endFeature', file=self.outFile)
if (self.emit):
self.newline()
if (self.genOpts.protectFeature):
write('#ifndef', self.featureName, file=self.outFile)
# If type declarations are needed by other features based on
# this one, it may be necessary to suppress the ExtraProtect,
# or move it below the 'for section...' loop.
#write('// endFeature looking at self.featureExtraProtect', file=self.outFile)
if (self.featureExtraProtect != None):
write('#ifdef', self.featureExtraProtect, file=self.outFile)
#write('#define', self.featureName, '1', file=self.outFile)
for section in self.TYPE_SECTIONS:
#write('// endFeature writing section'+section, file=self.outFile)
contents = self.sections[section]
if contents:
write('\n'.join(contents), file=self.outFile)
self.newline()
#write('// endFeature looking at self.sections[command]', file=self.outFile)
if (self.sections['command']):
write('\n'.join(self.sections['command']), end='', file=self.outFile)
self.newline()
if (self.featureExtraProtect != None):
write('#endif /*', self.featureExtraProtect, '*/', file=self.outFile)
if (self.genOpts.protectFeature):
write('#endif /*', self.featureName, '*/', file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFeature(self)
#write('// ending endFeature', file=self.outFile)
#
# Append a definition to the specified section
def appendSection(self, section, text):
# self.sections[section].append('SECTION: ' + section + '\n')
self.sections[section].append(text)
#
# Type generation
def genType(self, typeinfo, name):
pass
#
# Struct (e.g. C "struct" type) generation.
# This is a special case of the <type> tag where the contents are
# interpreted as a set of <member> tags instead of freeform C
    # type declarations. The <member> tags are just like <param>
# tags - they are a declaration of a struct or union member.
# Only simple member declarations are supported (no nested
# structs etc.)
def genStruct(self, typeinfo, typeName):
OutputGenerator.genStruct(self, typeinfo, typeName)
body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
# paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
for member in typeinfo.elem.findall('.//member'):
body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
body += ';\n'
body += '} ' + typeName + ';\n'
self.appendSection('struct', body)
#
# Group (e.g. C "enum" type) generation.
# These are concatenated together with other types.
def genGroup(self, groupinfo, groupName):
pass
# Enumerant generation
# <enum> tags may specify their values in several ways, but are usually
# just integers.
def genEnum(self, enuminfo, name):
pass
#
# Command generation
def genCmd(self, cmdinfo, name):
        # Commands shadowed by interface functions are not implemented
interface_functions = [
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
]
if name in interface_functions:
return
special_functions = [
'vkGetDeviceProcAddr',
'vkGetInstanceProcAddr',
'vkCreateDevice',
'vkDestroyDevice',
'vkCreateInstance',
'vkDestroyInstance',
'vkAllocateCommandBuffers',
'vkFreeCommandBuffers',
'vkCreateDebugReportCallbackEXT',
'vkDestroyDebugReportCallbackEXT',
]
if name in special_functions:
decls = self.makeCDecls(cmdinfo.elem)
self.appendSection('command', '')
self.appendSection('command', '// declare only')
self.appendSection('command', decls[0])
self.intercepts += [ ' {"%s", reinterpret_cast<PFN_vkVoidFunction>(%s)},' % (name,name[2:]) ]
return
if "KHR" in name:
self.appendSection('command', '// TODO - not wrapping KHR function ' + name)
return
if ("DebugMarker" in name) and ("EXT" in name):
self.appendSection('command', '// TODO - not wrapping EXT function ' + name)
return
# Determine first if this function needs to be intercepted
startthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'start')
if startthreadsafety is None:
return
finishthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'finish')
# record that the function will be intercepted
if (self.featureExtraProtect != None):
self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
self.intercepts += [ ' {"%s", reinterpret_cast<PFN_vkVoidFunction>(%s)},' % (name,name[2:]) ]
if (self.featureExtraProtect != None):
self.intercepts += [ '#endif' ]
OutputGenerator.genCmd(self, cmdinfo, name)
#
decls = self.makeCDecls(cmdinfo.elem)
self.appendSection('command', '')
self.appendSection('command', decls[0][:-1])
self.appendSection('command', '{')
# setup common to call wrappers
# first parameter is always dispatchable
dispatchable_type = cmdinfo.elem.find('param/type').text
dispatchable_name = cmdinfo.elem.find('param/name').text
self.appendSection('command', ' dispatch_key key = get_dispatch_key('+dispatchable_name+');')
self.appendSection('command', ' layer_data *my_data = get_my_data_ptr(key, layer_data_map);')
if dispatchable_type in ["VkPhysicalDevice", "VkInstance"]:
self.appendSection('command', ' VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;')
else:
self.appendSection('command', ' VkLayerDispatchTable *pTable = my_data->device_dispatch_table;')
# Declare result variable, if any.
resulttype = cmdinfo.elem.find('proto/type')
if (resulttype != None and resulttype.text == 'void'):
resulttype = None
if (resulttype != None):
self.appendSection('command', ' ' + resulttype.text + ' result;')
assignresult = 'result = '
else:
assignresult = ''
self.appendSection('command', str(startthreadsafety))
params = cmdinfo.elem.findall('param/name')
paramstext = ','.join([str(param.text) for param in params])
API = cmdinfo.elem.attrib.get('name').replace('vk','pTable->',1)
self.appendSection('command', ' ' + assignresult + API + '(' + paramstext + ');')
self.appendSection('command', str(finishthreadsafety))
# Return result variable, if any.
if (resulttype != None):
self.appendSection('command', ' return result;')
self.appendSection('command', '}')
#
# override makeProtoName to drop the "vk" prefix
def makeProtoName(self, name, tail):
return self.genOpts.apientry + name[2:] + tail
# ParamCheckerOutputGenerator - subclass of OutputGenerator.
# Generates param checker layer code.
#
# ---- methods ----
# ParamCheckerOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class ParamCheckerOutputGenerator(OutputGenerator):
"""Generate ParamChecker code based on XML element attributes"""
# This is an ordered list of sections in the header file.
ALL_SECTIONS = ['command']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
self.INDENT_SPACES = 4
# Commands to ignore
self.blacklist = [
'vkGetInstanceProcAddr',
'vkGetDeviceProcAddr',
'vkEnumerateInstanceLayerProperties',
            'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
            'vkEnumerateDeviceExtensionProperties',
'vkCreateDebugReportCallbackEXT',
'vkDebugReportMessageEXT']
# Validation conditions for some special case struct members that are conditionally validated
self.structMemberValidationConditions = { 'VkPipelineColorBlendStateCreateInfo' : { 'logicOp' : '{}logicOpEnable == VK_TRUE' } }
# Internal state - accumulators for different inner block text
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
self.structNames = [] # List of Vulkan struct typenames
self.stypes = [] # Values from the VkStructureType enumeration
self.structTypes = dict() # Map of Vulkan struct typename to required VkStructureType
self.handleTypes = set() # Set of handle type names
self.commands = [] # List of CommandData records for all Vulkan commands
self.structMembers = [] # List of StructMemberData records for all Vulkan structs
self.validatedStructs = dict() # Map of structs type names to generated validation code for that struct type
self.enumRanges = dict() # Map of enum name to BEGIN/END range values
self.flags = set() # Map of flags typenames
self.flagBits = dict() # Map of flag bits typename to list of values
# Named tuples to store struct and command data
self.StructType = namedtuple('StructType', ['name', 'value'])
self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isstaticarray', 'isbool', 'israngedenum',
'isconst', 'isoptional', 'iscount', 'noautovalidity', 'len', 'extstructs',
'condition', 'cdecl'])
self.CommandData = namedtuple('CommandData', ['name', 'params', 'cdecl'])
self.StructMemberData = namedtuple('StructMemberData', ['name', 'members'])
#
def incIndent(self, indent):
inc = ' ' * self.INDENT_SPACES
if indent:
return indent + inc
return inc
#
def decIndent(self, indent):
if indent and (len(indent) > self.INDENT_SPACES):
return indent[:-self.INDENT_SPACES]
return ''
#
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# C-specific
#
# User-supplied prefix text, if any (list of strings)
if (genOpts.prefixText):
for s in genOpts.prefixText:
write(s, file=self.outFile)
#
# Multiple inclusion protection & C++ wrappers.
if (genOpts.protectFile and self.genOpts.filename):
            headerSym = re.sub(r'\.h', '_H', os.path.basename(self.genOpts.filename)).upper()
write('#ifndef', headerSym, file=self.outFile)
write('#define', headerSym, '1', file=self.outFile)
self.newline()
#
# Headers
write('#include <string>', file=self.outFile)
self.newline()
write('#include "vulkan/vulkan.h"', file=self.outFile)
write('#include "vk_layer_extension_utils.h"', file=self.outFile)
write('#include "parameter_validation_utils.h"', file=self.outFile)
#
# Macros
self.newline()
write('#ifndef UNUSED_PARAMETER', file=self.outFile)
write('#define UNUSED_PARAMETER(x) (void)(x)', file=self.outFile)
write('#endif // UNUSED_PARAMETER', file=self.outFile)
#
# Namespace
self.newline()
write('namespace parameter_validation {', file = self.outFile)
def endFile(self):
# C-specific
self.newline()
# Namespace
write('} // namespace parameter_validation', file = self.outFile)
# Finish C++ wrapper and multiple inclusion protection
if (self.genOpts.protectFile and self.genOpts.filename):
self.newline()
write('#endif', file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# C-specific
# Accumulate includes, defines, types, enums, function pointer typedefs,
        # and function prototypes separately for this feature. They're only
# printed in endFeature().
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
self.structNames = []
self.stypes = []
self.structTypes = dict()
self.handleTypes = set()
self.commands = []
self.structMembers = []
self.validatedStructs = dict()
self.enumRanges = dict()
self.flags = set()
self.flagBits = dict()
def endFeature(self):
# C-specific
# Actually write the interface to the output file.
if (self.emit):
self.newline()
# If type declarations are needed by other features based on
# this one, it may be necessary to suppress the ExtraProtect,
# or move it below the 'for section...' loop.
if (self.featureExtraProtect != None):
write('#ifdef', self.featureExtraProtect, file=self.outFile)
# Generate the struct member checking code from the captured data
self.processStructMemberData()
# Generate the command parameter checking code from the captured data
self.processCmdData()
# Write the declarations for the VkFlags values combining all flag bits
for flag in sorted(self.flags):
flagBits = flag.replace('Flags', 'FlagBits')
if flagBits in self.flagBits:
bits = self.flagBits[flagBits]
decl = 'const {} All{} = {}'.format(flag, flagBits, bits[0])
for bit in bits[1:]:
decl += '|' + bit
decl += ';'
write(decl, file=self.outFile)
self.newline()
# Write the parameter validation code to the file
if (self.sections['command']):
if (self.genOpts.protectProto):
write(self.genOpts.protectProto,
self.genOpts.protectProtoStr, file=self.outFile)
write('\n'.join(self.sections['command']), end='', file=self.outFile)
if (self.featureExtraProtect != None):
write('#endif /*', self.featureExtraProtect, '*/', file=self.outFile)
else:
self.newline()
# Finish processing in superclass
OutputGenerator.endFeature(self)
#
# Append a definition to the specified section
def appendSection(self, section, text):
# self.sections[section].append('SECTION: ' + section + '\n')
self.sections[section].append(text)
#
# Type generation
def genType(self, typeinfo, name):
OutputGenerator.genType(self, typeinfo, name)
typeElem = typeinfo.elem
        # If the type is a struct type, traverse the embedded <member> tags
# generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.structNames.append(name)
self.genStruct(typeinfo, name)
elif (category == 'handle'):
self.handleTypes.add(name)
elif (category == 'bitmask'):
self.flags.add(name)
#
# Struct parameter check generation.
# This is a special case of the <type> tag where the contents are
# interpreted as a set of <member> tags instead of freeform C
    # type declarations. The <member> tags are just like <param>
# tags - they are a declaration of a struct or union member.
# Only simple member declarations are supported (no nested
# structs etc.)
def genStruct(self, typeinfo, typeName):
OutputGenerator.genStruct(self, typeinfo, typeName)
conditions = self.structMemberValidationConditions[typeName] if typeName in self.structMemberValidationConditions else None
members = typeinfo.elem.findall('.//member')
#
# Iterate over members once to get length parameters for arrays
lens = set()
for member in members:
len = self.getLen(member)
if len:
lens.add(len)
#
# Generate member info
membersInfo = []
for member in members:
# Get the member's type and name
info = self.getTypeNameTuple(member)
type = info[0]
name = info[1]
stypeValue = ''
cdecl = self.makeCParamDecl(member, 0)
# Process VkStructureType
if type == 'VkStructureType':
# Extract the required struct type value from the comments
# embedded in the original text defining the 'typeinfo' element
rawXml = etree.tostring(typeinfo.elem).decode('ascii')
result = re.search(r'VK_STRUCTURE_TYPE_\w+', rawXml)
if result:
value = result.group(0)
else:
value = self.genVkStructureType(typeName)
# Store the required type value
self.structTypes[typeName] = self.StructType(name=name, value=value)
#
# Store pointer/array/string info
# Check for parameter name in lens set
iscount = False
if name in lens:
iscount = True
# The pNext members are not tagged as optional, but are treated as
# optional for parameter NULL checks. Static array members
# are also treated as optional to skip NULL pointer validation, as
# they won't be NULL.
isstaticarray = self.paramIsStaticArray(member)
isoptional = False
if self.paramIsOptional(member) or (name == 'pNext') or (isstaticarray):
isoptional = True
membersInfo.append(self.CommandParam(type=type, name=name,
ispointer=self.paramIsPointer(member),
isstaticarray=isstaticarray,
isbool=True if type == 'VkBool32' else False,
israngedenum=True if type in self.enumRanges else False,
isconst=True if 'const' in cdecl else False,
isoptional=isoptional,
iscount=iscount,
noautovalidity=True if member.attrib.get('noautovalidity') is not None else False,
len=self.getLen(member),
extstructs=member.attrib.get('validextensionstructs') if name == 'pNext' else None,
condition=conditions[name] if conditions and name in conditions else None,
cdecl=cdecl))
self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Capture group (e.g. C "enum" type) info to be used for
# param check code generation.
# These are concatenated together with other types.
def genGroup(self, groupinfo, groupName):
OutputGenerator.genGroup(self, groupinfo, groupName)
groupElem = groupinfo.elem
#
# Store the sType values
if groupName == 'VkStructureType':
for elem in groupElem.findall('enum'):
self.stypes.append(elem.get('name'))
elif 'FlagBits' in groupName:
bits = []
for elem in groupElem.findall('enum'):
bits.append(elem.get('name'))
if bits:
self.flagBits[groupName] = bits
else:
# Determine if begin/end ranges are needed (we don't do this for VkStructureType, which has a more finely grained check)
expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper()
expandPrefix = expandName
expandSuffix = ''
expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName)
if expandSuffixMatch:
expandSuffix = '_' + expandSuffixMatch.group()
# Strip off the suffix from the prefix
expandPrefix = expandName.rsplit(expandSuffix, 1)[0]
isEnum = ('FLAG_BITS' not in expandPrefix)
if isEnum:
self.enumRanges[groupName] = (expandPrefix + '_BEGIN_RANGE' + expandSuffix, expandPrefix + '_END_RANGE' + expandSuffix)
#
# Capture command parameter info to be used for param
# check code generation.
def genCmd(self, cmdinfo, name):
OutputGenerator.genCmd(self, cmdinfo, name)
if name not in self.blacklist:
params = cmdinfo.elem.findall('param')
# Get list of array lengths
lens = set()
for param in params:
len = self.getLen(param)
if len:
lens.add(len)
# Get param info
paramsInfo = []
for param in params:
paramInfo = self.getTypeNameTuple(param)
cdecl = self.makeCParamDecl(param, 0)
# Check for parameter name in lens set
iscount = False
if paramInfo[1] in lens:
iscount = True
paramsInfo.append(self.CommandParam(type=paramInfo[0], name=paramInfo[1],
ispointer=self.paramIsPointer(param),
isstaticarray=self.paramIsStaticArray(param),
isbool=True if paramInfo[0] == 'VkBool32' else False,
israngedenum=True if paramInfo[0] in self.enumRanges else False,
isconst=True if 'const' in cdecl else False,
isoptional=self.paramIsOptional(param),
iscount=iscount,
noautovalidity=True if param.attrib.get('noautovalidity') is not None else False,
len=self.getLen(param),
extstructs=None,
condition=None,
cdecl=cdecl))
self.commands.append(self.CommandData(name=name, params=paramsInfo, cdecl=self.makeCDecls(cmdinfo.elem)[0]))
#
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = 0
paramtype = param.find('type')
if (paramtype.tail is not None) and ('*' in paramtype.tail):
ispointer = paramtype.tail.count('*')
elif paramtype.text[:4] == 'PFN_':
# Treat function pointer typedefs as a pointer to a single value
ispointer = 1
return ispointer
#
# Check if the parameter passed in is a static array
def paramIsStaticArray(self, param):
isstaticarray = 0
paramname = param.find('name')
if (paramname.tail is not None) and ('[' in paramname.tail):
isstaticarray = paramname.tail.count('[')
return isstaticarray
#
# Check if the parameter passed in is optional
    # Returns a list of Boolean values for comma separated optional attributes (optional='false,true')
def paramIsOptional(self, param):
# See if the handle is optional
isoptional = False
# Simple, if it's optional, return true
optString = param.attrib.get('optional')
if optString:
if optString == 'true':
isoptional = True
elif ',' in optString:
opts = []
for opt in optString.split(','):
val = opt.strip()
if val == 'true':
opts.append(True)
elif val == 'false':
opts.append(False)
else:
                        print('Unrecognized optional attribute value', val)
isoptional = opts
return isoptional
#
# Check if the handle passed in is optional
# Uses the same logic as ValidityOutputGenerator.isHandleOptional
def isHandleOptional(self, param, lenParam):
# Simple, if it's optional, return true
if param.isoptional:
return True
# If no validity is being generated, it usually means that validity is complex and not absolute, so let's say yes.
if param.noautovalidity:
return True
# If the parameter is an array and we haven't already returned, find out if any of the len parameters are optional
if lenParam and lenParam.isoptional:
return True
return False
#
# Generate a VkStructureType based on a structure typename
def genVkStructureType(self, typename):
# Add underscore between lowercase then uppercase
value = re.sub('([a-z0-9])([A-Z])', r'\1_\2', typename)
# Change to uppercase
value = value.upper()
# Add STRUCTURE_TYPE_
return re.sub('VK_', 'VK_STRUCTURE_TYPE_', value)
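    # Worked example: genVkStructureType('VkImageCreateInfo') inserts an
    # underscore at each lower/upper-case boundary ('Vk_Image_Create_Info'),
    # upper-cases the result ('VK_IMAGE_CREATE_INFO') and rewrites the 'VK_'
    # prefix, giving 'VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO'.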
#
# Get the cached VkStructureType value for the specified struct typename, or generate a VkStructureType
# value assuming the struct is defined by a different feature
def getStructType(self, typename):
value = None
if typename in self.structTypes:
value = self.structTypes[typename].value
else:
value = self.genVkStructureType(typename)
self.logMsg('diag', 'ParameterValidation: Generating {} for {} structure type that was not defined by the current feature'.format(value, typename))
return value
#
# Retrieve the value of the len tag
def getLen(self, param):
result = None
len = param.attrib.get('len')
if len and len != 'null-terminated':
# For string arrays, 'len' can look like 'count,null-terminated',
# indicating that we have a null terminated array of strings. We
# strip the null-terminated from the 'len' field and only return
# the parameter specifying the string count
if 'null-terminated' in len:
result = len.split(',')[0]
else:
result = len
return result
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Find a named parameter in a parameter list
def getParamByName(self, params, name):
for param in params:
if param.name == name:
return param
return None
#
# Extract length values from latexmath. Currently an inflexible solution that looks for specific
# patterns that are found in vk.xml. Will need to be updated when new patterns are introduced.
def parseLateXMath(self, source):
name = 'ERROR'
decoratedName = 'ERROR'
if 'mathit' in source:
# Matches expressions similar to 'latexmath:[$\lceil{\mathit{rasterizationSamples} \over 32}\rceil$]'
match = re.match(r'latexmath\s*\:\s*\[\s*\$\\l(\w+)\s*\{\s*\\mathit\s*\{\s*(\w+)\s*\}\s*\\over\s*(\d+)\s*\}\s*\\r(\w+)\$\s*\]', source)
if not match or match.group(1) != match.group(4):
                raise RuntimeError('Unrecognized latexmath expression')
name = match.group(2)
decoratedName = '{}({}/{})'.format(*match.group(1, 2, 3))
else:
# Matches expressions similar to 'latexmath : [$dataSize \over 4$]'
            match = re.match(r'latexmath\s*\:\s*\[\s*\$\s*(\w+)\s*\\over\s*(\d+)\s*\$\s*\]', source)
            if not match:
                raise RuntimeError('Unrecognized latexmath expression')
            name = match.group(1)
decoratedName = '{}/{}'.format(*match.group(1, 2))
return name, decoratedName
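    # Worked examples of the two patterns handled above:
    #   parseLateXMath('latexmath:[$dataSize \over 4$]')
    #     returns ('dataSize', 'dataSize/4')
    #   parseLateXMath('latexmath:[$\lceil{\mathit{rasterizationSamples} \over 32}\rceil$]')
    #     returns ('rasterizationSamples', 'ceil(rasterizationSamples/32)')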
#
    # Get the length parameter record for the specified parameter name
def getLenParam(self, params, name):
lenParam = None
if name:
if '->' in name:
# The count is obtained by dereferencing a member of a struct parameter
lenParam = self.CommandParam(name=name, iscount=True, ispointer=False, isbool=False, israngedenum=False, isconst=False,
isstaticarray=None, isoptional=False, type=None, noautovalidity=False, len=None, extstructs=None,
condition=None, cdecl=None)
elif 'latexmath' in name:
lenName, decoratedName = self.parseLateXMath(name)
lenParam = self.getParamByName(params, lenName)
# TODO: Zero-check the result produced by the equation?
# Copy the stored len parameter entry and overwrite the name with the processed latexmath equation
#param = self.getParamByName(params, lenName)
#lenParam = self.CommandParam(name=decoratedName, iscount=param.iscount, ispointer=param.ispointer,
# isoptional=param.isoptional, type=param.type, len=param.len,
# isstaticarray=param.isstaticarray, extstructs=param.extstructs,
# noautovalidity=True, condition=None, cdecl=param.cdecl)
else:
lenParam = self.getParamByName(params, name)
return lenParam
#
# Convert a vulkan.h command declaration into a parameter_validation.h definition
def getCmdDef(self, cmd):
#
# Strip the trailing ';' and split into individual lines
lines = cmd.cdecl[:-1].split('\n')
# Replace Vulkan prototype
lines[0] = 'static bool parameter_validation_' + cmd.name + '('
# Replace the first argument with debug_report_data, when the first
# argument is a handle (not vkCreateInstance)
reportData = ' debug_report_data*'.ljust(self.genOpts.alignFuncParam) + 'report_data,'
if cmd.name != 'vkCreateInstance':
lines[1] = reportData
else:
lines.insert(1, reportData)
return '\n'.join(lines)
#
# Generate the code to check for a NULL dereference before calling the
# validation function
def genCheckedLengthCall(self, name, exprs):
count = name.count('->')
if count:
checkedExpr = []
localIndent = ''
elements = name.split('->')
# Open the if expression blocks
for i in range(0, count):
checkedExpr.append(localIndent + 'if ({} != NULL) {{\n'.format('->'.join(elements[0:i+1])))
localIndent = self.incIndent(localIndent)
# Add the validation expression
for expr in exprs:
checkedExpr.append(localIndent + expr)
# Close the if blocks
for i in range(0, count):
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
# No if statements were required
return exprs
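    # A hypothetical example: for the name 'pCreateInfo->queueCreateInfoCount'
    # and a single validation expression, the call is wrapped as:
    #   if (pCreateInfo != NULL) {
    #       <validation expression>
    #   }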
#
# Generate code to check for a specific condition before executing validation code
def genConditionalCall(self, prefix, condition, exprs):
checkedExpr = []
localIndent = ''
formattedCondition = condition.format(prefix)
checkedExpr.append(localIndent + 'if ({})\n'.format(formattedCondition))
checkedExpr.append(localIndent + '{\n')
localIndent = self.incIndent(localIndent)
for expr in exprs:
checkedExpr.append(localIndent + expr)
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
#
# Generate the sType check string
def makeStructTypeCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName):
checkExpr = []
stype = self.structTypes[value.type]
if lenValue:
# This is an array with a pointer to a count value
if lenValue.ispointer:
# When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
checkExpr.append('skipCall |= validate_struct_type_array(report_data, "{}", "{ldn}", "{dn}", "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {});\n'.format(
funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype.value, pf=prefix))
# This is an array with an integer count value
else:
checkExpr.append('skipCall |= validate_struct_type_array(report_data, "{}", "{ldn}", "{dn}", "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype.value, pf=prefix))
# This is an individual struct
else:
checkExpr.append('skipCall |= validate_struct_type(report_data, "{}", "{}", "{sv}", {}{vn}, {sv}, {});\n'.format(
funcPrintName, valuePrintName, prefix, valueRequired, vn=value.name, sv=stype.value))
return checkExpr
#
# Generate the handle check string
def makeHandleCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName):
checkExpr = []
if lenValue:
if lenValue.ispointer:
# This is assumed to be an output array with a pointer to a count value
                raise Exception('Unsupported parameter validation case: Output handle array elements are not NULL checked')
else:
# This is an array with an integer count value
checkExpr.append('skipCall |= validate_handle_array(report_data, "{}", "{ldn}", "{dn}", {pf}{ln}, {pf}{vn}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix))
else:
# This is assumed to be an output handle pointer
            raise Exception('Unsupported parameter validation case: Output handles are not NULL checked')
return checkExpr
#
# Generate check string for an array of VkFlags values
def makeFlagsArrayCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName):
checkExpr = []
flagBitsName = value.type.replace('Flags', 'FlagBits')
        if flagBitsName not in self.flagBits:
            raise Exception('Unsupported parameter validation case: array of reserved VkFlags')
else:
allFlags = 'All' + flagBitsName
checkExpr.append('skipCall |= validate_flags_array(report_data, "{}", "{}", "{}", "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcPrintName, lenPrintName, valuePrintName, flagBitsName, allFlags, lenValue.name, value.name, lenValueRequired, valueRequired, pf=prefix))
return checkExpr
#
# Generate pNext check string
def makeStructNextCheck(self, prefix, value, funcPrintName, valuePrintName):
checkExpr = []
# Generate an array of acceptable VkStructureType values for pNext
extStructCount = 0
extStructVar = 'NULL'
extStructNames = 'NULL'
if value.extstructs:
structs = value.extstructs.split(',')
checkExpr.append('const VkStructureType allowedStructs[] = {' + ', '.join([self.getStructType(s) for s in structs]) + '};\n')
extStructCount = 'ARRAY_SIZE(allowedStructs)'
extStructVar = 'allowedStructs'
extStructNames = '"' + ', '.join(structs) + '"'
checkExpr.append('skipCall |= validate_struct_pnext(report_data, "{}", "{}", {}, {}{}, {}, {});\n'.format(
funcPrintName, valuePrintName, extStructNames, prefix, value.name, extStructCount, extStructVar))
return checkExpr
#
# Generate the pointer check string
def makePointerCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName):
checkExpr = []
if lenValue:
# This is an array with a pointer to a count value
if lenValue.ispointer:
# If count and array parameters are optional, there will be no validation
if valueRequired == 'true' or lenPtrRequired == 'true' or lenValueRequired == 'true':
# When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
checkExpr.append('skipCall |= validate_array(report_data, "{}", "{ldn}", "{dn}", {pf}{ln}, {pf}{vn}, {}, {}, {});\n'.format(
funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix))
# This is an array with an integer count value
else:
# If count and array parameters are optional, there will be no validation
if valueRequired == 'true' or lenValueRequired == 'true':
# Arrays of strings receive special processing
validationFuncName = 'validate_array' if value.type != 'char' else 'validate_string_array'
checkExpr.append('skipCall |= {}(report_data, "{}", "{ldn}", "{dn}", {pf}{ln}, {pf}{vn}, {}, {});\n'.format(
validationFuncName, funcPrintName, lenValueRequired, valueRequired, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix))
if checkExpr:
if lenValue and ('->' in lenValue.name):
# Add checks to ensure the validation call does not dereference a NULL pointer to obtain the count
checkExpr = self.genCheckedLengthCall(lenValue.name, checkExpr)
# This is an individual struct that is not allowed to be NULL
elif not value.isoptional:
# Function pointers need a reinterpret_cast to void*
if value.type[:4] == 'PFN_':
checkExpr.append('skipCall |= validate_required_pointer(report_data, "{}", "{}", reinterpret_cast<const void*>({}{}));\n'.format(funcPrintName, valuePrintName, prefix, value.name))
else:
checkExpr.append('skipCall |= validate_required_pointer(report_data, "{}", "{}", {}{});\n'.format(funcPrintName, valuePrintName, prefix, value.name))
return checkExpr
#
    # Process struct member validation code, performing name substitution if required
def processStructMemberCode(self, line, funcName, memberNamePrefix, memberDisplayNamePrefix):
if any(token in line for token in ['{funcName}', '{valuePrefix}', '{displayNamePrefix}']):
return line.format(funcName=funcName, valuePrefix=memberNamePrefix, displayNamePrefix=memberDisplayNamePrefix)
return line
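    # For example (hypothetical stored line): the placeholders in
    #   'skipCall |= validate_bool32(report_data, "{funcName}", "{displayNamePrefix}member", {valuePrefix}member);\n'
    # are replaced with the enclosing function name and member prefixes when the
    # struct validation code is expanded into a command or parent struct body.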
#
# Process struct validation code for inclusion in function or parent struct validation code
def expandStructCode(self, lines, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, output):
for line in lines:
if output:
output[-1] += '\n'
if type(line) is list:
for sub in line:
output.append(self.processStructMemberCode(indent + sub, funcName, memberNamePrefix, memberDisplayNamePrefix))
else:
output.append(self.processStructMemberCode(indent + line, funcName, memberNamePrefix, memberDisplayNamePrefix))
return output
#
    # Process struct pointer/array validation code, performing name substitution if required
def expandStructPointerCode(self, prefix, value, lenValue, funcName, valueDisplayName):
expr = []
expr.append('if ({}{} != NULL)\n'.format(prefix, value.name))
expr.append('{')
indent = self.incIndent(None)
if lenValue:
# Need to process all elements in the array
indexName = lenValue.name.replace('Count', 'Index')
expr[-1] += '\n'
expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < {}{}; ++{iname})\n'.format(prefix, lenValue.name, iname=indexName))
expr.append(indent + '{')
indent = self.incIndent(indent)
# Prefix for value name to display in error message
memberNamePrefix = '{}{}[{}].'.format(prefix, value.name, indexName)
memberDisplayNamePrefix = '{}[i].'.format(valueDisplayName)
else:
memberNamePrefix = '{}{}->'.format(prefix, value.name)
memberDisplayNamePrefix = '{}->'.format(valueDisplayName)
#
# Expand the struct validation lines
expr = self.expandStructCode(self.validatedStructs[value.type], funcName, memberNamePrefix, memberDisplayNamePrefix, indent, expr)
#
if lenValue:
# Close if and for scopes
indent = self.decIndent(indent)
expr.append(indent + '}\n')
expr.append('}\n')
return expr
#
# Generate the parameter checking code
def genFuncBody(self, funcName, values, valuePrefix, displayNamePrefix, structTypeName):
lines = [] # Generated lines of code
unused = [] # Unused variable names
for value in values:
usedLines = []
lenParam = None
#
# Generate the full name of the value, which will be printed in the error message, by adding the variable prefix to the value name
valueDisplayName = '{}{}'.format(displayNamePrefix, value.name)
#
# Check for NULL pointers, ignore the inout count parameters that
# will be validated with their associated array
if (value.ispointer or value.isstaticarray) and not value.iscount:
#
# Parameters for function argument generation
                req = 'true' # Parameter cannot be NULL
cpReq = 'true' # Count pointer cannot be NULL
cvReq = 'true' # Count value cannot be 0
lenDisplayName = None # Name of length parameter to print with validation messages; parameter name with prefix applied
#
# Generate required/optional parameter strings for the pointer and count values
if value.isoptional:
req = 'false'
if value.len:
# The parameter is an array with an explicit count parameter
lenParam = self.getLenParam(values, value.len)
lenDisplayName = '{}{}'.format(displayNamePrefix, lenParam.name)
if lenParam.ispointer:
# Count parameters that are pointers are inout
if type(lenParam.isoptional) is list:
if lenParam.isoptional[0]:
cpReq = 'false'
if lenParam.isoptional[1]:
cvReq = 'false'
else:
if lenParam.isoptional:
cpReq = 'false'
else:
if lenParam.isoptional:
cvReq = 'false'
#
                # The parameter will not be processed when tagged as 'noautovalidity'
                # For the pointer to struct case, the struct pointer will not be validated, but any
                # members not tagged as 'noautovalidity' will be validated
if value.noautovalidity:
# Log a diagnostic message when validation cannot be automatically generated and must be implemented manually
self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
else:
#
# If this is a pointer to a struct with an sType field, verify the type
if value.type in self.structTypes:
usedLines += self.makeStructTypeCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName)
# If this is an input handle array that is not allowed to contain NULL handles, verify that none of the handles are VK_NULL_HANDLE
elif value.type in self.handleTypes and value.isconst and not self.isHandleOptional(value, lenParam):
usedLines += self.makeHandleCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName)
elif value.type in self.flags and value.isconst:
usedLines += self.makeFlagsArrayCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName)
elif value.isbool and value.isconst:
usedLines.append('skipCall |= validate_bool32_array(report_data, "{}", "{}", "{}", {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, lenParam.name, value.name, cvReq, req, pf=valuePrefix))
elif value.israngedenum and value.isconst:
enumRange = self.enumRanges[value.type]
usedLines.append('skipCall |= validate_ranged_enum_array(report_data, "{}", "{}", "{}", "{}", {}, {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.type, enumRange[0], enumRange[1], lenParam.name, value.name, cvReq, req, pf=valuePrefix))
elif value.name == 'pNext':
# We need to ignore VkDeviceCreateInfo and VkInstanceCreateInfo, as the loader manipulates them in a way that is not documented in vk.xml
                    if structTypeName not in ['VkDeviceCreateInfo', 'VkInstanceCreateInfo']:
usedLines += self.makeStructNextCheck(valuePrefix, value, funcName, valueDisplayName)
else:
usedLines += self.makePointerCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName)
#
# If this is a pointer to a struct (input), see if it contains members that need to be checked
if value.type in self.validatedStructs and value.isconst:
usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName))
# Non-pointer types
else:
#
                # The parameter will not be processed when tagged as 'noautovalidity'
                # For the struct case, the struct type will not be validated, but any
                # members not tagged as 'noautovalidity' will be validated
if value.noautovalidity:
# Log a diagnostic message when validation cannot be automatically generated and must be implemented manually
self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
else:
if value.type in self.structTypes:
stype = self.structTypes[value.type]
usedLines.append('skipCall |= validate_struct_type(report_data, "{}", "{}", "{sv}", &({}{vn}), {sv}, false);\n'.format(
funcName, valueDisplayName, valuePrefix, vn=value.name, sv=stype.value))
elif value.type in self.handleTypes:
if not self.isHandleOptional(value, None):
usedLines.append('skipCall |= validate_required_handle(report_data, "{}", "{}", {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name))
elif value.type in self.flags:
flagBitsName = value.type.replace('Flags', 'FlagBits')
                        if flagBitsName not in self.flagBits:
usedLines.append('skipCall |= validate_reserved_flags(report_data, "{}", "{}", {pf}{});\n'.format(funcName, valueDisplayName, value.name, pf=valuePrefix))
else:
flagsRequired = 'false' if value.isoptional else 'true'
allFlagsName = 'All' + flagBitsName
usedLines.append('skipCall |= validate_flags(report_data, "{}", "{}", "{}", {}, {pf}{}, {});\n'.format(funcName, valueDisplayName, flagBitsName, allFlagsName, value.name, flagsRequired, pf=valuePrefix))
elif value.isbool:
usedLines.append('skipCall |= validate_bool32(report_data, "{}", "{}", {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name))
elif value.israngedenum:
enumRange = self.enumRanges[value.type]
usedLines.append('skipCall |= validate_ranged_enum(report_data, "{}", "{}", "{}", {}, {}, {}{});\n'.format(funcName, valueDisplayName, value.type, enumRange[0], enumRange[1], valuePrefix, value.name))
#
# If this is a struct, see if it contains members that need to be checked
if value.type in self.validatedStructs:
memberNamePrefix = '{}{}.'.format(valuePrefix, value.name)
memberDisplayNamePrefix = '{}.'.format(valueDisplayName)
usedLines.append(self.expandStructCode(self.validatedStructs[value.type], funcName, memberNamePrefix, memberDisplayNamePrefix, '', []))
#
# Append the parameter check to the function body for the current command
if usedLines:
# Apply special conditional checks
if value.condition:
usedLines = self.genConditionalCall(valuePrefix, value.condition, usedLines)
lines += usedLines
elif not value.iscount:
# If no expression was generated for this value, it is unreferenced by the validation function, unless
                # it is an array count, which is indirectly referenced for array validation.
unused.append(value.name)
return lines, unused
#
# Generate the struct member check code from the captured data
def processStructMemberData(self):
indent = self.incIndent(None)
for struct in self.structMembers:
#
# The string returned by genFuncBody will be nested in an if check for a NULL pointer, so needs its indent incremented
lines, unused = self.genFuncBody('{funcName}', struct.members, '{valuePrefix}', '{displayNamePrefix}', struct.name)
if lines:
self.validatedStructs[struct.name] = lines
#
# Generate the command param check code from the captured data
def processCmdData(self):
indent = self.incIndent(None)
for command in self.commands:
# Skip first parameter if it is a dispatch handle (everything except vkCreateInstance)
startIndex = 0 if command.name == 'vkCreateInstance' else 1
lines, unused = self.genFuncBody(command.name, command.params[startIndex:], '', '', None)
if lines:
cmdDef = self.getCmdDef(command) + '\n'
cmdDef += '{\n'
            # Process unused parameters, ignoring the first dispatch handle parameter, which is not
# processed by parameter_validation (except for vkCreateInstance, which does not have a
# handle as its first parameter)
if unused:
for name in unused:
cmdDef += indent + 'UNUSED_PARAMETER({});\n'.format(name)
if len(unused) > 0:
cmdDef += '\n'
cmdDef += indent + 'bool skipCall = false;\n'
for line in lines:
cmdDef += '\n'
if type(line) is list:
for sub in line:
cmdDef += indent + sub
else:
cmdDef += indent + line
cmdDef += '\n'
cmdDef += indent + 'return skipCall;\n'
cmdDef += '}\n'
self.appendSection('command', cmdDef)
|
{
"content_hash": "38b2cb30590e2d36ed8bf4d5b55924f6",
"timestamp": "",
"source": "github",
"line_count": 3661,
"max_line_length": 300,
"avg_line_length": 47.16962578530456,
"alnum_prop": 0.5809899935143148,
"repo_name": "Radamanthe/VulkanSamples",
"id": "0b97305eb733247be9f04b4d0e61b39c0c0c7850",
"size": "172688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26152"
},
{
"name": "C",
"bytes": "2609031"
},
{
"name": "C++",
"bytes": "9935652"
},
{
"name": "CMake",
"bytes": "86637"
},
{
"name": "GLSL",
"bytes": "8747"
},
{
"name": "HTML",
"bytes": "31433"
},
{
"name": "JavaScript",
"bytes": "16881"
},
{
"name": "M4",
"bytes": "19093"
},
{
"name": "Makefile",
"bytes": "85475"
},
{
"name": "NSIS",
"bytes": "27655"
},
{
"name": "PowerShell",
"bytes": "32236"
},
{
"name": "Python",
"bytes": "793498"
},
{
"name": "Shell",
"bytes": "359605"
}
],
"symlink_target": ""
}
|
"""Tests related to assigning workers to specific compute units"""
from parsl.providers import LocalProvider
from parsl.channels import LocalChannel
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl import python_app
import pytest
import os
local_config = Config(
executors=[
HighThroughputExecutor(
label="htex_Local",
worker_debug=True,
max_workers=2,
cpu_affinity='block',
available_accelerators=2,
provider=LocalProvider(
channel=LocalChannel(),
init_blocks=1,
max_blocks=1,
),
)
],
strategy=None,
)
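# Note on the configuration above (a sketch of documented Parsl behavior; worth
# verifying against the installed version): cpu_affinity='block' assigns each
# worker a contiguous slice of the host CPUs, and available_accelerators=2
# pins each of the two workers to its own device via CUDA_VISIBLE_DEVICES.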
@python_app
def get_worker_info():
from time import sleep
import os
rank = int(os.environ['PARSL_WORKER_RANK'])
aff = os.sched_getaffinity(0)
device = os.environ.get('CUDA_VISIBLE_DEVICES')
sleep(1.0)
return rank, (aff, device)
@pytest.mark.local
@pytest.mark.skipif('sched_getaffinity' not in dir(os), reason='System does not support sched_setaffinity')
@pytest.mark.skipif(os.cpu_count() == 1, reason='Must have more than one CPU')
def test_htex():
worker_info = [get_worker_info() for _ in range(4)]
worker_affinity = dict([r.result() for r in worker_info])
assert worker_affinity[0] != worker_affinity[1]
assert worker_affinity[0][1] == "0" # Make sure it is pinned to the correct CUDA device
|
{
"content_hash": "86f767f358cc6e29165635667461d239",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 107,
"avg_line_length": 30.375,
"alnum_prop": 0.649519890260631,
"repo_name": "Parsl/parsl",
"id": "b5e412aea7142b99a2fca8b7961db4f82c53214b",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/tests/sites/test_affinity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1263"
},
{
"name": "CSS",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Makefile",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "1173869"
},
{
"name": "Shell",
"bytes": "12057"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickfont", parent_name="contour.colorbar", **kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
|
{
"content_hash": "76d1bdab87ee057cf7c4daf18e284028",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 39.61538461538461,
"alnum_prop": 0.5411003236245955,
"repo_name": "plotly/python-api",
"id": "db2b1bacc648d52306550c18b963d94fe9d995a0",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/contour/colorbar/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import os
import imp
import argparse
import tabulate
i = imp.find_module(u"mallet", [u".."])
imp.load_module(u"mallet", *i)
import mallet.class_dump as class_dump
_whitespace = u"\u200b"
def normalize_type(type_32bit, type_64bit):
if type_32bit is None:
type_32bit = type_64bit
elif type_64bit is None:
type_64bit = type_32bit
if type_32bit == type_64bit:
if type_32bit == u"struct CGPoint":
return u"CGPoint"
elif type_32bit == u"struct CGSize":
return u"CGSize"
elif type_32bit == u"struct CGRect":
return u"CGRect"
elif type_32bit == u"struct UIEdgeInsets":
return u"UIEdgeInsets"
elif type_32bit == u"struct __CFDictionary *":
return u"NSDictionary *"
elif type_32bit == u"struct _NSRange":
return u"NSRange"
return type_32bit
elif type_32bit == u"BOOL" and type_64bit == u"bool":
return u"BOOL"
elif type_32bit == u"char" and type_64bit == u"_Bool":
return u"BOOL"
elif type_32bit == u"int" and type_64bit == u"long long":
return u"NSInteger"
elif type_32bit == u"long" and type_64bit == u"long long":
return u"NSInteger"
elif type_32bit == u"unsigned int" and type_64bit == u"unsigned long long":
return u"NSUInteger"
elif type_32bit == u"unsigned long" and type_64bit == u"unsigned long long":
return u"NSUInteger"
elif type_32bit == u"float" and type_64bit == u"double":
return u"CGFloat"
elif type_32bit == u"struct CADoublePoint" and type_64bit == u"struct CGPoint":
return u"CADoublePoint"
print(u"Different types: {} != {}".format(type_32bit, type_64bit))
return type_64bit
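# Illustrative examples (inputs assumed from 32-bit/64-bit class dumps):
#   normalize_type(u"unsigned long", u"unsigned long long") -> u"NSUInteger"
#   normalize_type(u"float", u"double") -> u"CGFloat"
#   normalize_type(u"struct CGRect", u"struct CGRect") -> u"CGRect"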
def dump_class(module_name, class_name):
"""
Prints class description.
:param str module_name: Module name.
:param str class_name: Class name.
"""
# Current directory path.
current_dir = os.path.abspath(__file__)
current_dir, _ = os.path.split(current_dir)
input_dir = os.path.join(current_dir, u"../mallet/{}/{}".format(module_name, class_dump.class_dumps_folder_name))
input_dir = os.path.normpath(input_dir)
m = class_dump.Module(module_name, input_dir)
architectures = [u"armv7", u"i386", u"arm64", u"x86_64"]
main_architecture = u"arm64"
architecture_32bit = u"armv7"
architecture_64bit = u"arm64"
classes = [m.get_class_or_load(a, class_name) for a in architectures]
main_class = m.get_class_or_load(main_architecture, class_name)
# Output.
output = u"Class: {}\n".format(class_name)
if main_class.super_class_name:
output += u"Super class: {}\n".format(main_class.super_class_name)
if main_class.protocols:
output += u"Protocols: {}\n".format(u", ".join(main_class.protocols))
# iVars.
ivars = sorted(main_class.ivars, key=lambda x: x.offset, reverse=True)
if ivars:
# Headers
headers = [u"Name"]
[headers.append(a) for a in architectures]
rows = list()
for ivar in ivars:
# iVars for all architectures.
architecture_ivars = [cl.get_ivar(ivar.name) for cl in classes]
architecture_ivar_32bit = architecture_ivars[architectures.index(architecture_32bit)]
architecture_ivar_64bit = architecture_ivars[architectures.index(architecture_64bit)]
# Normalized type name.
type32 = architecture_ivar_32bit.ivarType if architecture_ivar_32bit else None
type64 = architecture_ivar_64bit.ivarType if architecture_ivar_64bit else None
type_name = normalize_type(type32, type64)
splitted_type_name = type_name.split(u"\n")
# For multiline types add "empty" rows.
ivar_rows = list()
for type_line in splitted_type_name[:-1]:
type_row = [type_line.replace(u" ", _whitespace)] + [u""] * len(architectures)
ivar_rows.append(type_row)
# Add type line.
type_row = [u"{} {}".format(splitted_type_name[-1], ivar.name).replace(u" ", _whitespace)]
for architecture_ivar in architecture_ivars:
value = u"{0:>3} 0x{0:03X} / {1:<2}".format(architecture_ivar.offset if architecture_ivar is not None else -1,
architecture_ivar.size if architecture_ivar is not None else None).replace(u" ", _whitespace)
type_row.append(value)
ivar_rows.append(type_row)
ivar_rows.reverse() # Ivars are reversed, so rows for ivar also have to be reversed.
rows.extend(ivar_rows)
rows.reverse()
output += tabulate.tabulate(rows, headers)
print(output)
if __name__ == "__main__":
# Argument parser.
parser = argparse.ArgumentParser(description=u"Prints class description.")
parser.add_argument(u"module")
parser.add_argument(u"class")
# Parse arguments.
args = parser.parse_args()
class_name = vars(args)[u"class"]
module_name = args.module
dump_class(module_name, class_name)
|
{
"content_hash": "1df81c13813147ef5cba7aaa44ffe830",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 153,
"avg_line_length": 37.62043795620438,
"alnum_prop": 0.6125339542103221,
"repo_name": "bartoszj/Mallet",
"id": "d9602a49e179497b79f8c11425cf1a7974f8ec66",
"size": "6319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/print_class_dump.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "109523"
},
{
"name": "Python",
"bytes": "546997"
},
{
"name": "Ruby",
"bytes": "734"
},
{
"name": "Shell",
"bytes": "4823"
},
{
"name": "Swift",
"bytes": "57594"
}
],
"symlink_target": ""
}
|
import clr
clr.AddReference("ZeroMQ") # installed in build folder via NuGet.
import ZeroMQ
# make an object to force static class initializations
ZeroMQ.ZFrame()
#from IronPython.Runtime.Types import DynamicHelpers
#_libzmq = DynamicHelpers.GetPythonTypeFromType(ZeroMQ.lib.zmq)
zmq_version_info = ZeroMQ.lib.zmq.version
IPC_PATH_MAX_LEN = 0
from . import constants
from .context import Context
from .socket import Socket
from .frame import Frame
from .message import Message
from .stopwatch import Stopwatch
from .devices import device, proxy
from .poll import zmq_poll
from .error import strerror, zmq_errno
has = ZeroMQ.ZContext.Has
def curve_keypair():
    return ZeroMQ.Z85.CurveKeypair()
__all__ = [ # copy of public_api from zmq\backend\select.py
'Context',
'Socket',
'Frame',
'Message',
'Stopwatch',
'device',
'proxy',
'zmq_poll',
'strerror',
'zmq_errno',
'has',
'curve_keypair',
'constants',
'zmq_version_info',
'IPC_PATH_MAX_LEN',
]
|
{
"content_hash": "b7b995e23098ce0c944b53519699a07f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 65,
"avg_line_length": 22.645833333333332,
"alnum_prop": 0.6669733210671573,
"repo_name": "swn1/pyzmq",
"id": "c57e47e54fbf95fe483213a2e89a4972c57eb056",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/ironpython",
"path": "zmq/backend/cli/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "22208"
},
{
"name": "C++",
"bytes": "31677"
},
{
"name": "Python",
"bytes": "662126"
}
],
"symlink_target": ""
}
|
import dbus, gobject, subprocess, glib
from dbus.mainloop.glib import DBusGMainLoop
def send_notifier(bus, message):
if message.get_member() == "Notify":
subprocess.check_call(['/home/maschmann/git/libnotifyCapture/test'])
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
bus.add_match_string_non_blocking("interface='org.freedesktop.Notifications'")
bus.add_message_filter(send_notifier)
mainloop = glib.MainLoop()
mainloop.run()
|
{
"content_hash": "1c0ba0bd593c68c0e916a8285b5408ab",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.7592997811816192,
"repo_name": "maschmann/libnotifyListener",
"id": "f3319e2a6495aced5865e308de28e6790eac920d",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "258"
},
{
"name": "Python",
"bytes": "890"
}
],
"symlink_target": ""
}
|
from pylab import *
import scipy.interpolate
import sklearn.metrics
#Our libraries
import pathutils
pathutils.add_relative_to_current_source_file_path_to_sys_path("../..")
from coord_system import *
from flashlight.splineutils import *
from flashlight.curveutils import *
from flashlight.quadrotorcamera3d import *
from sets import Set
def _compute_param_spacing_L2_norm(P, alpha):
D = sklearn.metrics.pairwise_distances(P,P)
dist = diag(D,k=1) + 0.01
l = pow(dist,alpha)
l_cum = r_[0.0,cumsum(l)]
T = np.tile(c_[l_cum], P.shape[1])
return T
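# For reference (a sketch of the math, not an authoritative name): with the
# alpha=0.5 used below this is centripetal parameterization, spacing knots by
# the square root of the chord length between consecutive control points; the
# 0.01 offset keeps coincident points from producing zero-length spans.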
def _get_easing_spline_coefficients(P,T=None,S=None,Z=None,degree=9):
return compute_minimum_variation_nonlocal_interpolating_b_spline_coefficients(P,T=T,S=S,Z=Z,degree=degree,lamb=[0,0,0,1,0],return_derivatives=False)
#return compute_catmull_rom_spline_coefficients(P,T=T,S=None,Z=None,degree=3)
def _evaluate_easing_spline(C,T,sd,T_eval=None,num_samples=200):
return evaluate_minimum_variation_nonlocal_interpolating_b_spline(C,T,sd,T_eval=T_eval,num_samples=num_samples)
#return evaluate_catmull_rom_spline(C,T,sd,T_eval=T_eval,num_samples=num_samples)
def compute_easing_curve(P,T=None,num_samples=200):
has_valid_spline = False
C = None
sd = None
S = Set([])
Z = [0, -1]
# Calculate the spline
C,T,sd = _get_easing_spline_coefficients(P,T=T,S=list(S),Z=Z)
Pev = None
    iteration = 0
    MAX_ITERS = 50
    # Retry until the sampled easing curve stays within [0, 1], up to MAX_ITERS
    # attempts; the inner sampling loop below uses 'i' as its own index.
    while not has_valid_spline and iteration < MAX_ITERS:
has_valid_spline = True
# Calculate the spline
C,T,sd = _get_easing_spline_coefficients(P,T=T,S=list(S),Z=Z)
# then sample it
Pev,Tev,dT = _evaluate_easing_spline(C,T,sd,num_samples=num_samples)
Pev[0] = 0
Pev[-1] = 1
currentSection = 0
invalidSection = False
for i in range(len(Tev)):
if i == len(Tev)-1 or Tev[i] > T[currentSection+1]:
if invalidSection:
S = S.union(set([currentSection]))
if currentSection == 0:
try:
Z.remove(0)
except:
pass
if currentSection == len(T)-2:
try:
Z.remove(-1)
except:
pass
currentSection = min(currentSection+1,len(T)-1)
invalidSection = False
if Pev[i] < -0.0000001 or Pev[i] > 1.0000001:
invalidSection = True
has_valid_spline = False
        iteration += 1
assert min(Pev) > -0.0000001
assert max(Pev) < 1.0000001
return C,T,sd
def _get_spatial_spline_coefficients(P,T=None,S=None,degree=9,return_derivatives=False,uniformKnots=False):
#return compute_catmull_rom_spline_coefficients(P,T=T,S=S,degree=degree)
if T is None and not uniformKnots:
T = _compute_param_spacing_L2_norm(P, 0.5)
if T[-1,-1] > 0:
T = T / T[-1,-1]
#S = [0]
if uniformKnots:
T = None
return compute_minimum_variation_nonlocal_interpolating_b_spline_coefficients(P,T=T,S=S,degree=9,lamb=[0,0,0,1,0],return_derivatives=False)
def _evaluate_spatial_spline(C,T,sd,T_eval=None,num_samples=200):
#return evaluate_catmull_rom_spline(C,T,sd,num_samples=num_samples)
return evaluate_minimum_variation_nonlocal_interpolating_b_spline(C,T,sd,T_eval=T_eval,num_samples=num_samples)
def compute_spatial_trajectory_and_arc_distance(P,T=None,S=None,num_samples=200,inNED=True):
C,T,sd = _get_spatial_spline_coefficients(P,T=T,S=S,degree=9,return_derivatives=False)
p,T_eval,dT = _evaluate_spatial_spline(C,T,sd,num_samples=num_samples)
# Turn into NED:
if not inNED:
p = numpy.array([llh2ned(point, p[0]) for point in p])
if len(p.shape) == 1:
p = matrix(p).T
else:
p = matrix(p)
num_samples_p = p.shape[0]
num_dimensions = p.shape[1]
t_p_linspace = linspace(0.0,T[-1,0],num_samples_p)
D = sklearn.metrics.pairwise_distances(p,p)
l = diag(D,k=1)
l_cum = r_[0.0,cumsum(l)]
l_cum_f = scipy.interpolate.interp1d(t_p_linspace, l_cum)
knot_arc_distances = l_cum_f(T[:,0])
return C,T,sd,knot_arc_distances
def reparameterize_spline(P_spline, T_spline, P_ease, T_ease, num_samples=200, ref_llh = None, isNED=True):
"""
This assumes the easing curve in position and time is normalized
- P_Ease in [0,1]
- T_Ease in [0,1]
Input: A description of a spline, and an easing curve for time to distance (normalized).
Calculates the (time -> distance -> spline parameter) mapping.
Returns the resulting table of time to spline parameter values, such that
sweeping linearly through time will result in spline parameters that move along the spline
according to the time->distance easing curve.
"""
# First, calculate a spline for P_spline and P_ease
C_spline,T_spline,sd_spline,dist = compute_spatial_trajectory_and_arc_distance(P_spline,T=T_spline)
C_ease,T_ease,sd_ease = compute_easing_curve(P_ease,T=T_ease)
# Then sample that densely
Spline_eval,T_spline_eval,dT_spline = _evaluate_spatial_spline(C_spline,T_spline,sd_spline,num_samples=num_samples)
Ease_eval,T_ease_eval,dT_ease = _evaluate_easing_spline(C_ease,T_ease,sd_ease,num_samples=num_samples)
if not isNED:
if ref_llh is None:
ref_llh = Spline_eval[0]
# Move into NED space, where everything is in meters.
Spline_eval = np.array([llh2ned(point, ref_llh) for point in Spline_eval])
assert min(Ease_eval) > -0.0001
assert max(Ease_eval) < 1.0001
Ease_eval = Ease_eval[:,0]/Ease_eval[-1,0]
Ease_eval = clip(Ease_eval,0,1)
# Finally, reparameterize the spline curve first into dist then modulate with ease
p_user_progress, t_user_progress, cumLength, t_user_progress_linspace_norm = reparameterize_curve(Spline_eval,Ease_eval)
# Then return a table of t_user_progress_linspace_norm
return t_user_progress_linspace_norm, t_user_progress, p_user_progress, ref_llh
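# A hypothetical usage sketch (argument shapes assumed from the code above):
#   t_norm, t_prog, p_prog, ref = reparameterize_spline(P_spline, T_spline,
#                                                       P_ease, T_ease)
# Sweeping linearly through t_norm yields spline samples p_prog that follow
# the requested time->distance easing.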
def calculate_feasibility_ned(P_lookFrom_spline, T_lookFrom_spline, P_lookAt_spline, T_lookAt_spline, P_lookFrom_ease, T_lookFrom_ease, P_lookAt_ease, T_lookAt_ease, total_time, refLLH):
lookFrom_t_user_progress_linspace_norm, lookFrom_t_user_progress, lookFrom_p_user_progress, lookFrom_ref_llh = reparameterize_spline(P_lookFrom_spline, T_lookFrom_spline, P_lookFrom_ease, T_lookFrom_ease)
lookAt_t_user_progress_linspace_norm, lookAt_t_user_progress, lookAt_p_user_progress, lookAt_ref_llh = reparameterize_spline(P_lookAt_spline, T_lookAt_spline, P_lookAt_ease, T_lookAt_ease)
y_axis_cam_hint_nominal = c_[ zeros_like(lookAt_t_user_progress), ones_like(lookAt_t_user_progress), zeros_like(lookAt_t_user_progress) ]
#do conversion
#north, negative down, east
i = numpy.array([0, 2, 1])
lookFrom_p_user_progress = lookFrom_p_user_progress[:,i]
lookAt_p_user_progress = lookAt_p_user_progress[:,i]
#lookFrom_p_user_progress[:, 1] *= -1
#lookAt_p_user_progress[:, 1] *= -1
    dt = lookAt_t_user_progress_linspace_norm[1] * total_time
q_q_dot_q_dot_dot_nominal = compute_state_space_trajectory_and_derivatives(lookFrom_p_user_progress,lookAt_p_user_progress,y_axis_cam_hint_nominal,dt)
u_nominal = compute_control_trajectory(q_q_dot_q_dot_dot_nominal)
p_body_nominal, p_body_dot_nominal, p_body_dot_dot_nominal, theta_body_nominal, theta_body_dot_nominal, theta_body_dot_dot_nominal, psi_body_nominal, psi_body_dot_nominal, psi_body_dot_dot_nominal, phi_body_nominal, phi_body_dot_nominal, phi_body_dot_dot_nominal, theta_cam_nominal, theta_cam_dot_nominal, theta_cam_dot_dot_nominal, psi_cam_nominal, psi_cam_dot_nominal, psi_cam_dot_dot_nominal, phi_cam_nominal, phi_cam_dot_nominal, phi_cam_dot_dot_nominal = q_q_dot_q_dot_dot_nominal
return u_nominal, p_body_nominal, p_body_dot_nominal, p_body_dot_dot_nominal, theta_body_nominal, phi_body_nominal, theta_cam_nominal, theta_cam_dot_nominal, psi_cam_nominal, phi_cam_nominal, phi_cam_dot_nominal
def calculate_feasibility(P_cameraPose, T_cameraPose, P_lookAt, T_lookAt, P_easingCameraPose, T_easingCameraPose, P_easingLookAt, T_easingLookAt, total_time):
lookFrom_t_user_progress_linspace_norm, lookFrom_t_user_progress, lookFrom_p_user_progress, lookFrom_ref_llh = reparameterize_spline(P_cameraPose, T_cameraPose, P_easingCameraPose, T_easingCameraPose)
lookAt_t_user_progress_linspace_norm, lookAt_t_user_progress, lookAt_p_user_progress, lookAt_ref_llh = reparameterize_spline(P_lookAt, T_lookAt, P_easingLookAt, T_easingLookAt, ref_llh=lookFrom_ref_llh)
y_axis_cam_hint_nominal = c_[ zeros_like(lookAt_t_user_progress), ones_like(lookAt_t_user_progress), zeros_like(lookAt_t_user_progress) ]
#do conversion
#north, negative down, east
i = numpy.array([0, 2, 1])
lookFrom_p_user_progress = lookFrom_p_user_progress[:,i]
lookAt_p_user_progress = lookAt_p_user_progress[:,i]
lookFrom_p_user_progress[:, 1] *= -1
lookAt_p_user_progress[:, 1] *= -1
    dt = lookAt_t_user_progress_linspace_norm[1] * total_time
q_q_dot_q_dot_dot_nominal = compute_state_space_trajectory_and_derivatives(lookFrom_p_user_progress,lookAt_p_user_progress,y_axis_cam_hint_nominal,dt)
u_nominal = compute_control_trajectory(q_q_dot_q_dot_dot_nominal)
p_body_nominal, p_body_dot_nominal, p_body_dot_dot_nominal, theta_body_nominal, theta_body_dot_nominal, theta_body_dot_dot_nominal, psi_body_nominal, psi_body_dot_nominal, psi_body_dot_dot_nominal, phi_body_nominal, phi_body_dot_nominal, phi_body_dot_dot_nominal, theta_cam_nominal, theta_cam_dot_nominal, theta_cam_dot_dot_nominal, psi_cam_nominal, psi_cam_dot_nominal, psi_cam_dot_dot_nominal, phi_cam_nominal, phi_cam_dot_nominal, phi_cam_dot_dot_nominal = q_q_dot_q_dot_dot_nominal
return u_nominal, p_body_nominal, p_body_dot_nominal, p_body_dot_dot_nominal, theta_body_nominal, phi_body_nominal, theta_cam_nominal, theta_cam_dot_nominal, psi_cam_nominal, phi_cam_nominal, phi_cam_dot_nominal
|
{
"content_hash": "50132ee905dfc02780fe39c4465f3c08",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 490,
"avg_line_length": 46.52232142857143,
"alnum_prop": 0.659629594088859,
"repo_name": "stanford-gfx/Horus",
"id": "12640924dabe2e8d9b06bd6c24486b93ea3b1322",
"size": "10421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/HorusApp/app/trajectoryAPI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8556"
},
{
"name": "HTML",
"bytes": "12243"
},
{
"name": "JavaScript",
"bytes": "721182"
},
{
"name": "Python",
"bytes": "291985"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
import mock
import os
os.environ["NUM_BUDGET_YEARS"] = '2'
from ...taxbrain.models import TaxSaveInputs
from ...taxbrain.models import convert_to_floats
from ...taxbrain.helpers import (expand_1D, expand_2D, expand_list, package_up_vars,
format_csv, arrange_totals_by_row, default_taxcalc_data)
from ...taxbrain.compute import DropqCompute, MockCompute
import taxcalc
from taxcalc import Policy
from .utils import *
from ..models import DynamicSaveInputs
START_YEAR = 2016
class DynamicOGUSAViewsTests(TestCase):
''' Test the ogusa dynamic views of this app. '''
def setUp(self):
# Every test needs a client.
self.client = Client()
user = User.objects.create_user('temporary', 'temporary@gmail.com', 'temporary')
def test_ogusa_different_start_years(self):
self.client.login(username='temporary', password='temporary')
# Do the microsim
start_year = 2015
reform = {u'ID_BenefitSurtax_Switch_1': [u'True'],
u'ID_BenefitSurtax_Switch_0': [u'True'],
u'ID_BenefitSurtax_Switch_3': [u'True'],
u'ID_BenefitSurtax_Switch_2': [u'True'],
u'ID_BenefitSurtax_Switch_5': [u'True'],
u'ID_BenefitSurtax_Switch_4': [u'True'],
u'ID_BenefitSurtax_Switch_6': [u'True'],
u'has_errors': [u'False'], u'II_em': [u'4333'],
u'start_year': unicode(start_year), 'csrfmiddlewaretoken': 'abc123'}
# Do a 2015 microsim
micro_2015 = do_micro_sim(self.client, reform)
# Do the ogusa simulation based on this microsim
ogusa_reform = {u'frisch': [u'0.42']}
ogusa_response = do_ogusa_sim(self.client, micro_2015, ogusa_reform, start_year)
orig_micro_model_num = micro_2015.url[-2:-1]
# Do a 2016 microsim
micro_2016 = do_micro_sim(self.client, reform)
start_year = 2016
# Do the ogusa simulation based on this microsim
ogusa_reform = {u'frisch': [u'0.43']}
ogusa_response2 = do_ogusa_sim(self.client, micro_2016, ogusa_reform, start_year, increment=1)
# Do a callback to say that the result is ready
#self.client.get('/dynamic/dynamic_finished/?job_id=ogusa424243&status=SUCCESS')
job_id = 'ogusa424243'
qs = DynamicSaveInputs.objects.filter(job_ids__contains=job_id)
dsi = qs[0]
assert dsi.frisch == u'0.43'
assert dsi.first_year == 2016
def test_ogusa_not_logged_in_no_email_fails(self):
# Do the microsim
start_year = 2015
self.client.logout()
reform = {u'ID_BenefitSurtax_Switch_1': [u'True'],
u'ID_BenefitSurtax_Switch_0': [u'True'],
u'ID_BenefitSurtax_Switch_3': [u'True'],
u'ID_BenefitSurtax_Switch_2': [u'True'],
u'ID_BenefitSurtax_Switch_5': [u'True'],
u'ID_BenefitSurtax_Switch_4': [u'True'],
u'ID_BenefitSurtax_Switch_6': [u'True'],
u'has_errors': [u'False'], u'II_em': [u'4333'],
u'start_year': unicode(start_year), 'csrfmiddlewaretoken': 'abc123'}
# Do a 2015 microsim
micro_2015 = do_micro_sim(self.client, reform)
# Do the ogusa simulation based on this microsim
ogusa_reform = {u'frisch': [u'0.42']}
ogusa_status_code = 403 # Should raise an error on no email address
ogusa_response = do_ogusa_sim(self.client, micro_2015, ogusa_reform,
start_year, exp_status_code=ogusa_status_code)
def test_ogusa_not_logged_with_email_succeeds(self):
# Do the microsim
start_year = 2015
self.client.logout()
reform = {u'ID_BenefitSurtax_Switch_1': [u'True'],
u'ID_BenefitSurtax_Switch_0': [u'True'],
u'ID_BenefitSurtax_Switch_3': [u'True'],
u'ID_BenefitSurtax_Switch_2': [u'True'],
u'ID_BenefitSurtax_Switch_5': [u'True'],
u'ID_BenefitSurtax_Switch_4': [u'True'],
u'ID_BenefitSurtax_Switch_6': [u'True'],
u'has_errors': [u'False'], u'II_em': [u'4333'],
u'start_year': unicode(start_year),
'csrfmiddlewaretoken': 'abc123'}
# Do a 2015 microsim
micro_2015 = do_micro_sim(self.client, reform)
# Do the ogusa simulation based on this microsim
ogusa_reform = {u'frisch': [u'0.42'], u'user_email': 'test@example.com'}
        # An email address is supplied, so the simulation should succeed
ogusa_response = do_ogusa_sim(self.client, micro_2015, ogusa_reform,
start_year)
def test_ogusa_round_robins(self):
# Do the microsim
start_year = 2015
self.client.login(username='temporary', password='temporary')
import sys
from webapp.apps.dynamic import helpers
from webapp.apps.dynamic import compute
# Monkey patch the variables we need to test
helpers.OGUSA_WORKER_IDX = 0
helpers.OGUSA_WORKERS = ['host1', 'host2', 'host3']
compute.OGUSA_WORKERS = ['host1', 'host2', 'host3']
reform = {u'ID_BenefitSurtax_Switch_1': [u'True'],
u'ID_BenefitSurtax_Switch_0': [u'True'],
u'ID_BenefitSurtax_Switch_3': [u'True'],
u'ID_BenefitSurtax_Switch_2': [u'True'],
u'ID_BenefitSurtax_Switch_5': [u'True'],
u'ID_BenefitSurtax_Switch_4': [u'True'],
u'ID_BenefitSurtax_Switch_6': [u'True'],
u'has_errors': [u'False'], u'II_em': [u'4333'],
u'start_year': unicode(start_year),
'csrfmiddlewaretoken': 'abc123'}
# Do a 2015 microsim
micro_2015 = do_micro_sim(self.client, reform)
        # Assert that the worker node index has been reset to 0
assert helpers.get_ogusa_worker_idx() == 0
# Do the ogusa simulation based on this microsim
ogusa_reform = {u'frisch': [u'0.42']}
ogusa_response = do_ogusa_sim(self.client, micro_2015, ogusa_reform,
start_year)
        # Assert that the worker node index has incremented
assert helpers.get_ogusa_worker_idx() == 1
ogusa_reform = {u'frisch': [u'0.42']}
ogusa_response = do_ogusa_sim(self.client, micro_2015, ogusa_reform,
start_year)
        # Assert that the worker node index has incremented again
assert helpers.get_ogusa_worker_idx() == 2
|
{
"content_hash": "be428e9288e7a71414047d5e1a55c43e",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 102,
"avg_line_length": 42.79012345679013,
"alnum_prop": 0.586266589728794,
"repo_name": "zrisher/webapp-public",
"id": "c481f8d08b6d97faae8c95abd5a935d50c9cb4bc",
"size": "6932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/apps/dynamic/tests/test_ogusa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "877372"
},
{
"name": "HTML",
"bytes": "64722"
},
{
"name": "JavaScript",
"bytes": "86106"
},
{
"name": "Python",
"bytes": "406502"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
}
|
import os
import frappe
class DbManager:
def __init__(self, db):
"""
Pass root_conn here for access to all databases.
"""
if db:
self.db = db
def get_current_host(self):
return self.db.sql("select user()")[0][0].split('@')[1]
def create_user(self, user, password, host=None):
# Create user if it doesn't exist.
if not host:
host = self.get_current_host()
if password:
self.db.sql("CREATE USER '%s'@'%s' IDENTIFIED BY '%s';" % (user, host, password))
else:
self.db.sql("CREATE USER '%s'@'%s';" % (user, host))
def delete_user(self, target, host=None):
if not host:
host = self.get_current_host()
try:
self.db.sql("DROP USER '%s'@'%s';" % (target, host))
except Exception as e:
if e.args[0] == 1396:
pass
else:
raise
def create_database(self, target):
if target in self.get_database_list():
self.drop_database(target)
self.db.sql("CREATE DATABASE `%s` ;" % target)
def drop_database(self, target):
self.db.sql("DROP DATABASE IF EXISTS `%s`;" % target)
def grant_all_privileges(self, target, user, host=None):
if not host:
host = self.get_current_host()
self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%s';" % (target, user, host))
def flush_privileges(self):
self.db.sql("FLUSH PRIVILEGES")
def get_database_list(self):
"""get list of databases"""
return [d[0] for d in self.db.sql("SHOW DATABASES")]
@staticmethod
def restore_database(target, source, user, password):
from frappe.utils import make_esc
esc = make_esc('$ ')
from distutils.spawn import find_executable
pipe = find_executable('pv')
if pipe:
pipe = '{pipe} {source} |'.format(
pipe=pipe,
source=source
)
source = ''
else:
pipe = ''
source = '< {source}'.format(source=source)
if pipe:
print('Creating Database...')
command = '{pipe} mysql -u {user} -p{password} -h{host} {target} {source}'.format(
pipe=pipe,
user=esc(user),
password=esc(password),
host=esc(frappe.db.host),
target=esc(target),
source=source
)
os.system(command)
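# A hypothetical usage sketch (a root-privileged connection is assumed, as the
# class docstring suggests):
#   manager = DbManager(root_conn)
#   manager.create_database('site_db')
#   manager.create_user('site_user', 'secret')
#   manager.grant_all_privileges('site_db', 'site_user')
#   manager.flush_privileges()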
|
{
"content_hash": "96f3195602e1a7248df62d8a0f62e83e",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 84,
"avg_line_length": 23.5,
"alnum_prop": 0.6320116054158608,
"repo_name": "RicardoJohann/frappe",
"id": "0954657b28d1888559116ee8e39ff821b13af315",
"size": "2068",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/database/db_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "447183"
},
{
"name": "HTML",
"bytes": "199549"
},
{
"name": "JavaScript",
"bytes": "2009239"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2338007"
},
{
"name": "Shell",
"bytes": "2296"
},
{
"name": "Vue",
"bytes": "24090"
}
],
"symlink_target": ""
}
|
from fiber_properties import polynomial_fit, polynomial_array, FiberImage, crop_image, plot_overlaid_cross_sections
import numpy as np
import matplotlib.pyplot as plt
# image = np.meshgrid(np.arange(100), np.arange(100))
# coeffs = np.random.rand(10) * 10.0
# print coeffs
# image = polynomial_array(image, *coeffs).reshape(100,100).astype('float64')
# image += np.random.normal(size=image.shape)*5.0
im_obj = FiberImage('../data/modal_noise/Kris_data/rectangular_100x300um/coupled_agitation/ff_corrected.fit')
image = im_obj.get_image()
center = im_obj.get_fiber_center()
# radius = im_obj.get_fiber_radius()
# image, center = crop_image(image, center=center, radius=radius/2)
plt.figure(1)
plt.imshow(image)
# fit = polynomial_fit(image, 6)
fit = im_obj.get_polynomial_fit()
plt.figure(2)
plt.imshow(fit)
plot_overlaid_cross_sections(image, fit, center)
plt.show()
|
{
"content_hash": "9d7639af5683e9aa4f70479b0abdcc7b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 115,
"avg_line_length": 33.25925925925926,
"alnum_prop": 0.7182628062360802,
"repo_name": "rpetersburg/fiber_properties",
"id": "4e61e7fa0f656ebd8747e67d7a980c4df06d237f",
"size": "898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code_testing/polynomial_fit_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "89"
},
{
"name": "Jupyter Notebook",
"bytes": "15690"
},
{
"name": "Python",
"bytes": "343420"
}
],
"symlink_target": ""
}
|
import json
from dateutil import parser
from datetime import datetime
from utils.date import get_year, get_month, get_weeknumber, get_weekday
def extract_month(utc_timestamp):
"""
Extracts month from utc timestamp string.
"""
    dt = parser.parse(utc_timestamp)
    return '{0}-{1}'.format(dt.year, dt.month)
def extract_day(utc_timestamp):
"""
Extracts day from utc timestamp string.
"""
    dt = parser.parse(utc_timestamp)
    return '{0}-{1}-{2}'.format(dt.year, dt.month, dt.day)
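# Note: the fields are not zero-padded, e.g. (illustrative):
#   extract_month('2016-03-04T10:20:30Z') -> '2016-3'
#   extract_day('2016-03-04T10:20:30Z')   -> '2016-3-4'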
class DataPresentation:
"""
Represents a presentation format for a given dataset (Datasets are python 'dict's basically).
If data format is 'py' then it's left alone.
But in case of 'js', a json compatible presentation is returned.
"""
JS_FORMAT, PY_FORMAT = 'js', 'py'
VALID_DATA_FORMATS = [JS_FORMAT, PY_FORMAT] # Available data presentation output formats.
def __init__(self, data_format):
        if data_format not in self.VALID_DATA_FORMATS:
            raise ValueError('Input data format is not a valid one.')
self.data_format = data_format
def present(self, data):
"""
Returns presentation of the given dataset.
"""
if self.data_format == 'js':
return json.dumps(data)
elif self.data_format == 'py':
return data
class GitStatistics:
"""
Generates data analysis for a git repository. This data will be available
in python 'dict' or javascript 'json' formats. One can use this statistics
to plot graphs or analyze repository activities.
"""
# Available time intervals for generating datasets.
DAILY_INTERVALS = 'daily'
WEEKLY_INTERVALS = 'weekly'
MONTHLY_INTERVALS = 'monthly'
VALID_DATA_GENERATION_INTERVALS = [DAILY_INTERVALS, WEEKLY_INTERVALS, MONTHLY_INTERVALS]
def __init__(self, repo, rev):
self.repo = repo
self.rev = rev
self.current_year = datetime.utcnow().isocalendar()[0]
self.current_week = datetime.utcnow().isocalendar()[1]
def _for_commits_daily(self, commits):
"""
Returns number of commits per day for the given commits.
"""
# get dates only in the current year.
dates = [extract_day(commit.get_committer_date()) for commit in commits \
if get_year(commit.get_committer_date()) == self.current_year]
return {date: dates.count(date) for date in dates}
def _for_commits_weekly(self, commits):
"""
        Returns number of commits per weekday for the current week.
"""
# get dates only in the current year and current week.
dates = [get_weekday(extract_day(commit.get_committer_date())) for commit in commits \
if get_year(commit.get_committer_date()) == self.current_year and \
get_weeknumber(commit.get_committer_date()) == self.current_week]
return {wd: dates.count(wd) if wd in dates else 0 for wd in range(1, 8)}
def _for_commits_monthly(self, commits):
"""
Returns number of commits per month for the given commits.
"""
dates = [get_month(extract_month(commit.get_committer_date())) for commit in commits
if get_year(commit.get_committer_date()) == self.current_year]
return {mn: dates.count(mn) if mn in dates else 0 for mn in range(1, 13)}
def for_commits(self, by, data_format):
"""
Returns dataset for number of commits per given time interval.
"""
        if by not in self.VALID_DATA_GENERATION_INTERVALS:
            raise ValueError('Input interval is not a valid one.')
commits = self.repo.get_commits(self.rev)
if by == self.DAILY_INTERVALS:
return DataPresentation(data_format).present(self._for_commits_daily(commits))
elif by == self.WEEKLY_INTERVALS:
return DataPresentation(data_format).present(self._for_commits_weekly(commits))
elif by == self.MONTHLY_INTERVALS:
return DataPresentation(data_format).present(self._for_commits_monthly(commits))
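# A hypothetical usage sketch (repo and rev objects come from the surrounding app):
#   stats = GitStatistics(repo, rev)
#   stats.for_commits(by='monthly', data_format='js')  # JSON string
#   stats.for_commits(by='weekly', data_format='py')   # plain dict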
|
{
"content_hash": "e3b64213ab5c42ff6ac432ce40b8dc35",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 113,
"avg_line_length": 34.479674796747965,
"alnum_prop": 0.6286253242159868,
"repo_name": "Djacket/djacket",
"id": "4048f06562f31ee2b9f2afc8a47d3f3f0ce0ffde",
"size": "4241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/backend/git/statistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29609"
},
{
"name": "HTML",
"bytes": "35321"
},
{
"name": "JavaScript",
"bytes": "12900"
},
{
"name": "Python",
"bytes": "88290"
},
{
"name": "Shell",
"bytes": "11320"
}
],
"symlink_target": ""
}
|
from flask import _app_ctx_stack
from sqlalchemy import engine, orm
from sqlalchemy.orm.exc import UnmappedClassError
class _QueryProperty:
def __init__(self, sa):
self.sa = sa
def __get__(self, obj, type):
try:
mapper = orm.class_mapper(type)
if mapper:
return type.query_class(mapper, session=self.sa.session())
except UnmappedClassError:
return None
class SQLAlchemyDB:
'''
Simple SQLAlchemy helper inspired by Flask-SQLAlchemy code.
    Allows the code to use a session bound to the Flask application context.
'''
def __init__(self, db_connection, base_model, session_options=None, **kwargs):
self.engine = engine.create_engine(db_connection, **kwargs)
self.Query = orm.Query
self.session = self.create_scoped_session(session_options)
self.Model = self.extend_base_model(base_model)
def extend_base_model(self, base):
if not getattr(base, 'query_class', None):
base.query_class = self.Query
base.query = _QueryProperty(self)
return base
@property
def metadata(self):
return self.Model.metadata
def create_scoped_session(self, options=None):
options = options or {}
scopefunc = _app_ctx_stack.__ident_func__
options.setdefault('query_cls', self.Query)
return orm.scoped_session(
self.create_session(options), scopefunc=scopefunc)
def create_session(self, options):
return orm.sessionmaker(bind=self.engine, **options)
def create_all_tables(self):
self.metadata.create_all(bind=self.engine)
def init_app(self, app):
@app.teardown_appcontext
def shutdown_session(response_or_exc):
self.session.remove()
return response_or_exc
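# A hypothetical usage sketch (the connection string and model base are assumptions):
#   db = SQLAlchemyDB('sqlite://', Base)
#   db.init_app(app)               # tie session cleanup to the Flask app context
#   db.create_all_tables()
#   rows = SomeModel.query.all()   # query property added by extend_base_model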
|
{
"content_hash": "d280c241a1b4ca74d11b1903f0fd0b06",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 29.063492063492063,
"alnum_prop": 0.6324412889131622,
"repo_name": "EclecticIQ/OpenTAXII",
"id": "a534862cc2eb3cc524b68f910855ca2df5e19922",
"size": "1831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opentaxii/sqldb_helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "237268"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
}
|
__author__ = "Vivek Dhayaal"
__copyright__ = "Copyright 2014, Reliance Jio Infocomm Ltd."
from django import shortcuts
from django.core.urlresolvers import reverse
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.openstack.common import timeutils
from horizon_jiocloud.common import forms as contrib_forms
from horizon_jiocloud.api import keystoneapi
from horizon_jiocloud.utils.utils import send_templated_email, \
validate_ril_email, generate_sms_activation_code
import logging
LOG = logging.getLogger(__name__)
class EmailForm(contrib_forms.ContribSelfHandlingForm):
email = forms.EmailField(widget=forms.TextInput(attrs={'placeholder': 'Your Email'}), validators=[validate_ril_email])
def __init__(self, request, *args, **kwargs):
super(EmailForm, self).__init__(request, *args, **kwargs)
res = {}
self.user = None
try:
res = keystoneapi.get_user(self.request.user.id)
except Exception as ex:
LOG.exception(ex)
self.set_non_field_errors([self.get_default_error_message()])
return None
if not (res.get("success") or res.get("result")):
return None
self.user = res.get("result")
kwargs["initial"]["email"] = self.user.get("email")
def handle(self, request, data):
        new_email = data.get("email")
        email_activation_code = generate_sms_activation_code()
        # Note: `data` is rebound here to the keystone update payload,
        # replacing the validated form data extracted above.
        data = {"email_activation_code": email_activation_code,
                "email_activation_code_time": timeutils.strtime()}
try:
# email confirmation about the change
send_templated_email("JioCloud account email change confirmation",
[new_email],
html_template="change_email/email_change_verification.html",
template_context={
"name": self.user.get("first_name") if self.user.get("first_name") else self.user.get("name"),
"email_activation_path": reverse('horizon:settings:email:index') + 'activate/' + email_activation_code + '?email=' + new_email,
})
# push changes to keystone database
response = keystoneapi.update_user(self.request.user.id, data)
messages.success(request,
'Please click the link sent to your new email to activate it')
except Exception as ex:
LOG.exception(ex)
response = exceptions.handle(request, ignore=True)
messages.error(request, 'Unable to change email.')
return shortcuts.redirect(request.build_absolute_uri())
|
{
"content_hash": "1c98879fb6f5f7578ffafcb0f634addb",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 143,
"avg_line_length": 42.95238095238095,
"alnum_prop": 0.6341463414634146,
"repo_name": "JioCloud/horizon",
"id": "e5f8007b3f593a4995a0b0e4c35ac4e41d9df3d4",
"size": "2751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon_jiocloud/change_email/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "296932"
},
{
"name": "JavaScript",
"bytes": "713370"
},
{
"name": "Python",
"bytes": "3614755"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
}
|
import uuid
import base64
import time
from boto.compat import json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig(object):
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, default_root_object=None,
logging=None):
"""
:param origin: Origin information to associate with the
distribution. If your distribution will use
an Amazon S3 origin, then this should be an
S3Origin object. If your distribution will use
a custom origin (non Amazon S3), then this
should be a CustomOrigin object.
:type origin: :class:`boto.cloudfront.origin.S3Origin` or
:class:`boto.cloudfront.origin.CustomOrigin`
:param enabled: Whether the distribution is enabled to accept
end user requests for content.
:type enabled: bool
:param caller_reference: A unique number that ensures the
request can't be replayed. If no
caller_reference is provided, boto
will generate a type 4 UUID for use
as the caller reference.
        :type caller_reference: str
:param cnames: A CNAME alias you want to associate with this
distribution. You can have up to 10 CNAME aliases
per distribution.
        :type cnames: list of str
:param comment: Any comments you want to include about the
distribution.
:type comment: str
:param trusted_signers: Specifies any AWS accounts you want to
permit to create signed URLs for private
content. If you want the distribution to
use signed URLs, this should contain a
TrustedSigners object; if you want the
distribution to use basic URLs, leave
this None.
:type trusted_signers: :class`boto.cloudfront.signers.TrustedSigners`
:param default_root_object: Designates a default root object.
Only include a DefaultRootObject value
if you are going to assign a default
root object for the distribution.
        :type default_root_object: str
:param logging: Controls whether access logs are written for the
distribution. If you want to turn on access logs,
this should contain a LoggingInfo object; otherwise
it should contain None.
:type logging: :class`boto.cloudfront.logging.LoggingInfo`
"""
self.connection = connection
self.origin = origin
self.enabled = enabled
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.cnames = []
if cnames:
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
self.logging = logging
self.default_root_object = default_root_object
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self></Self>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
if self.default_root_object:
dro = self.default_root_object
s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
s += '</DistributionConfig>\n'
return s
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'Logging':
self.logging = LoggingInfo()
return self.logging
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
else:
return None
def endElement(self, name, value, connection):
if name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'CallerReference':
self.caller_reference = value
elif name == 'DefaultRootObject':
self.default_root_object = value
else:
setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
DistributionConfig.__init__(self, connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers,
logging=logging)
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self/>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
s += '</StreamingDistributionConfig>\n'
return s
class DistributionSummary(object):
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
cname='', comment='', enabled=False):
self.connection = connection
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.origin = origin
self.enabled = enabled
self.cnames = []
if cname:
self.cnames.append(cname)
self.comment = comment
self.trusted_signers = None
self.etag = None
self.streaming = False
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'DomainName':
self.domain_name = value
elif name == 'Origin':
self.origin = value
elif name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'StreamingDistributionSummary':
self.streaming = True
else:
setattr(self, name, value)
def get_distribution(self):
return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
class Distribution(object):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
self.connection = connection
self.config = config
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.in_progress_invalidation_batches = 0
self.active_signers = None
self.etag = None
self._bucket = None
self._object_class = Object
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'InProgressInvalidationBatches':
self.in_progress_invalidation_batches = int(value)
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the Distribution. The only values
of the DistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set ``Distribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
        if enabled is not None:
            new_config.enabled = enabled
        if cnames is not None:
            new_config.cnames = cnames
        if comment is not None:
            new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
def enable(self):
"""
        Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
        Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if isinstance(self.config.origin, S3Origin):
if not self._bucket:
bucket_dns_name = self.config.origin.dns_name
bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
else:
raise NotImplementedError('Unable to get_objects on CustomOrigin')
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if isinstance(self.config.origin, S3Origin):
if self.config.origin.origin_access_identity:
id = self.config.origin.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key, replace)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object
def create_signed_url(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates a signed CloudFront URL that is only valid within the specified
parameters.
:type url: str
:param url: The URL of the protected object.
:type keypair_id: str
        :param keypair_id: The keypair ID of the Amazon KeyPair used to sign
                           the URL. This ID MUST correspond to the private key
                           specified with private_key_file or private_key_string.
:type expire_time: int
:param expire_time: The expiry time of the URL. If provided, the URL
will expire after the time has passed. If not provided the URL will
never expire. Format is a unix epoch.
Use time.time() + duration_in_sec.
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
after valid_after_time. Format is a unix epoch.
Use time.time() + secs_until_valid.
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
IP address. Use '192.168.0.10' for a single IP or
use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
wildcard globs in the URL. For example, you could
provide: 'http://example.com/media/\*' and the policy
and signature would allow access to all contents of
the media subdirectory. If not specified, only
allow access to the exact url provided in 'url'.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
private key file used for signing or an open
file object containing the private key
contents. Only one of private_key_file or
private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
used for signing. Only one of private_key_file or
private_key_string can be provided.
:rtype: str
:return: The signed URL.
"""
# Get the required parameters
params = self._create_signing_params(
url=url, keypair_id=keypair_id, expire_time=expire_time,
valid_after_time=valid_after_time, ip_address=ip_address,
policy_url=policy_url, private_key_file=private_key_file,
private_key_string=private_key_string)
        # Combine these into a full URL.
if "?" in url:
sep = "&"
else:
sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
if key in params:
param = "%s=%s" % (key, params[key])
signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
"""
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
        # Sign the policy.
signature = self._sign_string(policy, private_key_file, private_key_string)
        # Now base64-encode the signature (URL-safe as well).
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
@staticmethod
def _canned_policy(resource, expires):
"""
Creates a canned policy string.
"""
policy = ('{"Statement":[{"Resource":"%(resource)s",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":'
'%(expires)s}}}]}' % locals())
return policy
@staticmethod
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
# SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
# The 'DateLessThan' property is required.
if not expires:
# Defaults to ONE day
expires = int(time.time()) + 86400
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront.
Requires the rsa library be installed.
"""
try:
import rsa
except ImportError:
raise NotImplementedError("Boto depends on the python rsa "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# If private_key_file is a file name, open it and read it
if private_key_string is None:
if isinstance(private_key_file, basestring):
with open(private_key_file, 'r') as file_handle:
private_key_string = file_handle.read()
# Otherwise, treat it like a file
else:
private_key_string = private_key_file.read()
# Sign it!
private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
signature = rsa.sign(str(message), private_key, 'SHA-1')
return signature
@staticmethod
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
Distribution.__init__(self, connection, config, domain_name,
id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
if name == 'StreamingDistributionConfig':
self.config = StreamingDistributionConfig()
return self.config
else:
return Distribution.startElement(self, name, attrs, connection)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the StreamingDistribution. The only values
of the StreamingDistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set
``StreamingDistribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the StreamingDistribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = StreamingDistributionConfig(self.connection,
self.config.origin,
self.config.enabled,
self.config.caller_reference,
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
        if enabled is not None:
            new_config.enabled = enabled
        if cnames is not None:
            new_config.cnames = cnames
        if comment is not None:
            new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
new_config)
self.config = new_config
self._object_class = StreamingObject
def delete(self):
self.connection.delete_streaming_distribution(self.id, self.etag)
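# Illustrative sketch (not part of boto): shows the canned-policy JSON and the
# URL-safe base64 variant that create_signed_url() assembles from it. The URL
# and expiry below are made-up example values.
if __name__ == '__main__':
    example_url = 'http://d12345.cloudfront.net/video.mp4'
    example_expires = 1893456000  # an arbitrary future epoch time
    policy = Distribution._canned_policy(example_url, example_expires)
    print(policy)
    print(Distribution._url_base64_encode(policy))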
|
{
"content_hash": "466946ce8d1bdfc67e0528f33eae0915",
"timestamp": "",
"source": "github",
"line_count": 726,
"max_line_length": 137,
"avg_line_length": 41.09228650137741,
"alnum_prop": 0.56112358797305,
"repo_name": "jameslegg/boto",
"id": "fb8309f266467d307ba145996841dbf2d2a79b70",
"size": "30937",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "boto/cloudfront/distribution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "4227744"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.watcher
:platform: Unix
:synopsis: Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from common.utils.PolicyDiff import PolicyDiff
from common.utils.utils import sub_dict
from security_monkey import app
from security_monkey.datastore import Account
from security_monkey.datastore import IgnoreListEntry, Technology
from security_monkey.common.jinja import get_jinja_env
from boto.exception import BotoServerError
import time
import datastore
from sets import Set
from copy import deepcopy
import dpath.util
from dpath.exceptions import PathNotFound
class Watcher(object):
"""Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes."""
index = 'abstract'
i_am_singular = 'Abstract'
i_am_plural = 'Abstracts'
rate_limit_delay = 0
ignore_list = []
    interval = 15  # in minutes
def __init__(self, accounts=None, debug=False):
"""Initializes the Watcher"""
self.datastore = datastore.Datastore()
if not accounts:
accounts = Account.query.filter(Account.third_party==False).filter(Account.active==True).all()
self.accounts = [account.name for account in accounts]
else:
self.accounts = accounts
self.debug = debug
self.created_items = []
self.deleted_items = []
self.changed_items = []
self.ephemeral_items = []
# TODO: grab these from DB, keyed on account
self.rate_limit_delay = 0
self.interval = 15
self.honor_ephemerals = False
self.ephemeral_paths = []
def prep_for_slurp(self):
"""
Should be run before slurp is run to grab the IgnoreList.
"""
query = IgnoreListEntry.query
query = query.join((Technology, Technology.id == IgnoreListEntry.tech_id))
self.ignore_list = query.filter(Technology.name==self.index).all()
def check_ignore_list(self, name):
"""
See if the given item has a name flagging it to be ignored by security_monkey.
"""
for result in self.ignore_list:
if name.lower().startswith(result.prefix.lower()):
app.logger.warn("Ignoring {}/{} because of IGNORELIST prefix {}".format(self.index, name, result.prefix))
return True
return False
def wrap_aws_rate_limited_call(self, awsfunc, *args, **nargs):
attempts = 0
while True:
attempts = attempts + 1
try:
if self.rate_limit_delay > 0:
time.sleep(self.rate_limit_delay)
retval = awsfunc(*args, **nargs)
if self.rate_limit_delay > 0:
app.logger.warn(("Successfully Executed Rate-Limited Function. " +
"Tech: {} Account: {}. "
"Reducing sleep period from {} to {}")
.format(self.index, self.accounts, self.rate_limit_delay, self.rate_limit_delay / 2))
self.rate_limit_delay = self.rate_limit_delay / 2
return retval
except BotoServerError as e:
if e.error_code == 'Throttling':
if self.rate_limit_delay == 0:
self.rate_limit_delay = 1
app.logger.warn(('Being rate-limited by AWS. Increasing delay on tech {} ' +
'in account {} from 0 to 1 second. Attempt {}')
.format(self.index, self.accounts, attempts))
elif self.rate_limit_delay < 16:
self.rate_limit_delay = self.rate_limit_delay * 2
app.logger.warn(('Still being rate-limited by AWS. Increasing delay on tech {} ' +
'in account {} to {} seconds. Attempt {}')
.format(self.index, self.accounts, self.rate_limit_delay, attempts))
else:
raise e
else:
raise e
def created(self):
"""
Used by the Jinja templates
:returns: True if created_items is not empty
:returns: False otherwise.
"""
return len(self.created_items) > 0
def deleted(self):
"""
Used by the Jinja templates
:returns: True if deleted_items is not empty
:returns: False otherwise.
"""
return len(self.deleted_items) > 0
def changed(self):
"""
Used by the Jinja templates
:returns: True if changed_items is not empty
:returns: False otherwise.
"""
return len(self.changed_items) > 0
def slurp(self):
"""
method to slurp configuration from AWS for whatever it is that I'm
        interested in. This will be overridden for each technology.
"""
raise NotImplementedError()
def slurp_exception(self, location=None, exception=None, exception_map={}):
"""
Logs any exceptions that happen in slurp and adds them to the exception_map
using their location as the key. The location is a tuple in the form:
        (technology, account, region, item_name) that describes the object where the exception occurred.
Location can also exclude an item_name if the exception is region wide.
"""
if location in exception_map:
app.logger.debug("Exception map already has location {}. This should not happen.".format(location))
exception_map[location] = exception
app.logger.debug("Adding {} to the exceptions list. Exception was: {}".format(location, str(exception)))
def locationInExceptionMap(self, item_location, exception_map={}):
"""
Determines whether a given location is covered by an exception already in the
exception map.
Item location: (self.index, self.account, self.region, self.name)
exception Maps: (index, account, region, name)
(index, account, region)
(index, account)
:returns: True if location is covered by an entry in the exception map.
:returns: False if location is not covered by an entry in the exception map.
"""
# Exact Match
if item_location in exception_map:
app.logger.debug("Skipping {} due to an item-level exception {}.".format(item_location, exception_map[item_location]))
return True
# (index, account, region)
if item_location[0:3] in exception_map:
app.logger.debug("Skipping {} due to an region-level exception {}.".format(item_location, exception_map[item_location[0:3]]))
return True
# (index, account)
if item_location[0:2] in exception_map:
app.logger.debug("Skipping {} due to an account-level exception {}.".format(item_location, exception_map[item_location[0:2]]))
return True
# (index)
if item_location[0:1] in exception_map:
app.logger.debug("Skipping {} due to an technology-level exception {}.".format(item_location, exception_map[item_location[0:1]]))
return True
return False
def find_deleted(self, previous=[], current=[], exception_map={}):
"""
Find any items that have been deleted since the last run of the watcher.
Add these items to the deleted_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(Set(prev_map).difference(Set(curr_map)))
item_locations = [item_location for item_location in item_locations if not self.locationInExceptionMap(item_location, exception_map)]
list_deleted_items = [prev_map[item] for item in item_locations]
for item in list_deleted_items:
deleted_change_item = ChangeItem.from_items(old_item=item, new_item=None)
app.logger.debug("%s: %s/%s/%s deleted" % (self.i_am_singular, item.account, item.region, item.name))
self.deleted_items.append(deleted_change_item)
def find_new(self, previous=[], current=[]):
"""
Find any new objects that have been created since the last run of the watcher.
Add these items to the created_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(Set(curr_map).difference(Set(prev_map)))
list_new_items = [curr_map[item] for item in item_locations]
for item in list_new_items:
new_change_item = ChangeItem.from_items(old_item=None, new_item=item)
self.created_items.append(new_change_item)
app.logger.debug("%s: %s/%s/%s created" % (self.i_am_singular, item.account, item.region, item.name))
def find_modified(self, previous=[], current=[], exception_map={}):
"""
Find any objects that have been changed since the last run of the watcher.
Add these items to the changed_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(Set(curr_map).intersection(Set(prev_map)))
item_locations = [item_location for item_location in item_locations if not self.locationInExceptionMap(item_location, exception_map)]
for location in item_locations:
prev_item = prev_map[location]
curr_item = curr_map[location]
# ChangeItem with and without ephemeral changes
eph_change_item = None
dur_change_item = None
if not sub_dict(prev_item.config) == sub_dict(curr_item.config):
eph_change_item = ChangeItem.from_items(old_item=prev_item, new_item=curr_item)
if self.ephemerals_skipped():
# deepcopy configs before filtering
dur_prev_item = deepcopy(prev_item)
dur_curr_item = deepcopy(curr_item)
# filter-out ephemeral paths in both old and new config dicts
for path in self.ephemeral_paths:
for cfg in [dur_prev_item.config, dur_curr_item.config]:
try:
dpath.util.delete(cfg, path, separator='$')
except PathNotFound:
pass
# now, compare only non-ephemeral paths
if not sub_dict(dur_prev_item.config) == sub_dict(dur_curr_item.config):
dur_change_item = ChangeItem.from_items(old_item=dur_prev_item, new_item=dur_curr_item)
# store all changes, divided in specific categories
if eph_change_item:
self.ephemeral_items.append(eph_change_item)
app.logger.debug("%s: ephemeral changes in item %s/%s/%s" % (self.i_am_singular, eph_change_item.account, eph_change_item.region, eph_change_item.name))
if dur_change_item:
self.changed_items.append(dur_change_item)
app.logger.debug("%s: durable changes in item %s/%s/%s" % (self.i_am_singular, dur_change_item.account, dur_change_item.region, dur_change_item.name))
elif eph_change_item is not None:
# store all changes, handle them all equally
self.changed_items.append(eph_change_item)
app.logger.debug("%s: changes in item %s/%s/%s" % (self.i_am_singular, eph_change_item.account, eph_change_item.region, eph_change_item.name))
def find_changes(self, current=[], exception_map={}):
"""
Identify changes between the configuration I have and what I had
last time the watcher ran.
This ignores any account/region which caused an exception during slurp.
"""
prev = self.read_previous_items()
self.find_deleted(previous=prev, current=current, exception_map=exception_map)
self.find_new(previous=prev, current=current)
self.find_modified(previous=prev, current=current, exception_map=exception_map)
def read_previous_items(self):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
for account in self.accounts:
prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
new_config=item_revision.config)
prev_list.append(new_item)
return prev_list
def get_latest_config(self, config_dict):
"""
config_dict is a dict indexed by timestamp, with configuration as the value;
:return: the latest configuration (based on the timestamp)
"""
timestamps = config_dict.keys()
timestamps.sort()
latest = timestamps[-1]
return config_dict[latest]
def is_changed(self):
"""
:return: boolean whether or not we've found any changes
"""
return self.deleted_items or self.created_items or self.changed_items
def issues_found(self):
"""
Runs through any changed items to see if any have issues.
:return: boolean whether any changed items have issues
"""
has_issues = False
has_new_issue = False
has_unjustified_issue = False
for item in self.created_items + self.changed_items:
if item.audit_issues:
has_issues = True
if item.found_new_issue:
has_new_issue = True
has_unjustified_issue = True
break
for issue in item.confirmed_existing_issues:
if not issue.justified:
has_unjustified_issue = True
break
return has_issues, has_new_issue, has_unjustified_issue
def save(self):
"""
save new configs, if necessary
"""
app.logger.info("{} deleted {} in {}".format(len(self.deleted_items), self.i_am_plural, self.accounts))
app.logger.info("{} created {} in {}".format(len(self.created_items), self.i_am_plural, self.accounts))
for item in self.created_items + self.deleted_items:
item.save(self.datastore)
if self.ephemerals_skipped():
changes_tot = len(self.ephemeral_items)
changeset = self.ephemeral_items
else:
changes_tot = len(self.changed_items)
changeset = self.changed_items
app.logger.info("{} changed {} in {}".format(changes_tot, self.i_am_plural, self.accounts))
for item in changeset:
item.save(self.datastore)
def plural_name(self):
"""
Used for Jinja Template
:return: i_am_plural
"""
return self.i_am_plural
def singular_name(self):
"""
Used for Jinja Template
:return: i_am_singular
"""
return self.i_am_singular
def get_interval(self):
""" Returns interval time (in minutes) """
return self.interval
def ephemerals_skipped(self):
""" Returns whether ephemerals locations are ignored """
return self.honor_ephemerals
class ChangeItem(object):
"""
Object tracks two different revisions of a given item.
"""
def __init__(self, index=None, region=None, account=None, name=None, old_config={}, new_config={}, active=False, audit_issues=None):
self.index = index
self.region = region
self.account = account
self.name = name
self.old_config = old_config
self.new_config = new_config
self.active = active
self.audit_issues = audit_issues or []
self.confirmed_new_issues = []
self.confirmed_fixed_issues = []
self.confirmed_existing_issues = []
self.found_new_issue = False
@classmethod
def from_items(cls, old_item=None, new_item=None):
"""
Create ChangeItem from two separate items.
:return: An instance of ChangeItem
"""
if not old_item and not new_item:
return
valid_item = new_item if new_item else old_item
active = True if new_item else False
old_config = old_item.config if old_item else {}
new_config = new_item.config if new_item else {}
return cls(index=valid_item.index,
region=valid_item.region,
account=valid_item.account,
name=valid_item.name,
old_config=old_config,
new_config=new_config,
active=active,
audit_issues=valid_item.audit_issues)
@property
def config(self):
return self.new_config
def location(self):
"""
Construct a location from the object.
:return: tuple containing index, account, region, and name.
"""
return (self.index, self.account, self.region, self.name)
def get_pdiff_html(self):
pdiff = PolicyDiff(self.new_config, self.old_config)
return pdiff.produceDiffHTML()
def _dict_for_template(self):
return {
'account': self.account,
'region': self.region,
'name': self.name,
'confirmed_new_issues': self.confirmed_new_issues,
'confirmed_fixed_issues': self.confirmed_fixed_issues,
'confirmed_existing_issues': self.confirmed_existing_issues,
'pdiff_html': self.get_pdiff_html()
}
def description(self):
"""
Provide an HTML description of the object for change emails and the Jinja templates.
        :return: string of HTML describing the object.
"""
jenv = get_jinja_env()
template = jenv.get_template('jinja_change_item.html')
body = template.render(self._dict_for_template())
# app.logger.info(body)
return body
def save(self, datastore):
"""
Save the item
"""
app.logger.debug("Saving {}/{}/{}/{}\n\t{}".format(self.index, self.account, self.region, self.name, self.new_config))
datastore.store(self.index, self.region, self.account, self.name, self.active, self.new_config, new_issues=self.audit_issues)
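# Illustrative sketch (not part of the original module): the minimal surface a
# concrete watcher provides is its index/naming metadata plus a slurp()
# implementation. Returning (item_list, exception_map) mirrors how
# find_changes and slurp_exception are used above, but treat that exact
# contract as an assumption.
class ExampleWatcher(Watcher):
    index = 'example'
    i_am_singular = 'Example'
    i_am_plural = 'Examples'

    def slurp(self):
        self.prep_for_slurp()
        item_list = []      # would hold items exposing location()/config like ChangeItem
        exception_map = {}  # keyed by (index, account[, region[, name]])
        return item_list, exception_map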
|
{
"content_hash": "374e75372b1900fe1384746046b785ed",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 172,
"avg_line_length": 41.22008547008547,
"alnum_prop": 0.5879425638898969,
"repo_name": "airbnb/security_monkey",
"id": "a8e5688c8fb3453c4178afa40fb906780387749c",
"size": "19291",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "security_monkey/watcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22086"
},
{
"name": "Dart",
"bytes": "81616"
},
{
"name": "HTML",
"bytes": "76597"
},
{
"name": "JavaScript",
"bytes": "8629"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "451590"
},
{
"name": "Shell",
"bytes": "16916"
}
],
"symlink_target": ""
}
|
from flask import request
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as JWT
from app.haulers.models import Haulers, HaulersSchema, db
from config import SECRET_KEY, GH_VD_SECRET
import hashlib
from sqlalchemy.sql import text
jwt = JWT(SECRET_KEY, expires_in=3600)
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth('Bearer')
@basic_auth.verify_password
def verify_password(username, password):
if len(username) > 0 and len(password) > 0:
hauler = Haulers.query.with_entities(Haulers.password, Haulers.HAULER_ID).filter_by(email = username, password = password).first()
if hauler and hauler.password and check_password_hash(generate_password_hash(hauler.password), password):
return True
# Reps Login
query = db.engine.execute(text("SELECT email, password FROM haulers_representative WHERE email=:e AND password=:p"), {'e':username, 'p':password})
data = query.fetchall()
if data:
for row in data:
if row.password and check_password_hash(generate_password_hash(row.password), password):
return True
return False
@token_auth.verify_token
def verify_token(token):
try:
data = jwt.loads(token)
except:
return False
if 'HAULER_ID' in data:
return True
return False
class Auth():
    @staticmethod
    def find_between(s, first, last):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
    @staticmethod
    def setToken(HAULER_ID):
if not HAULER_ID:
raise ValueError("Invalid Vendor ID")
user_token = jwt.dumps( {'HAULER_ID': HAULER_ID} )
return Auth.find_between( str(user_token), "'", "'" )
    @staticmethod
    def login(username, password):
if len(username) > 0 and len(password) > 0:
hauler = Haulers.query.with_entities(Haulers.password, Haulers.HAULER_ID).filter_by(email = username, password = password).first()
if hauler and hauler.password and check_password_hash(generate_password_hash(hauler.password), password):
                return Auth.setToken(hauler.HAULER_ID)
# Reps Login
query = db.engine.execute(text("SELECT email, password, HAULER_ID FROM haulers_representative WHERE email=:e AND password=:p"), {'e':username, 'p':password})
data = query.fetchall()
if data:
for row in data:
if row.password and check_password_hash(generate_password_hash(row.password), password):
                    return Auth.setToken(row.HAULER_ID)
return False
    @staticmethod
    def loginByToken(token):
token = token.strip()
if token:
key = GH_VD_SECRET
query = db.engine.execute("SELECT HAULER_ID, email, password FROM haulers WHERE MD5(CONCAT(HAULER_ID, email,'" + key + "')) = '"+ token +"'" )
hauler = query.fetchone()
if hauler:
                return Auth.setToken(hauler.HAULER_ID)
return False
    @staticmethod
    def validateSignupToken(token):
token = token.strip()
if token:
key = GH_VD_SECRET
query = db.engine.execute("SELECT HAULER_ID, email, password FROM haulers WHERE MD5(CONCAT(HAULER_ID, email,'" + key + "')) = '"+ token +"'" )
hauler = query.fetchone()
if hauler:
if hauler.password:
raise ValueError("token expired, Vendor already has an account.")
return(hauler.HAULER_ID)
#raise ValueError("Invalid token")
return False
    @staticmethod
    def validateRepsSignupToken(token):
token = token.strip()
if token:
key = GH_VD_SECRET
query = db.engine.execute("SELECT id, HAULER_ID, email, password FROM haulers_representative WHERE MD5(CONCAT(id, email,'" + key + "')) = '"+ token +"'" )
hauler = query.fetchone()
if hauler:
if hauler.password:
raise ValueError("token expired, Vendor Representative already has an account.")
return(hauler.id)
raise ValueError("Invalid token")
class Security():
    @staticmethod
    def getHaulerId(token=False):
if token:
access_token = token
else:
access_token = request.headers.get('authorization')
if access_token:
access_token = access_token.replace('Bearer', '').strip()
if len(access_token):
data = jwt.loads(access_token)
if 'HAULER_ID' in data:
return data['HAULER_ID']
        return False
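# Illustrative round trip (a sketch, assuming SECRET_KEY is configured and the
# app's config module is importable):
if __name__ == '__main__':
    demo_token = Auth.setToken(42)
    print(demo_token)                # URL-safe signed token, valid for 3600s
    print(verify_token(demo_token))  # True while the token is unexpired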
|
{
"content_hash": "c5cfbe189d7705a344f8d068be7d931e",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 181,
"avg_line_length": 44.08730158730159,
"alnum_prop": 0.5449144914491449,
"repo_name": "konstantinKim/vd-backend",
"id": "deb6f3912b330d096222e50c5c5ab90daf648061",
"size": "5555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190141"
}
],
"symlink_target": ""
}
|
import sys
import argparse
from PyQt4 import QtGui
from PyQt4 import QtCore
app = QtGui.QApplication(sys.argv)
class ScreenShot(QtGui.QWidget):
def __init__(self):
super(ScreenShot, self).__init__()
self.outPath = None
screen_rect = app.desktop().screenGeometry()
        # Double the primary screen width, presumably to cover a second monitor.
        width, height = screen_rect.width()*2, screen_rect.height()
self.setGeometry(0, 0, width, height)
self.setWindowTitle('Screen Capture')
self.setCursor(QtCore.Qt.CrossCursor)
self.setWindowOpacity(0.1)
self.rubberband = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle, self)
self.setMouseTracking(True)
def mousePressEvent(self, event):
self.origin = event.pos()
self.rubberband.setGeometry(QtCore.QRect(self.origin, QtCore.QSize()))
self.rubberband.show()
QtGui.QWidget.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if self.rubberband.isVisible():
self.rubberband.setGeometry(
QtCore.QRect(self.origin, event.pos()).normalized())
QtGui.QWidget.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
        if self.rubberband.isVisible():
            self.rubberband.hide()
            rect = self.rubberband.geometry()
desktop = QtGui.QApplication.instance().desktop()
imgmap = QtGui.QPixmap.grabWindow(desktop.winId(), rect.x(), rect.y(), rect.width(), rect.height())
imgmap.save(self.outPath)
sys.exit()
QtGui.QWidget.mouseReleaseEvent(self, event)
def launch(self, imagePath):
self.outPath = imagePath
self.show()
def captureRegion(outImagePath):
    # Reuse the module-level QApplication; constructing a second instance
    # would fail because one already exists.
    lch = ScreenShot()
    lch.launch(outImagePath)
    sys.exit(app.exec_())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simple tool to take screenshot can be used as a module also')
parser.add_argument('-o','--out', help='output path for image to be saved', required=True)
args = parser.parse_args()
lch = ScreenShot()
lch.launch(args.out)
sys.exit(app.exec_())
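# Example invocation (illustrative):
#   python screenshot.py --out /tmp/region.png
# Drag a rectangle over the region to capture; the image is saved to the
# given path and the tool exits.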
|
{
"content_hash": "c542d4aa4ae5af79a3f500166926a269",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 111,
"avg_line_length": 35.12903225806452,
"alnum_prop": 0.6437098255280074,
"repo_name": "cgarjun/utilities",
"id": "0b55e3df771e24e1ea84f9c8578a0a036e66ee42",
"size": "2178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "screenshot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4411"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class TextCkeditorConfig(AppConfig):
name = 'djangocms_text_ckeditor'
verbose_name = 'django CMS Text CKEditor'
|
{
"content_hash": "b1ed64c51167d6a392048bf14f2f2168",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 26,
"alnum_prop": 0.7564102564102564,
"repo_name": "yakky/djangocms-text-ckeditor",
"id": "aa44b17507b75d6d45db01e82f70ec5ebc32daff",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_text_ckeditor/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "14352"
},
{
"name": "JavaScript",
"bytes": "21502"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "113171"
}
],
"symlink_target": ""
}
|
from google.cloud import binaryauthorization_v1
def sample_create_attestor():
# Create a client
client = binaryauthorization_v1.BinauthzManagementServiceV1Client()
# Initialize request argument(s)
attestor = binaryauthorization_v1.Attestor()
attestor.user_owned_grafeas_note.note_reference = "note_reference_value"
attestor.name = "name_value"
request = binaryauthorization_v1.CreateAttestorRequest(
parent="parent_value",
attestor_id="attestor_id_value",
attestor=attestor,
)
# Make the request
response = client.create_attestor(request=request)
# Handle the response
print(response)
# [END binaryauthorization_v1_generated_BinauthzManagementServiceV1_CreateAttestor_sync]
|
{
"content_hash": "39c34002a45c25f56ee1bfd015db71ab",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 88,
"avg_line_length": 30.12,
"alnum_prop": 0.7330677290836654,
"repo_name": "googleapis/python-binary-authorization",
"id": "e51ea8d06cd61d2688e655321b6c72845d9a48c5",
"size": "2178",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/binaryauthorization_v1_generated_binauthz_management_service_v1_create_attestor_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1055955"
},
{
"name": "Shell",
"bytes": "30702"
}
],
"symlink_target": ""
}
|
class Student:
    def __init__(self, name):
        # Store the split name reversed, e.g. "Ada Lovelace" -> ('Lovelace', 'Ada').
        self.name = tuple(name.split())[::-1]
myStudentList = []
def addStudent(name):
myStudentList.append(Student(name))
def removeStudent(name):
    # Iterate over a copy so removing items doesn't skip elements.
    for student in myStudentList[:]:
        if student.name == tuple(name.split())[::-1]:
            myStudentList.remove(student)
def displayStudent():
for student in myStudentList:
for name in student.name:
print name,
print
while True:
print "\n\n\n"
print "Select an option:"
print "1. Add a student"
print "2. Delete student"
print "3. Display all students"
choice = raw_input("Enter your choice : ")
if choice == "1":
name = raw_input("Enter full name : ")
addStudent(name)
elif choice == "2":
name = raw_input("Enter full name : ")
removeStudent(name)
elif choice == "3":
displayStudent()
else:
print "Invalid Choice"
|
{
"content_hash": "3891530c0d8ec75afa977a9ad6e2ad69",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 60,
"avg_line_length": 24.5,
"alnum_prop": 0.5846938775510204,
"repo_name": "pranavsb/Python-SIG-2015",
"id": "07df948d2651fe6587bd07d5a9bcd39b262d5c75",
"size": "1394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "final_exam/soln7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30008"
}
],
"symlink_target": ""
}
|
from base_classes import *
from config import *
from dadict import *
from data import *
from error import *
from themes import *
from arch import *
from plist import *
from fields import *
from packet import *
from asn1fields import *
from asn1packet import *
from utils import *
from route import *
if conf.ipv6_enabled:
from utils6 import *
from route6 import *
from sendrecv import *
from supersocket import *
from volatile import *
from as_resolvers import *
from ansmachine import *
from automaton import *
from autorun import *
from main import *
from layers.all import *
from asn1.asn1 import *
from asn1.ber import *
from asn1.mib import *
|
{
"content_hash": "aab684ad02f3d35e2116b78d55dec708",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 26,
"avg_line_length": 17.5,
"alnum_prop": 0.7473684210526316,
"repo_name": "RiskSense-Ops/CVE-2016-6366",
"id": "fd64c33ba60ff96ce5b394d95755dc6ed92ab78d",
"size": "865",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "extrabacon-2.0/scapy/all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6724"
},
{
"name": "Python",
"bytes": "1201686"
},
{
"name": "Ruby",
"bytes": "12603"
}
],
"symlink_target": ""
}
|
"""The IPython HTML Notebook"""
# check for tornado 2.1.0
msg = "The IPython Notebook requires tornado >= 2.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
    version_info = tornado.version_info
except AttributeError:
    # tornado.version_info was added in tornado 1.1.0
    raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (2,1,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
del msg
# check for pyzmq 2.1.4
from IPython.zmq import check_for_zmq
check_for_zmq('2.1.4', 'IPython.frontend.html.notebook')
del check_for_zmq
|
{
"content_hash": "6171c8fb0d1522b91d6675b493ae8cc5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 27.35,
"alnum_prop": 0.70018281535649,
"repo_name": "sodafree/backend",
"id": "bfa5317531ca44c41d5af84be1a01cec330c3382",
"size": "547",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/ipython/build/lib.linux-i686-2.7/IPython/frontend/html/notebook/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "21800"
},
{
"name": "JavaScript",
"bytes": "1050184"
},
{
"name": "Python",
"bytes": "21215906"
},
{
"name": "Shell",
"bytes": "7557"
},
{
"name": "VimL",
"bytes": "25012"
}
],
"symlink_target": ""
}
|
import random
import requests
import shutil
import logging
import os
import traceback
import ujson
from typing import List, Dict, Any, Optional, Set, Callable, Iterable, Tuple, TypeVar
from django.forms.models import model_to_dict
from zerver.models import Realm, RealmEmoji, Subscription, Recipient, \
Attachment, Stream, Message, UserProfile
from zerver.data_import.sequencer import NEXT_ID
from zerver.lib.actions import STREAM_ASSIGNMENT_COLORS as stream_colors
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.parallel import run_parallel, JobData
# stubs
ZerverFieldsT = Dict[str, Any]
def build_zerver_realm(realm_id: int, realm_subdomain: str, time: float,
other_product: str) -> List[ZerverFieldsT]:
realm = Realm(id=realm_id, date_created=time,
name=realm_subdomain, string_id=realm_subdomain,
description=("Organization imported from %s!" % (other_product)))
auth_methods = [[flag[0], flag[1]] for flag in realm.authentication_methods]
realm_dict = model_to_dict(realm, exclude='authentication_methods')
realm_dict['authentication_methods'] = auth_methods
    return [realm_dict]
def build_user_profile(avatar_source: str,
date_joined: Any,
delivery_email: str,
email: str,
full_name: str,
id: int,
is_active: bool,
is_realm_admin: bool,
is_guest: bool,
is_mirror_dummy: bool,
realm_id: int,
short_name: str,
timezone: Optional[str]) -> ZerverFieldsT:
pointer = -1
obj = UserProfile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=id,
is_active=is_active,
is_realm_admin=is_realm_admin,
is_guest=is_guest,
pointer=pointer,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
)
dct = model_to_dict(obj)
return dct
def build_avatar(zulip_user_id: int, realm_id: int, email: str, avatar_url: str,
timestamp: Any, avatar_list: List[ZerverFieldsT]) -> None:
avatar = dict(
path=avatar_url, # Save original avatar url here, which is downloaded later
realm_id=realm_id,
content_type=None,
user_profile_id=zulip_user_id,
last_modified=timestamp,
user_profile_email=email,
s3_path="",
size="")
avatar_list.append(avatar)
def make_subscriber_map(zerver_subscription: List[ZerverFieldsT]) -> Dict[int, Set[int]]:
'''
This can be convenient for building up UserMessage
rows.
'''
subscriber_map = dict() # type: Dict[int, Set[int]]
for sub in zerver_subscription:
user_id = sub['user_profile']
recipient_id = sub['recipient']
if recipient_id not in subscriber_map:
subscriber_map[recipient_id] = set()
subscriber_map[recipient_id].add(user_id)
return subscriber_map
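# Illustrative shape (a sketch with made-up ids): given subscription rows
#   [{'user_profile': 1, 'recipient': 10}, {'user_profile': 2, 'recipient': 10}]
# make_subscriber_map returns {10: {1, 2}}.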
def build_subscription(recipient_id: int, user_id: int,
subscription_id: int) -> ZerverFieldsT:
subscription = Subscription(
color=random.choice(stream_colors),
id=subscription_id)
subscription_dict = model_to_dict(subscription, exclude=['user_profile', 'recipient_id'])
subscription_dict['user_profile'] = user_id
subscription_dict['recipient'] = recipient_id
return subscription_dict
def build_public_stream_subscriptions(
zerver_userprofile: List[ZerverFieldsT],
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
'''
This function is only used for Hipchat now, but it may apply to
future conversions. We often don't get full subscriber data in
the Hipchat export, so this function just autosubscribes all
users to every public stream. This returns a list of Subscription
dicts.
'''
subscriptions = [] # type: List[ZerverFieldsT]
public_stream_ids = {
stream['id']
for stream in zerver_stream
if not stream['invite_only']
}
public_stream_recipient_ids = {
recipient['id']
for recipient in zerver_recipient
if recipient['type'] == Recipient.STREAM
and recipient['type_id'] in public_stream_ids
}
user_ids = [
user['id']
for user in zerver_userprofile
]
for recipient_id in public_stream_recipient_ids:
for user_id in user_ids:
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_private_stream_subscriptions(
get_users: Callable[..., Set[int]],
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions = [] # type: List[ZerverFieldsT]
stream_ids = {
stream['id']
for stream in zerver_stream
if stream['invite_only']
}
recipient_map = {
recipient['id']: recipient['type_id'] # recipient_id -> stream_id
for recipient in zerver_recipient
if recipient['type'] == Recipient.STREAM
and recipient['type_id'] in stream_ids
}
for recipient_id, stream_id in recipient_map.items():
user_ids = get_users(stream_id=stream_id)
for user_id in user_ids:
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_personal_subscriptions(zerver_recipient: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions = [] # type: List[ZerverFieldsT]
personal_recipients = [
recipient
for recipient in zerver_recipient
if recipient['type'] == Recipient.PERSONAL
]
for recipient in personal_recipients:
recipient_id = recipient['id']
user_id = recipient['type_id']
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_recipient(type_id: int, recipient_id: int, type: int) -> ZerverFieldsT:
recipient = Recipient(
        type_id=type_id, # stream id, or user id for personal recipients
id=recipient_id,
type=type)
recipient_dict = model_to_dict(recipient)
return recipient_dict
def build_recipients(zerver_userprofile: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
'''
As of this writing, we only use this in the HipChat
conversion. The Slack and Gitter conversions do it more
tightly integrated with creating other objects.
'''
recipients = []
for user in zerver_userprofile:
type_id = user['id']
type = Recipient.PERSONAL
recipient = Recipient(
type_id=type_id,
id=NEXT_ID('recipient'),
type=type,
)
recipient_dict = model_to_dict(recipient)
recipients.append(recipient_dict)
for stream in zerver_stream:
type_id = stream['id']
type = Recipient.STREAM
recipient = Recipient(
type_id=type_id,
id=NEXT_ID('recipient'),
type=type,
)
recipient_dict = model_to_dict(recipient)
recipients.append(recipient_dict)
return recipients
def build_realm(zerver_realm: List[ZerverFieldsT], realm_id: int,
domain_name: str) -> ZerverFieldsT:
realm = dict(zerver_client=[{"name": "populate_db", "id": 1},
{"name": "website", "id": 2},
{"name": "API", "id": 3}],
zerver_customprofilefield=[],
zerver_customprofilefieldvalue=[],
zerver_userpresence=[], # shows last logged in data, which is not available
zerver_userprofile_mirrordummy=[],
zerver_realmdomain=[{"realm": realm_id,
"allow_subdomains": False,
"domain": domain_name,
"id": realm_id}],
zerver_useractivity=[],
zerver_realm=zerver_realm,
zerver_huddle=[],
zerver_userprofile_crossrealm=[],
zerver_useractivityinterval=[],
zerver_reaction=[],
zerver_realmemoji=[],
zerver_realmfilter=[])
return realm
def build_usermessages(zerver_usermessage: List[ZerverFieldsT],
subscriber_map: Dict[int, Set[int]],
recipient_id: int,
mentioned_user_ids: List[int],
message_id: int,
long_term_idle: Optional[Set[int]]=None) -> Tuple[int, int]:
user_ids = subscriber_map.get(recipient_id, set())
if long_term_idle is None:
long_term_idle = set()
user_messages_created = 0
user_messages_skipped = 0
if user_ids:
for user_id in sorted(user_ids):
is_mentioned = user_id in mentioned_user_ids
# Slack and Gitter don't yet triage private messages.
# It's possible we don't even get PMs from them.
is_private = False
if not is_mentioned and not is_private and user_id in long_term_idle:
# these users are long-term idle
user_messages_skipped += 1
continue
user_messages_created += 1
usermessage = build_user_message(
user_id=user_id,
message_id=message_id,
is_private=is_private,
is_mentioned=is_mentioned,
)
zerver_usermessage.append(usermessage)
return (user_messages_created, user_messages_skipped)
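# Illustrative sketch with hypothetical data: with subscribers {1, 2, 3} on
# recipient 7, user 3 long-term idle, and nobody mentioned, one UserMessage
# row is skipped and two are created:
#
#   usermessages = []  # appended to in place
#   created, skipped = build_usermessages(
#       zerver_usermessage=usermessages,
#       subscriber_map={7: {1, 2, 3}},
#       recipient_id=7,
#       mentioned_user_ids=[],
#       message_id=100,
#       long_term_idle={3},
#   )
#   # created == 2, skipped == 1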
def build_user_message(user_id: int,
message_id: int,
is_private: bool,
is_mentioned: bool) -> ZerverFieldsT:
flags_mask = 1 # For read
if is_mentioned:
flags_mask += 8 # For mentioned
if is_private:
flags_mask += 2048 # For is_private
id = NEXT_ID('user_message')
usermessage = dict(
id=id,
user_profile=user_id,
message=message_id,
flags_mask=flags_mask,
)
return usermessage
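# Illustrative sketch: flags_mask above is a bitwise combination of Zulip's
# UserMessage flag bits: read (1), mentioned (8) and is_private (2048). A
# mentioned private message therefore carries 1 + 8 + 2048 == 2057
# (hypothetical ids):
#
#   build_user_message(user_id=1, message_id=5,
#                      is_private=True, is_mentioned=True)['flags_mask']  # 2057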
def build_defaultstream(realm_id: int, stream_id: int,
defaultstream_id: int) -> ZerverFieldsT:
defaultstream = dict(
stream=stream_id,
realm=realm_id,
id=defaultstream_id)
return defaultstream
def build_stream(date_created: Any, realm_id: int, name: str,
description: str, stream_id: int, deactivated: bool=False,
invite_only: bool=False) -> ZerverFieldsT:
stream = Stream(
name=name,
deactivated=deactivated,
description=description,
date_created=date_created,
invite_only=invite_only,
id=stream_id)
stream_dict = model_to_dict(stream,
exclude=['realm'])
stream_dict['realm'] = realm_id
return stream_dict
def build_message(topic_name: str, pub_date: float, message_id: int, content: str,
rendered_content: Optional[str], user_id: int, recipient_id: int,
has_image: bool=False, has_link: bool=False,
has_attachment: bool=True) -> ZerverFieldsT:
zulip_message = Message(
rendered_content_version=1, # this is Zulip specific
pub_date=pub_date,
id=message_id,
content=content,
rendered_content=rendered_content,
has_image=has_image,
has_attachment=has_attachment,
has_link=has_link)
zulip_message.set_topic_name(topic_name)
zulip_message_dict = model_to_dict(zulip_message,
exclude=['recipient', 'sender', 'sending_client'])
zulip_message_dict['sender'] = user_id
zulip_message_dict['sending_client'] = 1
zulip_message_dict['recipient'] = recipient_id
return zulip_message_dict
def build_attachment(realm_id: int, message_ids: Set[int],
user_id: int, fileinfo: ZerverFieldsT, s3_path: str,
zerver_attachment: List[ZerverFieldsT]) -> None:
"""
This function should be passed a 'fileinfo' dictionary, which contains
    information about 'size', 'created' (creation time) and 'name' (filename).
"""
attachment_id = NEXT_ID('attachment')
attachment = Attachment(
id=attachment_id,
size=fileinfo['size'],
create_time=fileinfo['created'],
is_realm_public=True,
path_id=s3_path,
file_name=fileinfo['name'])
attachment_dict = model_to_dict(attachment,
exclude=['owner', 'messages', 'realm'])
attachment_dict['owner'] = user_id
attachment_dict['messages'] = list(message_ids)
attachment_dict['realm'] = realm_id
zerver_attachment.append(attachment_dict)
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,
threads: int, size_url_suffix: str='') -> List[ZerverFieldsT]:
"""
This function gets the avatar of the user and saves it in the
user's avatar directory with both the extensions '.png' and '.original'
Required parameters:
1. avatar_list: List of avatars to be mapped in avatars records.json file
2. avatar_dir: Folder where the downloaded avatars are saved
3. realm_id: Realm ID.
We use this for Slack and Gitter conversions, where avatars need to be
downloaded. For simpler conversions see write_avatar_png.
"""
def get_avatar(avatar_upload_item: List[str]) -> None:
avatar_url = avatar_upload_item[0]
image_path = os.path.join(avatar_dir, avatar_upload_item[1])
original_image_path = os.path.join(avatar_dir, avatar_upload_item[2])
response = requests.get(avatar_url + size_url_suffix, stream=True)
with open(image_path, 'wb') as image_file:
shutil.copyfileobj(response.raw, image_file)
shutil.copy(image_path, original_image_path)
logging.info('######### GETTING AVATARS #########\n')
logging.info('DOWNLOADING AVATARS .......\n')
avatar_original_list = []
avatar_upload_list = []
for avatar in avatar_list:
avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
avatar_url = avatar['path']
avatar_original = dict(avatar)
image_path = ('%s.png' % (avatar_hash))
original_image_path = ('%s.original' % (avatar_hash))
avatar_upload_list.append([avatar_url, image_path, original_image_path])
# We don't add the size field here in avatar's records.json,
# since the metadata is not needed on the import end, and we
# don't have it until we've downloaded the files anyway.
avatar['path'] = image_path
avatar['s3_path'] = image_path
avatar_original['path'] = original_image_path
avatar_original['s3_path'] = original_image_path
avatar_original_list.append(avatar_original)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel_wrapper(get_avatar, avatar_upload_list, threads=threads):
output.append(job)
logging.info('######### GETTING AVATARS FINISHED #########\n')
return avatar_list + avatar_original_list
def write_avatar_png(avatar_folder: str,
realm_id: int,
user_id: int,
bits: bytes) -> ZerverFieldsT:
'''
Use this function for conversions like Hipchat where
the bits for the .png file come in something like
a users.json file, and where we don't have to
fetch avatar images externally.
'''
avatar_hash = user_avatar_path_from_ids(
user_profile_id=user_id,
realm_id=realm_id,
)
image_fn = avatar_hash + '.original'
image_path = os.path.join(avatar_folder, image_fn)
with open(image_path, 'wb') as image_file:
image_file.write(bits)
# Return metadata that eventually goes in records.json.
metadata = dict(
path=image_path,
s3_path=image_path,
realm_id=realm_id,
user_profile_id=user_id,
# We only write the .original file; ask the importer to do the thumbnailing.
importer_should_thumbnail=True,
)
return metadata
ListJobData = TypeVar('ListJobData')
def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[ListJobData],
threads: int=6) -> Iterable[Tuple[int, List[ListJobData]]]:
logging.info("Distributing %s items across %s threads" % (len(full_items), threads))
def wrapping_function(items: List[ListJobData]) -> int:
count = 0
for item in items:
try:
f(item)
except Exception:
logging.info("Error processing item: %s" % (item,))
traceback.print_exc()
count += 1
if count % 1000 == 0:
logging.info("A download thread finished %s items" % (count,))
return 0
job_lists = [full_items[i::threads] for i in range(threads)] # type: List[List[ListJobData]]
return run_parallel(wrapping_function, job_lists, threads=threads)
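# Illustrative sketch: the i::threads stride slicing above deals items out to
# the threads round-robin, e.g. five items over two threads (hypothetical):
#
#   items = ['a', 'b', 'c', 'd', 'e']
#   [items[i::2] for i in range(2)]  # -> [['a', 'c', 'e'], ['b', 'd']]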
def process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,
threads: int) -> List[ZerverFieldsT]:
"""
This function downloads the uploads and saves it in the realm's upload directory.
Required parameters:
1. upload_list: List of uploads to be mapped in uploads records.json file
2. upload_dir: Folder where the downloaded uploads are saved
"""
def get_uploads(upload: List[str]) -> None:
upload_url = upload[0]
upload_path = upload[1]
upload_path = os.path.join(upload_dir, upload_path)
response = requests.get(upload_url, stream=True)
os.makedirs(os.path.dirname(upload_path), exist_ok=True)
with open(upload_path, 'wb') as upload_file:
shutil.copyfileobj(response.raw, upload_file)
logging.info('######### GETTING ATTACHMENTS #########\n')
logging.info('DOWNLOADING ATTACHMENTS .......\n')
upload_url_list = []
for upload in upload_list:
upload_url = upload['path']
upload_s3_path = upload['s3_path']
upload_url_list.append([upload_url, upload_s3_path])
upload['path'] = upload_s3_path
    # Run downloads in parallel
output = []
for (status, job) in run_parallel_wrapper(get_uploads, upload_url_list, threads=threads):
output.append(job)
logging.info('######### GETTING ATTACHMENTS FINISHED #########\n')
return upload_list
def build_realm_emoji(realm_id: int,
name: str,
id: int,
file_name: str) -> ZerverFieldsT:
return model_to_dict(
RealmEmoji(
realm_id=realm_id,
name=name,
id=id,
file_name=file_name,
)
)
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:
"""
This function downloads the custom emojis and saves in the output emoji folder.
Required parameters:
1. zerver_realmemoji: List of all RealmEmoji objects to be imported
2. emoji_dir: Folder where the downloaded emojis are saved
3. emoji_url_map: Maps emoji name to its url
"""
def get_emojis(upload: List[str]) -> None:
emoji_url = upload[0]
emoji_path = upload[1]
upload_emoji_path = os.path.join(emoji_dir, emoji_path)
response = requests.get(emoji_url, stream=True)
os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
with open(upload_emoji_path, 'wb') as emoji_file:
shutil.copyfileobj(response.raw, emoji_file)
emoji_records = []
upload_emoji_list = []
logging.info('######### GETTING EMOJIS #########\n')
logging.info('DOWNLOADING EMOJIS .......\n')
for emoji in zerver_realmemoji:
emoji_url = emoji_url_map[emoji['name']]
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=emoji['realm'],
emoji_file_name=emoji['name'])
upload_emoji_list.append([emoji_url, emoji_path])
emoji_record = dict(emoji)
emoji_record['path'] = emoji_path
emoji_record['s3_path'] = emoji_path
emoji_record['realm_id'] = emoji_record['realm']
emoji_record.pop('realm')
emoji_records.append(emoji_record)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel_wrapper(get_emojis, upload_emoji_list, threads=threads):
output.append(job)
logging.info('######### GETTING EMOJIS FINISHED #########\n')
return emoji_records
def create_converted_data_files(data: Any, output_dir: str, file_path: str) -> None:
output_file = output_dir + file_path
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, 'w') as fp:
ujson.dump(data, fp, indent=4)
|
{
"content_hash": "d2d9171b66a5390c3cf4182d696000e1",
"timestamp": "",
"source": "github",
"line_count": 601,
"max_line_length": 97,
"avg_line_length": 36.42595673876872,
"alnum_prop": 0.5992143248675316,
"repo_name": "dhcrzf/zulip",
"id": "d45a1277fa68898d9e5485d11167b29bd11f6a27",
"size": "21892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/data_import/import_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436713"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "673974"
},
{
"name": "JavaScript",
"bytes": "2951950"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "72908"
},
{
"name": "Python",
"bytes": "6188005"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "118284"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
"""
This module contains the definition of the `Device` class, used to represent a
physical storage device connected to the system.
Once initialized, class members contain all relevant information about the
device, including its model, serial number, firmware version, and all SMART
attribute data.
Methods are provided for initiating self tests and querying their results.
"""
# Python built-ins
from __future__ import print_function
import re # Don't delete this 'un-used' import
from subprocess import Popen, PIPE
import warnings
# pySMART module imports
from .attribute import Attribute
from .test_entry import Test_Entry
from .utils import *
class Device(object):
"""
Represents any device attached to an internal storage interface, such as a
hard drive or DVD-ROM, and detected by smartmontools. Includes eSATA
(considered SATA) but excludes other external devices (USB, Firewire).
"""
def __init__(self, name, interface=None):
"""Instantiates and initializes the `pySMART.device.Device`."""
assert interface is None or interface.lower() in [
'ata', 'csmi', 'sas', 'sat', 'sata', 'scsi']
self.name = name.replace('/dev/', '')
"""
**(str):** Device's hardware ID, without the '/dev/' prefix.
(ie: sda (Linux), pd0 (Windows))
"""
if self.name[:2].lower() == 'pd':
self.name = pd_to_sd(self.name[2:])
self.model = None
"""**(str):** Device's model number."""
self.serial = None
"""**(str):** Device's serial number."""
self.interface = interface
"""
**(str):** Device's interface type. Must be one of:
* **ATA** - Advanced Technology Attachment
* **SATA** - Serial ATA
* **SCSI** - Small Computer Systems Interface
* **SAS** - Serial Attached SCSI
* **SAT** - SCSI-to-ATA Translation (SATA device plugged into a
SAS port)
* **CSMI** - Common Storage Management Interface (Intel ICH /
Matrix RAID)
Generally this should not be specified to allow auto-detection to occur.
Otherwise, this value overrides the auto-detected type and could
produce unexpected or no data.
"""
self.capacity = None
"""**(str):** Device's user capacity."""
self.firmware = None
"""**(str):** Device's firmware version."""
self.supports_smart = False
"""
**(bool):** True if the device supports SMART (or SCSI equivalent) and
has the feature set enabled. False otherwise.
"""
self.assessment = None
"""**(str):** SMART health self-assessment as reported by the device."""
self.messages = []
"""
**(list of str):** Contains any SMART warnings or other error messages
reported by the device (ie: ASCQ codes).
"""
self.is_ssd = None
"""
**(bool):** True if this device is a Solid State Drive.
False otherwise.
"""
self.attributes = [None] * 256
"""
**(list of `Attribute`):** Contains the complete SMART table information
for this device, as provided by smartctl. Indexed by attribute #,
        values are set to 'None' for attributes not supported by this device.
"""
self.tests = []
"""
        **(list of `Test_Entry`):** Contains the complete SMART self-test log
for this device, as provided by smartctl. If no SMART self-tests have
been recorded, contains a `None` type instead.
"""
self._test_running = False
"""
**(bool):** True if a self-test is currently being run. False otherwise.
"""
self._test_ECD = None
"""
**(str):** Estimated completion time of the running SMART selftest.
Not provided by SAS/SCSI devices.
"""
self.diags = {}
"""
**(dict of str):** Contains parsed and processed diagnostic information
extracted from the SMART information. Currently only populated for
SAS and SCSI devices, since ATA/SATA SMART attributes are manufacturer
proprietary.
"""
if self.name is None:
warnings.warn("\nDevice '{0}' does not exist! "
"This object should be destroyed.".format(name))
return
# If no interface type was provided, scan for the device
elif self.interface is None:
_grep = 'find' if OS == 'Windows' else 'grep'
cmd = Popen('smartctl --scan-open | {0} "{1}"'.format(
_grep, self.name), shell=True, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
if _stdout != '':
self.interface = _stdout.split(' ')[2]
# Disambiguate the generic interface to a specific type
self._classify()
else:
warnings.warn("\nDevice '{0}' does not exist! "
"This object should be destroyed.".format(name))
return
# If a valid device was detected, populate its information
if self.interface is not None:
self.update()
def __repr__(self):
"""Define a basic representation of the class object."""
return "<%s device on /dev/%s mod:%s sn:%s>" % (
self.interface.upper(), self.name, self.model, self.serial)
def all_attributes(self):
"""
Prints the entire SMART attribute table, in a format similar to
the output of smartctl.
"""
header_printed = False
for attr in self.attributes:
if attr is not None:
if not header_printed:
print("{0:>3} {1:24}{2:4}{3:4}{4:4}{5:9}{6:8}{7:12}"
"{8}".format(
'ID#', 'ATTRIBUTE_NAME', 'CUR', 'WST', 'THR',
'TYPE', 'UPDATED', 'WHEN_FAIL', 'RAW'))
header_printed = True
print(attr)
if not header_printed:
print("This device does not support SMART attributes.")
def all_selftests(self):
"""
Prints the entire SMART self-test log, in a format similar to
the output of smartctl.
"""
if self.tests is not None:
if smartctl_type[self.interface] == 'scsi':
print("{0:3}{1:17}{2:23}{3:7}{4:14}{5:15}".format(
'ID', 'Test Description', 'Status', 'Hours',
'1st_Error@LBA', '[SK ASC ASCQ]'))
else:
print("{0:3}{1:17}{2:30}{3:5}{4:7}{5:17}".format(
'ID', 'Test_Description', 'Status', 'Left', 'Hours',
'1st_Error@LBA'))
for test in self.tests:
print(test)
else:
print("No self-tests have been logged for this device.")
def _classify(self):
"""
Disambiguates generic device types ATA and SCSI into more specific
ATA, SATA, SAS, SAT and SCSI.
"""
# SCSI devices might be SCSI, SAS or SAT
# ATA device might be ATA or SATA
if self.interface in ['scsi', 'ata']:
if self.interface == 'scsi':
test = 'sat'
else:
test = 'sata'
# Look for a SATA PHY to detect SAT and SATA
cmd = Popen('smartctl -d {0} -l sataphy /dev/{1}'.format(
smartctl_type[test], self.name), shell=True,
stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
if 'GP Log 0x11' in _stdout.split('\n')[3]:
self.interface = test
# If device type is still SCSI (not changed to SAT above), then
# check for a SAS PHY
if self.interface == 'scsi':
cmd = Popen('smartctl -d scsi -l sasphy /dev/{0}'.format(
self.name), shell=True, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
if 'SAS SSP' in _stdout.split('\n')[4]:
self.interface = 'sas'
# Some older SAS devices do not support the SAS PHY log command.
# For these, see if smartmontools reports a transport protocol.
else:
cmd = Popen('smartctl -d scsi -a /dev/{0}'.format(
self.name), shell=True, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
for line in _stdout.split('\n'):
if 'Transport protocol' in line and 'SAS' in line:
self.interface = 'sas'
def get_selftest_result(self, output=None):
"""
Refreshes a device's `pySMART.device.Device.tests` attribute to obtain
the latest test results. If a new test result is obtained, its content
is returned.
###Args:
* **output (str, optional):** If set to 'str', the string
representation of the most recent test result will be returned, instead
of a `Test_Entry` object.
##Returns:
* **(int):** Return status code. One of the following:
* 0 - Success. Object (or optionally, string rep) is attached.
* 1 - Self-test in progress. Must wait for it to finish.
* 2 - No new test results.
* **(`Test_Entry` or str):** Most recent `Test_Entry` object (or
        optionally its string representation) if new data exists. Status
message string on failure.
        * **(str):** Estimated completion time of a test in progress, if known.
Otherwise 'None'.
"""
# SCSI self-test logs hold 20 entries while ATA logs hold 21
if smartctl_type[self.interface] == 'scsi':
maxlog = 20
else:
maxlog = 21
# If we looked only at the most recent test result we could be fooled
# by two short tests run close together (within the same hour) appearing
# identical. Comparing the length of the log adds some confidence until
# it maxes, as above. Comparing the least-recent test result greatly
# diminishes the chances that two sets of two tests each were run within
# an hour of themselves, but with 16-17 other tests run in between them.
if self.tests is not None:
_first_entry = self.tests[0]
_len = len(self.tests)
_last_entry = self.tests[_len - 1]
else:
_len = 0
self.update()
# Check whether the list got longer (ie: new entry)
if self.tests is not None and len(self.tests) != _len:
# If so, for ATA, return the newest test result
if not ('in progress' in self.tests[0].status or
'NOW' in self.tests[0].hours):
self._test_running = False
self._test_ECD = None
if output == 'str':
return (0, str(self.tests[0]), None)
else:
return (0, self.tests[0], None)
else:
self._test_running = True
elif _len == maxlog:
# If not, because it's max size already, check for new entries
if ((_first_entry.type != self.tests[0].type or
_first_entry.hours != self.tests[0].hours or
_last_entry.type != self.tests[len(self.tests) - 1].type or
_last_entry.hours != self.tests[len(self.tests) - 1].hours)
                    and 'NOW' not in self.tests[0].hours):
self._test_running = False
self._test_ECD = None
if output == 'str':
return (0, str(self.tests[0]), None)
else:
return (0, self.tests[0], None)
else:
if 'NOW' in self.tests[0].hours:
self._test_running = True
# If nothing new was found, see if we know of a running test.
if self._test_running:
if (not ('in progress' in self.tests[0].status or
'NOW' in self.tests[0].hours) and
smartctl_type[self.interface] == 'scsi'):
self._test_running = False
self._test_ECD = None
if output == 'str':
return (0, str(self.tests[0]), None)
else:
return (0, self.tests[0], None)
else:
return (1, 'Self-test in progress. Please wait.',
self._test_ECD)
else:
return (2, 'No new self-test results found.', None)
else:
# If log is still empty, or did not get longer, see whether we
# know of a running test.
if self._test_running:
if (not ('in progress' in self.tests[0].status or
'NOW' in self.tests[0].hours) and
smartctl_type[self.interface] == 'scsi'):
self._test_running = False
self._test_ECD = None
if output == 'str':
return (0, str(self.tests[0]), None)
else:
return (0, self.tests[0], None)
else:
return (1, 'Self-test in progress. Please wait.',
self._test_ECD)
else:
return (2, 'No new self-test results found.', None)
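    # Illustrative sketch, not part of the original class: callers are
    # expected to poll this method and branch on the status code
    # (`dev` is a hypothetical Device instance):
    #
    #   code, result, ecd = dev.get_selftest_result()
    #   if code == 0:    # new result attached
    #       print(result)
    #   elif code == 1:  # test still running; ecd may hold an estimate
    #       pass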
def _guess_SMART_type(self, line):
"""
This function is not used in the generic wrapper, however the header
is defined so that it can be monkey-patched by another application.
"""
pass
def _make_SMART_warnings(self):
"""
Parses an ATA/SATA SMART table for attributes with the 'when_failed'
        value set. Generates a warning message for any such attributes and
updates the self-assessment value if necessary.
"""
if smartctl_type[self.interface] == 'scsi':
return
for attr in self.attributes:
if attr is not None:
if attr.when_failed == 'In_the_past':
self.messages.append("".join(
[attr.name, " failed in the past with value ",
attr.worst, ". [Threshold: ", attr.thresh, ']']))
if not self.assessment == 'FAIL':
self.assessment = 'WARN'
elif attr.when_failed == 'FAILING_NOW':
self.assessment = 'FAIL'
self.messages.append("".join(
[attr.name, " is failing now with value ",
attr.value, ". [Threshold: ", attr.thresh, ']']))
elif not attr.when_failed == '-':
self.messages.append("".join(
[attr.name, " says it failed '", attr.when_failed,
"'. [V=", attr.value, ",W=", attr.worst, ",T=",
attr.thresh, ']']))
if not self.assessment == 'FAIL':
self.assessment = 'WARN'
def run_selftest(self, test_type):
"""
Instructs a device to begin a SMART self-test. All tests are run in
'offline' / 'background' mode, allowing normal use of the device while
it is being tested.
##Args:
* **test_type (str):** The type of test to run. Accepts the following
(not case sensitive):
        * **short** - Brief electro-mechanical functionality check.
Generally takes 2 minutes or less.
* **long** - Thorough electro-mechanical functionality check,
including complete recording media scan. Generally takes several
hours.
* **conveyance** - Brief test used to identify damage incurred in
shipping. Generally takes 5 minutes or less. **This test is not
supported by SAS or SCSI devices.**
##Returns:
* **(int):** Return status code. One of the following:
* 0 - Self-test initiated successfully
* 1 - Previous self-test running. Must wait for it to finish.
* 2 - Unknown or illegal test type requested.
* 3 - Unspecified smartctl error. Self-test not initiated.
* **(str):** Return status message.
* **(str):** Estimated self-test completion time if a test is started.
Otherwise 'None'.
"""
if self._test_running:
return (1, 'Self-test already in progress. Please wait.',
self._test_ECD)
if test_type.lower() in ['short', 'long', 'conveyance']:
if (test_type.lower() == 'conveyance' and
smartctl_type[self.interface] == 'scsi'):
return (2, "Cannot perform 'conveyance' test on SAS/SCSI "
"devices.", None)
cmd = Popen('smartctl -d {0} -t {1} /dev/{2}'.format(
smartctl_type[self.interface], test_type, self.name),
shell=True, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
_success = False
_running = False
for line in _stdout.split('\n'):
if 'has begun' in line:
_success = True
self._test_running = True
if 'aborting current test' in line:
_running = True
if _success and 'complete after' in line:
self._test_ECD = line[25:].rstrip()
if _success:
return (0, "Self-test started successfully", self._test_ECD)
else:
if _running:
return (1, 'Self-test already in progress. Please wait.',
self._test_ECD)
else:
return (3, 'Unspecified Error. Self-test not started.',
None)
else:
return (2, "Unknown test type '{0}' requested.".format(test_type),
None)
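    # Illustrative sketch with a hypothetical Device instance `dev`: starting
    # a short test and handling the return codes documented above:
    #
    #   code, message, ecd = dev.run_selftest('short')
    #   if code == 0:
    #       print("Started; estimated completion: %s" % ecd)
    #   elif code == 1:
    #       print(message)  # a self-test is already running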
def update(self):
"""
Queries for device information using smartctl and updates all
class members, including the SMART attribute table and self-test log.
Can be called at any time to refresh the `pySMART.device.Device`
object's data content.
"""
cmd = Popen('smartctl -d {0} -a /dev/{1}'.format(
smartctl_type[self.interface], self.name), shell=True,
stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
parse_self_tests = False
parse_ascq = False
self.tests = []
for line in _stdout.split('\n'):
if line.strip() == '': # Blank line stops sub-captures
                if parse_self_tests:
parse_self_tests = False
if len(self.tests) == 0:
self.tests = None
if parse_ascq:
parse_ascq = False
self.messages.append(message)
if parse_ascq:
message += ' ' + line.lstrip().rstrip()
if parse_self_tests:
num = line[1:3]
if smartctl_type[self.interface] == 'scsi':
format = 'scsi'
test_type = line[5:23].rstrip()
status = line[23:46].rstrip()
segment = line[46:55].lstrip().rstrip()
hours = line[55:65].lstrip().rstrip()
LBA = line[65:78].lstrip().rstrip()
line_ = ' '.join(line.split('[')[1].split()).split(' ')
sense = line_[0]
ASC = line_[1]
ASCQ = line_[2][:-1]
self.tests.append(Test_Entry(
format, num, test_type, status, hours, LBA,
segment=segment, sense=sense, ASC=ASC, ASCQ=ASCQ))
else:
format = 'ata'
test_type = line[5:25].rstrip()
status = line[25:54].rstrip()
remain = line[54:58].lstrip().rstrip()
hours = line[60:68].lstrip().rstrip()
LBA = line[77:].rstrip()
self.tests.append(Test_Entry(
format, num, test_type, status, hours, LBA,
remain=remain))
# Basic device information parsing
if 'Model Family' in line:
self._guess_SMART_type(line.lower())
if 'Device Model' in line or 'Product' in line:
self.model = line.split(':')[1].lstrip().rstrip()
self._guess_SMART_type(line.lower())
if 'Serial Number' in line or 'Serial number' in line:
self.serial = line.split(':')[1].split()[0].rstrip()
if 'LU WWN' in line:
self._guess_SMART_type(line.lower())
if 'Firmware Version' in line or 'Revision' in line:
self.firmware = line.split(':')[1].lstrip().rstrip()
if 'User Capacity' in line:
self.capacity = (
line.replace(']', '[').split('[')[1].lstrip().rstrip())
if 'SMART support' in line:
self.supports_smart = 'Enabled' in line
if 'does not support SMART' in line:
self.supports_smart = False
if 'Rotation Rate' in line:
if 'Solid State Device' in line:
self.is_ssd = True
elif 'rpm' in line:
self.is_ssd = False
if 'SMART overall-health self-assessment' in line: # ATA devices
if line.split(':')[1].strip() == 'PASSED':
self.assessment = 'PASS'
else:
self.assessment = 'FAIL'
if 'SMART Health Status' in line: # SCSI devices
if line.split(':')[1].strip() == 'OK':
self.assessment = 'PASS'
else:
self.assessment = 'FAIL'
parse_ascq = True # Set flag to capture status message
message = line.split(':')[1].lstrip().rstrip()
# SMART Attribute table parsing
if '0x0' in line and '_' in line:
# Replace multiple space separators with a single space, then
# tokenize the string on space delimiters
line_ = ' '.join(line.split()).split(' ')
                if '' not in line_:
self.attributes[int(line_[0])] = Attribute(
                        line_[0], line_[1], line_[2], line_[3], line_[4],
line_[5], line_[6], line_[7], line_[8], line_[9])
if 'Description' in line and '(hours)' in line:
parse_self_tests = True # Set flag to capture test entries
if 'No self-tests have been logged' in line:
self.tests = None
# Everything from here on is parsing SCSI information that takes
# the place of similar ATA SMART information
if 'used endurance' in line:
pct = int(line.split(':')[1].strip()[:-1])
self.diags['Life_Left'] = str(100 - pct) + '%'
if 'Specified cycle count' in line:
self.diags['Start_Stop_Spec'] = line.split(':')[1].strip()
if self.diags['Start_Stop_Spec'] == '0':
self.diags['Start_Stop_Pct_Left'] = '-'
if 'Accumulated start-stop cycles' in line:
self.diags['Start_Stop_Cycles'] = line.split(':')[1].strip()
                if 'Start_Stop_Pct_Left' not in self.diags:
                    self.diags['Start_Stop_Pct_Left'] = str(int(round(
                        100 - (100.0 * int(self.diags['Start_Stop_Cycles']) /
                               int(self.diags['Start_Stop_Spec'])), 0))) + '%'
if 'Specified load-unload count' in line:
self.diags['Load_Cycle_Spec'] = line.split(':')[1].strip()
if self.diags['Load_Cycle_Spec'] == '0':
self.diags['Load_Cycle_Pct_Left'] = '-'
if 'Accumulated load-unload cycles' in line:
self.diags['Load_Cycle_Count'] = line.split(':')[1].strip()
                if 'Load_Cycle_Pct_Left' not in self.diags:
                    self.diags['Load_Cycle_Pct_Left'] = str(int(round(
                        100 - (100.0 * int(self.diags['Load_Cycle_Count']) /
                               int(self.diags['Load_Cycle_Spec'])), 0))) + '%'
if 'Elements in grown defect list' in line:
self.diags['Reallocated_Sector_Ct'] = line.split(':')[1].strip()
if 'read:' in line and smartctl_type[self.interface] == 'scsi':
line_ = ' '.join(line.split()).split(' ')
if (line_[1] == '0' and line_[2] == '0' and
line_[3] == '0' and line_[4] == '0'):
self.diags['Corrected_Reads'] = '0'
elif line_[4] == '0':
self.diags['Corrected_Reads'] = str(
int(line_[1]) + int(line_[2]) + int(line_[3]))
else:
self.diags['Corrected_Reads'] = line_[4]
self.diags['Reads_GB'] = line_[6]
self.diags['Uncorrected_Reads'] = line_[7]
if 'write:' in line and smartctl_type[self.interface] == 'scsi':
line_ = ' '.join(line.split()).split(' ')
if (line_[1] == '0' and line_[2] == '0' and
line_[3] == '0' and line_[4] == '0'):
self.diags['Corrected_Writes'] = '0'
elif line_[4] == '0':
self.diags['Corrected_Writes'] = str(
int(line_[1]) + int(line_[2]) + int(line_[3]))
else:
self.diags['Corrected_Writes'] = line_[4]
self.diags['Writes_GB'] = line_[6]
self.diags['Uncorrected_Writes'] = line_[7]
if 'verify:' in line and smartctl_type[self.interface] == 'scsi':
line_ = ' '.join(line.split()).split(' ')
if (line_[1] == '0' and line_[2] == '0' and
line_[3] == '0' and line_[4] == '0'):
self.diags['Corrected_Verifies'] = '0'
elif line_[4] == '0':
self.diags['Corrected_Verifies'] = str(
int(line_[1]) + int(line_[2]) + int(line_[3]))
else:
self.diags['Corrected_Verifies'] = line_[4]
self.diags['Verifies_GB'] = line_[6]
self.diags['Uncorrected_Verifies'] = line_[7]
if 'non-medium error count' in line:
self.diags['Non-Medium_Errors'] = line.split(':')[1].strip()
if 'Accumulated power on time' in line:
self.diags['Power_On_Hours'] = line.split(':')[1].split(' ')[1]
if not smartctl_type[self.interface] == 'scsi':
# Parse the SMART table for below-threshold attributes and create
# corresponding warnings for non-SCSI disks
self._make_SMART_warnings()
else:
# For SCSI disks, any diagnostic attribute which was not captured
# above gets set to '-' to indicate unsupported/unavailable.
for diag in ['Corrected_Reads', 'Corrected_Writes',
'Corrected_Verifies', 'Uncorrected_Reads',
'Uncorrected_Writes', 'Uncorrected_Verifies',
'Reallocated_Sector_Ct',
'Start_Stop_Spec', 'Start_Stop_Cycles',
'Load_Cycle_Spec', 'Load_Cycle_Count',
'Start_Stop_Pct_Left', 'Load_Cycle_Pct_Left',
'Power_On_Hours', 'Life_Left', 'Non-Medium_Errors',
'Reads_GB', 'Writes_GB', 'Verifies_GB']:
                if diag not in self.diags:
self.diags[diag] = '-'
# If not obtained above, make a direct attempt to extract power on
# hours from the background scan results log.
if self.diags['Power_On_Hours'] == '-':
                cmd = Popen('smartctl -d scsi -l background /dev/{0}'.format(
                    self.name), shell=True,
stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
for line in _stdout.split('\n'):
if 'power on time' in line:
self.diags['Power_On_Hours'] = line.split(
':')[1].split(' ')[1]
__all__ = ['Device']
|
{
"content_hash": "4f51d9b7d035ca8539064c8bbcaba01d",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 80,
"avg_line_length": 48.047775947281714,
"alnum_prop": 0.5020058289045088,
"repo_name": "raysguy/HTPC-Manager",
"id": "060b71930b1eb7e8668edefcef161d797dcec473",
"size": "29834",
"binary": false,
"copies": "3",
"ref": "refs/heads/rg-master",
"path": "libs/pySMART/device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "352"
},
{
"name": "CSS",
"bytes": "48922"
},
{
"name": "HTML",
"bytes": "168137"
},
{
"name": "JavaScript",
"bytes": "463144"
},
{
"name": "Python",
"bytes": "4685775"
},
{
"name": "R",
"bytes": "2187"
},
{
"name": "Shell",
"bytes": "5255"
}
],
"symlink_target": ""
}
|
"""Utilities and helper functions."""
import calendar
import datetime
import decimal
from ceilometer.openstack.common import timeutils
def recursive_keypairs(d, separator=':'):
"""Generator that produces sequence of keypairs for nested dictionaries.
"""
for name, value in sorted(d.iteritems()):
if isinstance(value, dict):
for subname, subvalue in recursive_keypairs(value):
yield ('%s%s%s' % (name, separator, subname), subvalue)
elif isinstance(value, (tuple, list)):
# When doing a pair of JSON encode/decode operations to the tuple,
# the tuple would become list. So we have to generate the value as
# list here.
yield name, list(map(lambda x: unicode(x).encode('utf-8'),
value))
else:
yield name, value
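# Illustrative sketch with hypothetical input: nested keys are joined with the
# separator, and each list element is utf-8 encoded:
#
#   list(recursive_keypairs({'a': {'b': 1}, 'c': 2}))
#   # -> [('a:b', 1), ('c', 2)]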
def dt_to_decimal(utc):
"""Datetime to Decimal.
Some databases don't store microseconds in datetime
so we always store as Decimal unixtime.
"""
decimal.getcontext().prec = 30
return decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + \
(decimal.Decimal(str(utc.microsecond)) /
decimal.Decimal("1000000.0"))
def decimal_to_dt(dec):
"""Return a datetime from Decimal unixtime format.
"""
if dec is None:
return None
integer = int(dec)
micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000)
    dt = datetime.datetime.utcfromtimestamp(integer)
    return dt.replace(microsecond=int(round(micro)))
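# Illustrative sketch: dt_to_decimal and decimal_to_dt round-trip a datetime,
# preserving microseconds (hypothetical value):
#
#   dt = datetime.datetime(2013, 6, 8, 3, 5, 55, 123456)
#   decimal_to_dt(dt_to_decimal(dt)) == dt  # True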
def sanitize_timestamp(timestamp):
"""Return a naive utc datetime object."""
if not timestamp:
return timestamp
if not isinstance(timestamp, datetime.datetime):
timestamp = timeutils.parse_isotime(timestamp)
return timeutils.normalize_time(timestamp)
def stringify_timestamps(data):
"""Stringify any datetimes in given dict."""
isa_timestamp = lambda v: isinstance(v, datetime.datetime)
return dict((k, v.isoformat() if isa_timestamp(v) else v)
for (k, v) in data.iteritems())
def dict_to_keyval(value, key_base=None):
"""Expand a given dict to its corresponding key-value pairs.
Generated keys are fully qualified, delimited using dot notation.
ie. key = 'key.child_key.grandchild_key[0]'
"""
val_iter, key_func = None, None
if isinstance(value, dict):
val_iter = value.iteritems()
key_func = lambda k: key_base + '.' + k if key_base else k
elif isinstance(value, (tuple, list)):
val_iter = enumerate(value)
key_func = lambda k: key_base + '[%d]' % k
if val_iter:
for k, v in val_iter:
key_gen = key_func(k)
if isinstance(v, dict) or isinstance(v, (tuple, list)):
for key_gen, v in dict_to_keyval(v, key_gen):
yield key_gen, v
else:
yield key_gen, v
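# Illustrative sketch with hypothetical input: nested dicts expand to dotted
# keys and sequences to indexed keys:
#
#   dict(dict_to_keyval({'a': {'b': 1}, 'c': [2, 3]}))
#   # -> {'a.b': 1, 'c[0]': 2, 'c[1]': 3}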
|
{
"content_hash": "f7658c2da2863eeaf9e170de14e82ca9",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 33.9080459770115,
"alnum_prop": 0.6247457627118644,
"repo_name": "rackerlabs/instrumented-ceilometer",
"id": "31abb43560c015f05049fc1be973636bfe05f43f",
"size": "3721",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149656"
},
{
"name": "JavaScript",
"bytes": "361114"
},
{
"name": "Python",
"bytes": "1897887"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='Pyganim',
version=__import__('pyganim').__version__,
url='http://inventwithpython.com/pyganim',
author='Al Sweigart',
author_email='al@inventwithpython.com',
description=('A sprite animation module for Pygame.'),
license='BSD',
packages=['pyganim'],
test_suite='tests',
keywords="pygame sprite animation game 2D graphics",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Environment :: MacOS X',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
{
"content_hash": "6d6822036c957f34407b535ef1eaf0cc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 58,
"avg_line_length": 34.483870967741936,
"alnum_prop": 0.6005612722170253,
"repo_name": "asweigart/pyganim",
"id": "9fe8736d9e661e7543c907350a7ff8a3f7ef3acc",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "90789"
}
],
"symlink_target": ""
}
|
from tools import BaseTool
class MyTool(BaseTool):
def __init__(self):
BaseTool.__init__(self, "Hello")
def run(self, browser):
print "ok"
|
{
"content_hash": "4a419d2b87e69af84489d3124c7f439d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 34,
"avg_line_length": 15,
"alnum_prop": 0.6533333333333333,
"repo_name": "Gr1ph00n/staticwebanalyzer",
"id": "98e5d167a4f88867dd7525bd20126d3239d9cf69",
"size": "302",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analyzer/my_tool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Bison",
"bytes": "236695"
},
{
"name": "C",
"bytes": "1610078"
},
{
"name": "C++",
"bytes": "844"
},
{
"name": "CSS",
"bytes": "38005"
},
{
"name": "JavaScript",
"bytes": "74162"
},
{
"name": "Makefile",
"bytes": "15767"
},
{
"name": "Python",
"bytes": "2936957"
},
{
"name": "Shell",
"bytes": "6465"
}
],
"symlink_target": ""
}
|
from sqlalchemy.ext.associationproxy import association_proxy
from octoprint_dashboard.app import db
class User(db.Model):
"""
    Instance of this class equals a single user record in the database.
    Class also behaves like a repository of User records.
    A User represents a human being who has logged into the app at least once.
"""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
access_token = db.Column(db.String(80))
refresh_token = db.Column(db.String(80))
superadmin = db.Column(db.Boolean, default=False)
group = association_proxy("group_user", "group")
def __init__(self, username=None, access_token=None, refresh_token=None, superadmin=False):
self.username = username
self.access_token = access_token
self.refresh_token = refresh_token
self.superadmin = superadmin
def __repr__(self):
return '<User %r>' % self.username
@staticmethod
def upsert(username, access_token, refresh_token):
"""
        Creates a user with the given parameters, or updates the access and refresh tokens of an existing user
"""
user = User.query.filter_by(username=username).scalar()
if user is None:
user = User(username, access_token, refresh_token)
db.session.add(user)
else:
user.access_token = access_token
user.refresh_token = refresh_token
db.session.commit()
return user
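    # Illustrative sketch with hypothetical tokens: repeated logins reuse the
    # same row and only refresh the stored tokens:
    #
    #   user = User.upsert('alice', 'new-access-token', 'new-refresh-token')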
@staticmethod
def upsert_superadmin(username):
"""
Makes user superadmin
"""
user = User.query.filter_by(username=username).scalar()
if user is None:
user = User(username, None, None)
user.superadmin = True
db.session.add(user)
else:
user.superadmin = True
db.session.commit()
def get_accessible_printers(self):
"""
Returns all printers accessible to user with admin role
"""
from octoprint_dashboard.model import Printer, Group, GroupUser
if self.superadmin:
printers = Printer.query.all()
else:
printers = Printer.query.join(Printer.group).join(Group.group_user).filter(User.id == self.id,
GroupUser.role == "admin").all()
return printers
def get_accessible_printers_id(self, printer_ids):
"""
Returns printers of given ids accessible to user with admin role
"""
from octoprint_dashboard.model import Printer, Group, GroupUser
if self.superadmin:
printers = Printer.query.filter(Printer.id.in_(printer_ids)).all()
else:
printers = Printer.query.filter(Printer.id.in_(printer_ids)) \
.join(Printer.group).join(Group.group_user) \
.filter(User.id == self.id, GroupUser.role == "admin").all()
return printers
def get_accessible_printer_id(self, printer_id):
"""
Returns printer of given id accessible to user with admin role or None
"""
from octoprint_dashboard.model import Printer, Group, GroupUser
if self.superadmin:
printer = Printer.query.get(printer_id)
else:
printer = Printer.query.filter(Printer.id == printer_id) \
.join(Printer.group).join(Group.group_user) \
.filter(User.id == self.id, GroupUser.role == "admin").scalar()
return printer
def get_printer_id(self, printer_id):
"""
Returns printer of given id accessible to user or None
"""
from octoprint_dashboard.model import Printer, Group
if self.superadmin:
printer = Printer.query.get(printer_id)
else:
printer = Printer.query.filter(Printer.id == printer_id) \
.join(Printer.group).join(Group.group_user) \
.filter(User.id == self.id).scalar()
return printer
def get_editable_groups(self):
"""
Returns groups accessible to user with admin role
"""
from octoprint_dashboard.model import Group, GroupUser
if self.superadmin:
groups = Group.query.all()
else:
groups = Group.query.join(Group.group_user).join(GroupUser.user).filter(User.id == self.id).filter(
GroupUser.role == "admin").all()
return groups
def get_editable_group_id(self, id):
"""
        Returns the group with the given id, if accessible to the user with admin role
"""
from octoprint_dashboard.model import Group, GroupUser
if self.superadmin:
group = Group.query.get(id)
else:
group = Group.query.join(Group.group_user).join(GroupUser.user).filter(User.id == self.id).filter(
GroupUser.role == "admin").filter(Group.id == id).scalar()
return group
def get_groups(self):
"""
Returns groups accessible to user
"""
from octoprint_dashboard.model import Group, GroupUser
if self.superadmin:
groups = Group.query.all()
else:
groups = Group.query.join(Group.group_user).join(GroupUser.user).filter(User.id == self.id).all()
return groups
|
{
"content_hash": "bfa3f4cdd40e1aecbe5930a3bb66f94f",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 119,
"avg_line_length": 34.97385620915033,
"alnum_prop": 0.5954027284619697,
"repo_name": "meadowfrey/OctoPrint-Dashboard",
"id": "ac674119de64fa86b8153dff1de4cd2ebb69e0f0",
"size": "5351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octoprint_dashboard/model/User.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "31278"
},
{
"name": "JavaScript",
"bytes": "45931"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "64319"
}
],
"symlink_target": ""
}
|
import re
import json
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from robobrowser import RoboBrowser
import requests
import dateutil.parser
from pivoteer.models import ExternalSessions
from core.utilities import discover_type
import lxml.html
from bs4 import BeautifulSoup
class MechanizedScraper(object):
def __init__(self):
# Create and configure browser object for navigation
agent = 'Mozilla/5.0 (Windows NT 5.1; rv:23.0) Gecko/20100101 Firefox/23.0'
self.browser = RoboBrowser(user_agent=agent, parser='html5lib')
def extract_string(self, full_string, first_param, last_param):
try:
start = full_string.index(first_param) + len(first_param)
end = full_string.index(last_param, start)
return full_string[start:end]
except ValueError:
return ""
def check_type(self, indicator):
return discover_type(indicator)
class RobtexMonitorScraper(MechanizedScraper):
"""
Created by: LNguyen
Date: 17April2017
    Class to handle the web scraping logic for the certificate monitoring function. This class is referenced by the Monitor function.
"""
def __init__(self):
MechanizedScraper.__init__(self)
def run(self, ip):
"""
Created by: LNguyen
Date: 26January2017
        Updated scraping logic because of an existing bug that depended on finding an ID = shared_ma which no longer exists in the Robtex web pages.
        The new logic finds the list of shared domains located in the tag <ol class="xbul">.
:param ip: The ip address to scrape the Robtex web page for
:return: A list of domains found for the given ip address
"""
results = []
url_param = ip.replace(".", "/")
url = "https://www.robtex.com/en/advisory/ip/" + url_param + "/shared.html"
# print("url:",url)
self.browser.open(url)
parser = self.browser.parsed
search = parser.find("ol", {"class": "xbul"})
# print("search: ", search)
total = 0
if search is not None:
for result in search.find_all("li"):
total += 1
if total > 100:
break
else:
result_value = result.text
# print("result_value: ",result.text)
                    if ' ' in result_value:
                        result_value = re.sub(' ', '.', result_value)
                    results.append(result_value)
# print("scraperesults:",results)
# print("robtex_total:",total)
return results
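    # Illustrative sketch with a hypothetical address: the dot-to-slash
    # rewrite above maps an IP onto Robtex's URL scheme:
    #
    #   '8.8.8.8' -> 'https://www.robtex.com/en/advisory/ip/8/8/8/8/shared.html'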
class RobtexScraper(MechanizedScraper):
def __init__(self):
MechanizedScraper.__init__(self)
def run(self, ip):
results = []
url_param = ip.replace(".", "/")
url = "https://www.robtex.com/en/advisory/ip/" + url_param + "/shared.html"
self.browser.open(url)
parser = self.browser.parsed
search = parser.find("span", {"id": "shared_ma"})
if search is not None:
# count = self.extract_string(search.text, "(", " shown")
# if int(count) <= 50:
for result in search.parent.parent.find("ol", {"class": "xbul"}).findChildren('li'):
result_value = result.text
                if ' ' in result_value:
                    result_value = re.sub(' ', '.', result_value)
                results.append(result_value)
# else:
# results.append("%s domains identified" % str(count))
return results
#
# def run(self, ip):
# results = []
#
# url_param = ip.replace(".", "/")
#
# url = "https://www.robtex.com/en/advisory/ip/" + url_param + "/shared.html"
# print("url:", url)
# self.browser.open(url)
# # print("response:", self.browser.response)
# parser = self.browser.parsed
#
# # print("parser:", parser)
# # search = parser.find("span", {"id": "shared_ma"})
# search = parser.find("span", {"id": "shared"})
# print("search: ", search)
# if search is not None:
# # count = self.extract_string(search.text, "(", " shown")
# # if int(count) <= 50:
# if search.parent.parent.parent.find("ol", {"class": "xbul"}):
#
# # for result in search.parent.parent.find("ol", {"class": "xbul"}).findChildren('li'):
# for result in search.parent.parent.parent.find("ol", {"class": "xbul"}).findChildren('li'):
# result_value = result.text
# print("result_value: ", result.text)
# if ' ' in result_value:
# result_value = re.sub(' ', '.', result_value)
# results.append(result_value)
#
# else:
# results.append(result_value)
#
# # else:
# # results.append("%s domains identified" % str(count))
# print("scraperesults:", results)
# return results
class GoogleScraper(MechanizedScraper):
def __init__(self):
MechanizedScraper.__init__(self)
self.results = {'top_results': [], 'result_count': 0}
def run(self, query):
self.browser.open("https://www.startpage.com/")
form = self.browser.get_form(id="search_form")
form['query'].value = "\"" + query + "\""
form['abp'].value = "true"
self.browser.submit_form(form)
parser = self.browser.parsed
# Prepare tables
headlines = []
links = []
descriptions = []
# Return number of results from search
count = parser.find('div', {'id': 'results_content'}).find('p', {'id': 'results_count_p'})
sanitized_count = str(count)
first = "About"
last = "results ("
self.results['result_count'] = self.extract_string(sanitized_count, first, last)
# Return first page of results #
page = parser.find('div', {'id': 'results'})
section = []
for element in page:
if element.name == "ol":
section = element.find_all('li')
break
for li in section:
try:
headline = li.h3.text
link = li.a['href']
description = li.find('p', {'class': 'desc'}).text
result = [headline, link, description]
self.results['top_results'].append(result)
            except Exception:
pass
return self.results
class VirusTotalScraper(MechanizedScraper):
def __init__(self):
MechanizedScraper.__init__(self)
# Set additional header parameters; VirusTotal won't return content without them
self.browser.session.headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
self.browser.session.headers["Accept-Encoding"] = "gzip, deflate"
self.browser.session.headers["Accept-Language"] = "en-US,en;q=0.5"
def get_passive(self, indicator):
results = []
indicator_type = self.check_type(indicator)
if indicator_type == "ip":
url = "https://www.virustotal.com/en/ip-address/" + indicator + "/information/"
self.browser.open(url)
elif indicator_type == "domain":
url = "https://www.virustotal.com/en/domain/" + indicator + "/information/"
self.browser.open(url)
parsed_html = self.browser.parsed
try:
alert = parsed_html.find('div', {'id': 'dns-resolutions'}).find('div', {'class': 'alert'})
except AttributeError:
alert = None
if not alert:
records = parsed_html.find('div', {'id': 'dns-resolutions'})
if records is not None:
for item in records.find_all('div'):
entry = item.text.replace(" ", "").splitlines()
entry = list(filter(None, entry))
if indicator_type == "ip":
results.append({'ip': indicator, 'domain': entry[1], 'date': entry[0], 'firstseen': entry[0], 'lastseen':'','ip_location': {}})
elif indicator_type == "domain":
results.append({'ip': entry[1], 'domain': indicator, 'date': entry[0], 'firstseen': entry[0], 'lastseen':'','ip_location': {}})
# results.append({'ip': entry[1], 'domain': indicator, 'date': entry[0], 'ip_location': {}})
return results
def get_malware(self, indicator):
results = []
indicator_type = self.check_type(indicator)
if indicator_type == "ip":
url = "https://www.virustotal.com/en/ip-address/" + indicator + "/information/"
self.browser.open(url)
elif indicator_type == "domain":
url = "https://www.virustotal.com/en/domain/" + indicator + "/information/"
self.browser.open(url)
parsed_html = self.browser.parsed
try:
alert = parsed_html.find('div', {'id': 'dns-resolutions'}).find('div', {'class': 'alert'})
except AttributeError:
alert = None
if not alert:
#records = parsed_html.find('div', {'id': 'undetected-referrer'})
records = parsed_html.find('div', {'id': lambda L: L and L.startswith('undetected')})
#base_link_url = "https://www.virustotal.com/en/file/"
base_link_url = "https://www.virustotal.com"
if records is not None:
for item in records.find_all('div'):
sha256 = item.find('a').text.replace('\n', '').strip()
#raw = item.text.splitlines()
#entries = [entry.strip() for entry in raw]
#cleaned = list(filter(None, entries))[1:] # Remove detection ratio
#link = base_link_url + str(cleaned[0]) + "/analysis"
link = base_link_url + item.find('a').get('href')
results.append({"date": "", "link": link, "C2": indicator,
"md5": "", "sha1": "", "sha256": sha256})
return results
#
# def get_malware(self, indicator):
# results = []
# indicator_type = self.check_type(indicator)
#
# if indicator_type == "ip":
# url = "https://www.virustotal.com/en/ip-address/" + indicator + "/information/"
# self.browser.open(url)
#
# elif indicator_type == "domain":
# url = "https://www.virustotal.com/en/domain/" + indicator + "/information/"
# self.browser.open(url)
#
# parsed_html = self.browser.parsed
# alert = parsed_html.find('div', {'id': 'dns-resolutions'}).find('div', {'class': 'alert'})
#
# if not alert:
# records = parsed_html.find('div', {'id': 'detected-communicating'})
# base_link_url = "https://www.virustotal.com/en/file/"
#
# if records is not None:
# for item in records.find_all('div'):
# raw = item.text.splitlines()
# entries = [entry.strip() for entry in raw]
# cleaned = list(filter(None, entries))[1:] # Remove detection ratio
# link = base_link_url + str(cleaned[1]) + "/analysis"
#
# results.append({"date": cleaned[0], "link": link, "C2": indicator,
# "md5": "", "sha1": "", "sha256": cleaned[1]})
#
# return results
class ThreatExpertScraper(MechanizedScraper):
def __init__(self):
MechanizedScraper.__init__(self)
def run(self, indicator):
results = []
self.browser.open("http://www.threatexpert.com/reports.aspx")
form = self.browser.get_form(action="reports.aspx")
form['find'].value = "\"" + indicator + "\""
self.browser.submit_form(form)
parser = self.browser.parsed
# Return number of results from search [0] + number of pages of results [1]
section = parser.find('span', {'id': 'txtResults'}).find_all('table')
if section:
if len(section) > 1:
page_count = len(section[1].find_all('td')) - 1 # Acquire page count
else:
page_count = 1
# scrape current page
data = section[0].find_all('tr')
page = self.scrape_page(data, indicator)
results.extend(page)
# Gather records from subsequent pages
for x in range(2, page_count + 1):
url = "http://www.threatexpert.com/reports.aspx?page=%s&find=%s" % (x, indicator)
self.browser.open(url)
parser = self.browser.parsed
section = parser.find('span', {'id': 'txtResults'}).find('table')
if section:
data = section.find_all('tr')
page = self.scrape_page(data, indicator)
results.extend(page)
return results
def scrape_page(self, data, indicator):
results = []
container = []
base_link_url = "http://threatexpert.com/report.aspx?md5="
for row in data[1:]: # Remove Headers
entries = row.find_all('td')
for entry in entries:
link = entry.find('a')
if link:
                    container.append(link['href'][16:])  # slice off report.aspx?md5=
else:
container.append(entry.text)
raw_time = container[0] # Default Date Format 6/8/2013 3:05:55 AM
clean_time = dateutil.parser.parse(raw_time) # Convert to datetime object
container[0] = clean_time.strftime('%Y-%m-%d %H:%M:%S')
link = base_link_url + str(container[-1])
# Container = [Date, Risk, Origin, MD5], Remove Risk and Origin
results.append({"date": container[0], "link": link, "C2": indicator,
"md5": container[-1], "sha1": "", "sha256": ""})
container[:] = []
return results
class InternetIdentityScraper(MechanizedScraper):
def __init__(self):
MechanizedScraper.__init__(self)
self.service = "IID"
self.username = settings.IID_USER
self.password = settings.IID_PASS
self.results = []
def run(self, indicator):
self.load_cookie()
cookie_valid = self.check_cookie()
if cookie_valid:
indicator_type = self.check_type(indicator)
if indicator_type == "ip":
self.scrape_data(indicator, "A") # query_type = "A" for IP
elif indicator_type == "domain":
self.scrape_data(indicator, "H") # query_type = "H" for domain
self.scrape_data(indicator, "X") # query_type = "X" for sub-domains
return self.results
def load_cookie(self):
try:
session = ExternalSessions.objects.get(service=self.service)
cookie = requests.utils.cookiejar_from_dict(session.cookie)
self.browser.session.cookies = cookie
valid_cookie = self.check_cookie()
if not valid_cookie:
self.login()
except ObjectDoesNotExist:
self.login()
def check_cookie(self):
url = "https://research.iad.internetidentity.com"
self.browser.open(url, verify=False)
parser = self.browser.parsed
# Verify login succeeded
login_test = parser.find_all('a', {'href': '/logout.php'})
if login_test:
return True
return False
def save_cookie(self):
cookie = json.dumps(requests.utils.dict_from_cookiejar(self.browser.session.cookies))
try:
session = ExternalSessions.objects.get(service=self.service)
session.cookie = cookie
session.save(update_fields=['cookie'])
except ObjectDoesNotExist:
session = ExternalSessions(service=self.service,
cookie=cookie)
session.save()
def login(self):
url = "https://research.iad.internetidentity.com/login.php"
self.browser.open(url, verify=True)
form = self.browser.get_form()
form['username'].value = self.username
form['password'].value = self.password
self.browser.submit_form(form)
self.save_cookie()
def scrape_data(self, indicator, query_type):
passive_table = []
# search period 7 is "complete history"
search_period = '7'
# 0 = Current Day
# 1 = Past 72 Hours
# 2 = Past Week
# 3 = Past Month
# 4 = Past 3 Months
# 5 = Past 6 Months
# 6 = Past Year
format = '0'
# 0 = Display results on screen
# 1 = Output to CSV file (Comma separated w/o quotes)
# 2 = Output to CSV file (Comma separated with quotes)
# 3 = Output to CSV file (Tab separated w/o quotes)
# 4 = Output to CSV file (Tab separated with quotes)
# 5 = Output to CSV file (Pipe separated w/o quotes)
# 6 = Output to CSV file (Pipe separated with quotes)
# queryType
# A = Query IP Address or CIDR,
# H = Query Hostname
# X = Query Domain Name for Hosts
# D = Query Domain for Authoritative Nameservers
# N = Query Nameserver for Authoritative Domains
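        # A composed query URL looks like (hypothetical target shown):
        #   .../index.php?period=7&format=0&queryType=H&target=example.com&submit=Submit+Query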
url = "https://research.iad.internetidentity.com/index.php?period=" + search_period + "&format=" + format + "&queryType=" + query_type + "&target=" + indicator + "&submit=Submit+Query"
        self.browser.open(url, timeout=20000)
parser = self.browser.parsed
search = parser.find("table", {"style": "text-align: left; margin-left: auto; margin-right: auto;"})
for tr in search.find('tbody'):
tds = []
if tr != "\n":
for td in tr.find_all('td'):
tds.append(td.text.strip())
# check that table data exists
if len(tds) == 4:
IID_seen = tds[0]
IID_host = tds[1]
IID_qType = tds[2]
IID_ip = tds[3]
passive_table.append({'ip': IID_ip, 'domain': IID_host, 'date': IID_seen, 'firstseen': IID_seen,'lastseen': {}, 'ip_location': {}})
tds[:] = []
self.results.extend(passive_table)
|
{
"content_hash": "2d58549e767a95eae1d7f1f7199c90c5",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 194,
"avg_line_length": 34.05427631578947,
"alnum_prop": 0.5365853658536586,
"repo_name": "LindaTNguyen/RAPID",
"id": "0519bc0705cb7747d279a37c38c072516c25bfb8",
"size": "20705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pivoteer/collectors/scrape.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "184203"
},
{
"name": "HTML",
"bytes": "7180410"
},
{
"name": "JavaScript",
"bytes": "1702997"
},
{
"name": "PHP",
"bytes": "55444"
},
{
"name": "Python",
"bytes": "313767"
},
{
"name": "Shell",
"bytes": "31724"
}
],
"symlink_target": ""
}
|
import unittest
from mock import patch
import gevent
import gevent.queue
from steam.core.cm import CMClient
class CMClient_Scenarios(unittest.TestCase):
test_channel_key = b'SESSION KEY LOL'
def setUp(self):
# mock out crypto
patcher = patch('steam.core.crypto.generate_session_key')
self.addCleanup(patcher.stop)
self.gen_skey = patcher.start()
self.gen_skey.return_value = (self.test_channel_key, b'PUBKEY ENCRYPTED SESSION KEY')
patcher = patch('steam.core.crypto.symmetric_encrypt')
self.addCleanup(patcher.stop)
self.s_enc = patcher.start()
self.s_enc.side_effect = lambda m, k: m
patcher = patch('steam.core.crypto.symmetric_encrypt_HMAC')
self.addCleanup(patcher.stop)
self.s_enc_hmac = patcher.start()
self.s_enc_hmac.side_effect = lambda m, k, mac: m
patcher = patch('steam.core.crypto.symmetric_decrypt')
self.addCleanup(patcher.stop)
self.s_dec = patcher.start()
self.s_dec.side_effect = lambda c, k: c
patcher = patch('steam.core.crypto.symmetric_decrypt_HMAC')
self.addCleanup(patcher.stop)
self.s_dec_hmac = patcher.start()
self.s_dec_hmac.side_effect = lambda c, k, mac: c
# mock out TCPConnection
patcher = patch('steam.core.cm.TCPConnection', autospec=True)
self.addCleanup(patcher.stop)
self.conn = patcher.start().return_value
self.conn_in = gevent.queue.Queue()
self.conn.__iter__.return_value = self.conn_in
# mock out CMServerList
patcher = patch('steam.core.cm.CMServerList', autospec=True)
self.addCleanup(patcher.stop)
self.server_list = patcher.start().return_value
self.server_list.__iter__.return_value = [(127001, 20000+i) for i in range(10)]
self.server_list.bootstrap_from_webapi.return_value = False
self.server_list.bootstrap_from_dns.return_value = False
@patch.object(CMClient, 'emit')
@patch.object(CMClient, '_recv_messages')
def test_connect(self, mock_recv, mock_emit):
# setup
self.conn.connect.return_value = True
self.server_list.__len__.return_value = 10
# run
cm = CMClient()
with gevent.Timeout(2, False):
cm.connect(retry=1)
gevent.idle()
# verify
self.conn.connect.assert_called_once_with((127001, 20000))
mock_emit.assert_called_once_with('connected')
mock_recv.assert_called_once_with()
@patch.object(CMClient, 'emit')
@patch.object(CMClient, '_recv_messages')
def test_connect_auto_discovery_failing(self, mock_recv, mock_emit):
# setup
self.conn.connect.return_value = True
self.server_list.__len__.return_value = 0
# run
cm = CMClient()
with gevent.Timeout(3, False):
cm.connect(retry=1)
gevent.idle()
# verify
self.server_list.bootstrap_from_webapi.assert_called_once_with()
self.server_list.bootstrap_from_dns.assert_called_once_with()
self.conn.connect.assert_not_called()
@patch.object(CMClient, 'emit')
@patch.object(CMClient, '_recv_messages')
def test_connect_auto_discovery_success(self, mock_recv, mock_emit):
# setup
self.conn.connect.return_value = True
self.server_list.__len__.return_value = 0
def fake_servers(*args, **kwargs):
self.server_list.__len__.return_value = 10
return True
self.server_list.bootstrap_from_webapi.side_effect = fake_servers
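        # Simulate a successful WebAPI bootstrap: the side effect flips the
        # mocked server list from empty to populated, so connect() should
        # proceed without falling back to DNS discovery.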
# run
cm = CMClient()
with gevent.Timeout(3, False):
cm.connect(retry=1)
gevent.idle()
# verify
self.server_list.bootstrap_from_webapi.assert_called_once_with()
self.server_list.bootstrap_from_dns.assert_not_called()
self.conn.connect.assert_called_once_with((127001, 20000))
mock_emit.assert_called_once_with('connected')
mock_recv.assert_called_once_with()
def test_channel_encrypt_sequence(self):
# setup
self.conn.connect.return_value = True
# run ------------
cm = CMClient()
cm.connected = True
gevent.spawn(cm._recv_messages)
        # receive ChannelEncryptRequest
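        # (first 4 little-endian bytes: EMsg id 0x517 == 1303, i.e.
        #  ChannelEncryptRequest; then 8-byte target/source job ids of all
        #  0xFF, then protocol version 1 and universe 1)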
self.conn_in.put(b'\x17\x05\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00\x01\x00\x00\x00')
gevent.idle(); gevent.idle(); gevent.idle(); gevent.idle()
self.conn.put_message.assert_called_once_with(b'\x18\x05\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00\x80\x00\x00\x00PUBKEY ENCRYPTED SESSION KEY\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h-\xc4@\x00\x00\x00\x00')
        # receive ChannelEncryptResult (OK)
self.conn_in.put(b'\x19\x05\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00')
cm.wait_event('channel_secured', timeout=2, raises=True)
|
{
"content_hash": "980bf76ce6345bf34d70e64057016a03",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 621,
"avg_line_length": 38.707142857142856,
"alnum_prop": 0.6401550101494741,
"repo_name": "ValvePython/steam",
"id": "b2c8413f65643cf629ad928629dbc0f3bdf4760d",
"size": "5419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core_cm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2566"
},
{
"name": "Python",
"bytes": "470889"
}
],
"symlink_target": ""
}
|
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesTestJSON(base.BaseV2ComputeTest):
"""
This test creates a number of 1G volumes. To run successfully,
ensure that the backing file for the volume group that Nova uses
has space for at least 3 1G volumes!
If you are running a Devstack environment, ensure that the
    VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
"""
@classmethod
def skip_checks(cls):
super(VolumesTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesTestJSON, cls).setup_clients()
cls.client = cls.volumes_extensions_client
@classmethod
def resource_setup(cls):
super(VolumesTestJSON, cls).resource_setup()
# Create 3 Volumes
cls.volume_list = []
cls.volume_id_list = []
for i in range(3):
v_name = data_utils.rand_name('volume')
metadata = {'Type': 'work'}
try:
volume = cls.client.create_volume(size=CONF.volume.volume_size,
display_name=v_name,
metadata=metadata)['volume']
waiters.wait_for_volume_status(cls.client,
volume['id'], 'available')
volume = cls.client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
# because the backing file size of the volume group is
# too small. So, here, we clean up whatever we did manage
# to create and raise a SkipTest
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
msg = ("Failed to create ALL necessary volumes to run "
"test. This typically means that the backing file "
"size of the nova-volumes group is too small to "
"create the 3 volumes needed by this test case")
raise cls.skipException(msg)
raise
@classmethod
def resource_cleanup(cls):
# Delete the created Volumes
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
super(VolumesTestJSON, cls).resource_cleanup()
@test.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
def test_volume_list(self):
# Should return the list of Volumes
# Fetch all Volumes
fetched_list = self.client.list_volumes()['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
# Fetch all Volumes
fetched_list = self.client.list_volumes(detail=True)['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
def test_volume_list_param_limit(self):
# Return the list of volumes based on limit set
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by limit set")
@test.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
def test_volume_list_with_detail_param_limit(self):
# Return the list of volumes with details based on limit set.
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by limit set")
@test.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
def test_volume_list_param_offset_and_limit(self):
# Return the list of volumes based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes()['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volumes by offset and limit")
@test.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
def test_volume_list_with_detail_param_offset_and_limit(self):
# Return the list of volumes details based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes(detail=True)['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volume details by "
"offset and limit")
|
{
"content_hash": "3090b6283846f5e2e328cdf6b119a39f",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 45.42948717948718,
"alnum_prop": 0.5738676449837731,
"repo_name": "xbezdick/tempest",
"id": "f0ed141d7a592e9ec4676868b3615a5119f37188",
"size": "7723",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/api/compute/volumes/test_volumes_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2880166"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
"""Define MySQL permissions.
This module implements a domain-specific language (in Python) for defining MySQL
permissions.
A permissions file can use this framework by running::
SetAllowedFields(['ssl_cipher', 'x509_issuer', 'x509_subject'])
user = Account(username='droid', password_hash='*ABD2189031289051',
ssl_cipher='', x509_issuer='', x509_subject='')
user.AddAllowedHost(hostname_pattern='%')
user.Export(set='secondary')
Chained method calls may also be used::
(Account(username='droid', password_hash='*ABD2189031289051',
ssl_cipher='', x509_issuer='', x509_subject='')
.AddAllowedHost(hostname_pattern='%')
.GrantPrivileges(privileges=SUPER)
.Export(set='secondary')
.GrantPrivileges(privileges=ALL_PRIVILEGES)
.Export(set='primary'))
SetAllowedFields() should be called once, before defining any Accounts.
See the documentation for L{Account}.
"""
__author__ = 'flamingcow@google.com (Ian Gulliver)'
__docformat__ = 'epytext en'
import copy
import functools
try:
from ..pylib import db
except (ValueError, ImportError):
from pylib import db
import utils
# Dictionary of setname -> Set.
# This variable is used during push/publish/dump operations; it should not
# appear in a permissions file.
SETS = {}
class Error(Exception):
"""The base class for exceptions in this module."""
class DuplicateAccount(Error):
"""Raised when an account is exported to the same set."""
class InvalidPrivileges(Error):
"""Raised when invalid privilege combinations are attempted."""
class InvalidUsername(Error):
"""Raised when an invalid username is set."""
class InvalidKey(Error):
"""Raised when an unknown field or comment name is used in SetFields."""
class MissingFieldsError(Error):
"""Raised when exporting an account with no username or password."""
class NeedDBAccess(Error):
"""Raised when a database handle is required to build a permission set."""
class NoSuchAccount(Error):
"""Raised when an account name is not found (by FindAccount)."""
class NoSuchSet(Error):
"""Raised when a permissions set is not found (by FindAccount)."""
class DecryptionRequired(Error):
"""Raised when an account that needs decryption is used."""
class DecryptionFailed(Error):
"""Raised when decryption fails, due to invalid key or data."""
class NoAllowedHosts(Error):
"""Account has no allowed hosts but is exported."""
_TOTAL_PRIVS = 28
_PRIVS = [2**x for x in xrange(_TOTAL_PRIVS)]
(
SELECT,
INSERT,
UPDATE,
DELETE,
CREATE,
DROP,
RELOAD,
SHUTDOWN,
PROCESS,
FILE,
GRANT,
REFERENCES,
INDEX,
ALTER,
SHOW_DATABASES,
SUPER,
CREATE_TEMP_TABLE,
LOCK_TABLES,
EXECUTE,
REPL_SLAVE,
REPL_CLIENT,
CREATE_VIEW,
SHOW_VIEW,
CREATE_ROUTINE,
ALTER_ROUTINE,
CREATE_USER,
EVENT,
TRIGGER,
) = _PRIVS
ALL_PRIVILEGES = sum(_PRIVS)
"""All privileges. See L{Account.GrantPrivileges()}."""
# Using this in place of a database name causes substitution for the "main
# database", e.g. "ads54".
DEFAULT = db.Literal('DATABASE()')
"""Refers to the (sharded) database name.
This is used to indicate that a permission should be set on the database
specified by the --db flag during a push/publish/dump operation. See
L{Account.GrantPrivileges}().
"""
_FIELD_NAMES = {
SELECT: 'Select_priv',
INSERT: 'Insert_priv',
UPDATE: 'Update_priv',
DELETE: 'Delete_priv',
CREATE: 'Create_priv',
DROP: 'Drop_priv',
RELOAD: 'Reload_priv',
SHUTDOWN: 'Shutdown_priv',
PROCESS: 'Process_priv',
FILE: 'File_priv',
GRANT: 'Grant_priv',
REFERENCES: 'References_priv',
INDEX: 'Index_priv',
ALTER: 'Alter_priv',
SHOW_DATABASES: 'Show_db_priv',
SUPER: 'Super_priv',
CREATE_TEMP_TABLE: 'Create_tmp_table_priv',
LOCK_TABLES: 'Lock_tables_priv',
EXECUTE: 'Execute_priv',
REPL_SLAVE: 'Repl_slave_priv',
REPL_CLIENT: 'Repl_client_priv',
CREATE_VIEW: 'Create_view_priv',
SHOW_VIEW: 'Show_view_priv',
CREATE_ROUTINE: 'Create_routine_priv',
ALTER_ROUTINE: 'Alter_routine_priv',
CREATE_USER: 'Create_user_priv',
EVENT: 'Event_priv',
TRIGGER: 'Trigger_priv',
}
_FIELD_SET_NAMES = {
SELECT: 'Select',
INSERT: 'Insert',
UPDATE: 'Update',
DELETE: 'Delete',
CREATE: 'Create',
DROP: 'Drop',
GRANT: 'Grant',
REFERENCES: 'References',
INDEX: 'Index',
ALTER: 'Alter',
CREATE_VIEW: 'Create View',
SHOW_VIEW: 'Show view',
TRIGGER: 'Trigger',
}
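# _FIELD_NAMES maps each privilege to the per-privilege enum('N','Y') column
# used by tables that store one column per privilege (mysql.user, mysql.db);
# _FIELD_SET_NAMES maps the smaller subset representable in the SET-typed
# Table_priv/Column_priv columns of mysql.tables_priv and mysql.columns_priv.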
_ALLOWED_FIELDS = []
_ALLOWED_COMMENTS = []
def SetAllowedFields(fields):
"""Set the list of field names which can be set on an Account.
Fields are per-account key-value pairs and are exported to the mysql.user
table.
Args:
fields: a sequence of field names which must exist in the target table
"""
global _ALLOWED_FIELDS
_ALLOWED_FIELDS = fields
def SetAllowedComments(comments):
"""Set the list of comment names which can be set on an Account.
Comments are per-account key-value pairs. They are administrative-only and
are not exported (they do not affect the users' permissions).
Args:
comments: a sequence of comment field names
"""
global _ALLOWED_COMMENTS
_ALLOWED_COMMENTS = comments
class _BasePermission(object):
"""Common methods for other permission classes (user, db, etc.)."""
_valid_privs = 0
_child_class = None
_positive_privs = 0
_negative_privs = 0
def __init__(self, entity_name=None):
# Despite the fact that we use it in several functions here, we don't take
# information on our parents (e.g. _ColumnPermission doesn't know what
# Table/Database/User it's under) because we have to lazily bind User (due
# to Clone()), so we might as well lazily bind the rest.
self._children = {}
self.SetEntityName(entity_name)
def SetEntityName(self, entity_name):
"""Set the entity (user, database, table, column) name."""
self._entity_name = entity_name
def GetPositivePrivs(self):
"""Get the bitfield of positive privileges."""
return self._positive_privs
def GetOrCreateChild(self, child_name):
"""Get a child instance, or create it if it doesn't exist yet.
Args:
child_name: The entity_name to create the child with
"""
if child_name not in self._children:
self._children[child_name] = self._child_class(child_name)
return self._children[child_name]
def GetChildren(self):
"""Retrieve all child objects.
Returns:
Dictionary of (database, table, column) name -> permissions object.
"""
return self._children
def _CreateAllChildren(self, dbh, fixed_values):
"""Find all possible children (talking to the database) and create them.
Args:
dbh: Database handle
fixed_values: Dictionary of parent entity names
"""
possible_children = self._child_class.GetAllEntities(dbh, fixed_values)
for child_name in possible_children:
self.GetOrCreateChild(child_name)
def _ValidatePrivileges(self, privs):
"""Check that privs are valid for this permission type."""
if privs & self._valid_privs != privs:
raise InvalidPrivileges('Some privileges are not valid for %s' %
self.__class__.__name__)
def GrantPrivileges(self, privs):
"""Grant privileges, overriding negative privileges.
Args:
privs: Privileges bitfield.
"""
self._ValidatePrivileges(privs)
self._positive_privs |= privs
self._negative_privs &= ~privs
def RevokePrivileges(self, privs):
"""Revoke privileges, overriding positive privileges.
Args:
privs: Privileges bitfield.
"""
self._ValidatePrivileges(privs)
self._negative_privs |= privs
self._positive_privs &= ~privs
def PopulateTables(self, tables, fixed_values={}):
"""Populate tables from this instance and all children recursively."""
self.PopulateTable(tables[self._table_name], fixed_values)
fixed_values = fixed_values.copy()
fixed_values[self._entity_field] = self._entity_name
for child in self._children.values():
child.PopulateTables(tables, fixed_values)
def GetNegativePrivsRecursive(self):
"""Get negative privileges from this and all children."""
negative_privs = self._negative_privs
for child in self._children.values():
negative_privs |= child.GetNegativePrivsRecursive()
# Don't bubble up negative privs that we can satisfy here
negative_privs &= ~self._positive_privs
return negative_privs
def SoftGrantPrivileges(self, privs):
"""Grant a privilege only if we don't have a negative permission for it.
If we do have a negative permission, remove it, so traversing this tree
later should yield no negative permissions.
"""
soft_grant = self._negative_privs & privs
hard_grant = privs & ~soft_grant
self._negative_privs &= ~soft_grant
self._positive_privs |= hard_grant
def SoftRevokePrivileges(self, privs):
"""Revoke privileges without setting negative permissions for them."""
self._positive_privs &= ~privs
def PushDownPrivileges(self, dbh=None, fixed_values={}):
"""Remove negative privileges by pushing down positive privs.
Check for negative permissions from all descendants. Push down any negative
privs to immediate children. Ask children to do the same. This ends in a
state where there are no negative privs left (but many more positive privs
at leaves).
"""
fixed_values = fixed_values.copy()
fixed_values[self._entity_field] = self._entity_name
descendant_negative_privs = 0
for child in self._children.values():
descendant_negative_privs |= child.GetNegativePrivsRecursive()
push_down = descendant_negative_privs & self._positive_privs
if push_down:
self._CreateAllChildren(dbh, fixed_values)
self.SoftRevokePrivileges(push_down)
for child in self._children.itervalues():
child.SoftGrantPrivileges(push_down)
for child in self._children.values():
child.PushDownPrivileges(dbh, fixed_values)
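  # For example, SELECT granted at the database level with SELECT revoked on
  # one table is resolved by pushing the grant down: every *other* table gets
  # an explicit positive SELECT row, since MySQL itself can only express
  # positive permissions.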
def PrivilegesForPullUp(self):
"""Get privileges that could be pulled up from this child."""
return self._positive_privs & ~self._negative_privs
def IsEmpty(self):
"""Check if this child contains no useful content."""
return not(self._negative_privs or self._positive_privs or self._children)
def PullUpPrivileges(self, inherited=0):
"""Remove spurious positive privileges from children.
If children have no negative privileges and have positive privileges
completely covered by my positive privileges, then the children are
meaningless: remove them.
"""
inherited |= self._positive_privs
for name, child in self._children.items():
child.PullUpPrivileges(inherited)
privs = child.PrivilegesForPullUp()
child.SoftRevokePrivileges(inherited & privs)
if child.IsEmpty():
del self._children[name]
class _ColumnPrivsMixIn(object):
"""Methods for permissions tables that store perms in separate columns."""
_skip_row_without_privs = True
@classmethod
def BuildTable(cls):
fields = list(cls._fixed_fields)
for priv in _PRIVS:
if priv & cls._valid_privs:
fields.append(_FIELD_NAMES[priv])
return db.VirtualTable(fields, [])
def PopulateTable(self, table, fixed_values={}):
if self._skip_row_without_privs and not self._positive_privs:
return
values = []
for field in self._fixed_fields:
if field == self._entity_field:
values.append(self._entity_name)
else:
# If this throws KeyError, fixed_values is incomplete
values.append(fixed_values[field])
for priv in _PRIVS:
if not (priv & self._valid_privs):
continue
if priv & self._positive_privs:
values.append('Y')
else:
values.append('N')
table.Append(values)
class _SetPrivsMixIn(object):
"""Methods for permissions tables that store permissions in sets."""
def _PrivsToString(self, privs):
set_entries = []
for priv in _PRIVS:
if priv & privs:
set_entries.append(_FIELD_SET_NAMES[priv])
return ','.join(set_entries)
class _ColumnPermission(_BasePermission, _SetPrivsMixIn):
_valid_privs = (SELECT | INSERT | UPDATE | REFERENCES)
_table_name = 'columns_priv'
_entity_field = 'Column_name'
@classmethod
def BuildTable(cls):
return db.VirtualTable(('Host', 'Db', 'User', 'Table_name', 'Column_name',
'Column_priv'), [])
def PopulateTable(self, table, fixed_values):
if not self._positive_privs:
return
values = [
fixed_values['Host'],
fixed_values['Db'],
fixed_values['User'],
fixed_values['Table_name'],
self._entity_name,
self._PrivsToString(self._positive_privs),
]
table.Append(values)
@staticmethod
def GetAllEntities(dbh, fixed_values={}):
if not dbh:
raise NeedDBAccess('Need to retrieve column list from %s.%s' %
(str(fixed_values['Db']), fixed_values['Table_name']))
result = dbh.CachedExecuteOrDie(
'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE'
' TABLE_SCHEMA=%(Db)s AND'
' TABLE_NAME=%(Table_name)s',
fixed_values)
return [row['COLUMN_NAME'] for row in result]
class _TablePermission(_BasePermission, _SetPrivsMixIn):
_valid_privs = (SELECT | INSERT | UPDATE | DELETE | CREATE | DROP | GRANT |
REFERENCES | INDEX | ALTER | CREATE_VIEW | SHOW_VIEW |
TRIGGER)
_child_class = _ColumnPermission
_table_name = 'tables_priv'
_entity_field = 'Table_name'
@classmethod
def BuildTable(cls):
# Column_priv in tables_priv tells MySQL to check the columns_priv table
# and allow it to grant just that set of permissions. It therefore has to
# be the bitwise OR of all positive column permissions under it. Yes, this
# is stupid.
return db.VirtualTable(('Host', 'Db', 'User', 'Table_name', 'Table_priv',
'Column_priv'), [])
def PopulateTable(self, table, fixed_values):
column_privs = 0
for child in self._children.itervalues():
column_privs |= child.GetPositivePrivs()
if not self._positive_privs and not column_privs:
return
if column_privs & self._positive_privs:
raise InvalidPrivileges(
'Duplication between table and column privileges for %s (%s): '
'(%s)/(%s)' % (
self._entity_name,
fixed_values,
self._PrivsToString(self._positive_privs),
self._PrivsToString(column_privs)))
values = [
fixed_values['Host'],
fixed_values['Db'],
fixed_values['User'],
self._entity_name,
self._PrivsToString(self._positive_privs),
self._PrivsToString(column_privs),
]
table.Append(values)
@staticmethod
def GetAllEntities(dbh, fixed_values={}):
if not dbh:
raise NeedDBAccess('Need to retrieve table list from %s' %
str(fixed_values['Db']))
result = dbh.CachedExecuteOrDie(
'SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE'
' TABLE_SCHEMA=%(Db)s', fixed_values)
return [row['TABLE_NAME'] for row in result]
class _DatabasePermission(_BasePermission, _ColumnPrivsMixIn):
_valid_privs = (SELECT | INSERT | UPDATE | DELETE | CREATE | DROP | GRANT |
REFERENCES | INDEX | ALTER | CREATE_TEMP_TABLE |
LOCK_TABLES | CREATE_VIEW | SHOW_VIEW | CREATE_ROUTINE |
ALTER_ROUTINE | EXECUTE | EVENT | TRIGGER)
_child_class = _TablePermission
_fixed_fields = ['Host', 'Db', 'User']
_table_name = 'db'
_entity_field = 'Db'
@staticmethod
def GetAllEntities(dbh, fixed_values={}):
if not dbh:
raise NeedDBAccess('Need to retrieve database list')
result = dbh.CachedExecuteOrDie(
'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE'
' SCHEMA_NAME != DATABASE()')
return [row['SCHEMA_NAME'] for row in result] + [DEFAULT]
class _UserPermission(_BasePermission, _ColumnPrivsMixIn):
  _valid_privs = (SELECT | INSERT | UPDATE | DELETE | CREATE | DROP | RELOAD |
SHUTDOWN | PROCESS | FILE | GRANT | REFERENCES | INDEX |
ALTER | SHOW_DATABASES | SUPER | CREATE_TEMP_TABLE |
LOCK_TABLES | EXECUTE | REPL_SLAVE | REPL_CLIENT |
CREATE_VIEW | SHOW_VIEW | CREATE_ROUTINE | ALTER_ROUTINE |
CREATE_USER | EVENT | TRIGGER)
_child_class = _DatabasePermission
_fixed_fields = ['Host', 'User', 'Password']
_table_name = 'user'
_entity_field = 'User'
_skip_row_without_privs = False
def _KeywordArgumentsOnly(func):
"""Decorator that only allows keyword, not positional, arguments."""
def PleaseUseKeywordArguments(self, **kwargs):
return func(self, **kwargs)
PleaseUseKeywordArguments.__doc__ = func.__doc__
return PleaseUseKeywordArguments
def _KeywordArgumentsOnlyGlobal(func):
"""Like KeywordArguments, but for global functions."""
def PleaseUseKeywordArguments(**kwargs):
return func(**kwargs)
PleaseUseKeywordArguments.__doc__ = func.__doc__
return PleaseUseKeywordArguments
class Account(object):
"""A database account or account template.
Includes both authentication and authorization data.
"""
@_KeywordArgumentsOnly
def __init__(self, username=None, password_hash=None, password=None,
encrypted_hash=None, **kwargs):
"""Creates a new account instance.
An Account can represent an actual exported account and/or a template. Note
that creating an account does not cause it to appear in permissions output;
Export() must be called at least once for this to happen.
In MySQL 5, three keyword arguments are required to prevent db.py
from throwing a QueryWarningsException:
ssl_cipher='', x509_issuer='', x509_subject=''
For this to work, call
SetAllowedFields(['ssl_cipher', 'x509_issuer', 'x509_subject'])
before instantiating any Accounts.
All other arguments are optional and must be keywords; see InitUser() and
SetFields() for available arguments.
Args: see InitUser() and SetFields().
"""
self._allowed_hosts = set()
self._extra_fields = {}
self._comments = {}
self._username = None
self._password_hash = None
self._encrypted_hash = None
# For each privilege set, we store positive and negative privilege bits.
# Privileges are defined as (positive & ~negative). More specific
# privileges override less-specific ones. As MySQL only supports positive
# permissions, the representation of this in SQL may be significantly more
# complicated than here.
self._perm = _UserPermission(username)
self.InitUser(username=username,
password_hash=password_hash,
password=password,
encrypted_hash=encrypted_hash)
self.SetFields(**kwargs)
def _ValidateUsername(self, username):
"""Validate the username based on MySQL's constraints.
This currently only raises an exception for usernames that are
too long. This is a hardcoded limit in MySQL. It is probably a
good idea to limit usernames to ascii-only. More info on username
limitations can be found here:
http://dev.mysql.com/doc/refman/5.1/en/user-names.html
Args:
username: the username to validate.
Raises:
InvalidUsername
"""
if len(username) > 16:
raise InvalidUsername('username "%s" is too long' % username)
@_KeywordArgumentsOnly
def InitUser(self, username=None, password_hash=None, password=None,
encrypted_hash=None):
"""Handle constructor or copy user/password initialization.
Any argument may be None, meaning "do not change".
Args:
username: the string that the user will supply as a username when
connecting to MySQL
password: the cleartext password for this account. Using password_hash
instead is strongly recommended, in which case this is ignored.
password_hash: the double-SHA1 hash of this user's password, as returned
by the MySQL (version 4.1 and higher) PASSWORD() command
encrypted_hash: the hash as above which was then encrypted with an RSA
public key and encoded into base64.
Returns:
self (allows method call chaining)
"""
if username is not None:
self._username = username
self._ValidateUsername(username)
self._perm.SetEntityName(username)
if password_hash is not None:
self._password_hash = password_hash
elif password is not None:
self._password_hash = utils.HashPassword(password)
elif encrypted_hash is not None:
self._password_hash = None # Set by Decrypt()
self._encrypted_hash = encrypted_hash
if not utils.TestEncryptedHash(self._encrypted_hash):
raise DecryptionFailed('%s has invalid encrypted hash' % self._username)
return self
@_KeywordArgumentsOnly
def Clone(self, username=None, password_hash=None, password=None,
encrypted_hash=None, **kwargs):
"""Clone an Account.
The username or password may be changed here as well.
Args: see InitUser() and SetFields()
Returns:
a new Account
"""
new = copy.deepcopy(self)
new.InitUser(username=username,
password_hash=password_hash,
password=password,
encrypted_hash=encrypted_hash)
new.SetFields(**kwargs)
return new
@_KeywordArgumentsOnly
def AddAllowedHost(self, hostname_pattern):
"""Add a hostname pattern from which the account is allowed to connect.
All privileges are granted equally to all hostname patterns; while this is
not a MySQL restriction, it is enforced here for sanity.
Args:
hostname_pattern: an IP address, hostname, or pattern
This can only be a hostname or hostname pattern if the MySQL servers
in question have reverse DNS resolution enabled (not recommended).
Examples:
All hosts: %
IP address: 1.2.3.4
IP address pattern: 1.2.3.%
IP and netmask: 1.2.3.0/255.255.255.128
Returns:
self (allows method call chaining)
"""
self._allowed_hosts.add(hostname_pattern)
return self
def ClearAllowedHosts(self):
"""Clear all hostname patterns previously added to this account.
Returns:
self (allows method call chaining)
"""
self._allowed_hosts.clear()
return self
def _FindEntity(self, database, table, column):
if not database:
return self._perm
database_perm = self._perm.GetOrCreateChild(database)
if not table:
return database_perm
table_perm = database_perm.GetOrCreateChild(table)
if not column:
return table_perm
column_perm = table_perm.GetOrCreateChild(column)
return column_perm
@_KeywordArgumentsOnly
def GrantPrivileges(self, database=None, table=None, column=None,
privileges=0):
"""Grant privileges at the user, database, table or column level.
The DB permissions system supports granting privileges at a wide level, e.g.
database, then revoking them at a specific level (e.g. column). Positive
privileges are propagated down properly.
Calling GrantPrivileges without setting 'database', 'table' or 'column'
grants privileges at the user level.
Args:
database: database name on which to grant privileges
The special constant DEFAULT is defined and indicates that the
permissions should be granted on the database passed with the --db
flag during the push/publish/dump operation. This is useful for
sharded databases, where the database may be named differently per
shard.
If not set, privileges are granted at the user level.
table: table name on which to grant privileges
If set, database must also be set.
column: column name on which to grant privileges
If set, database and table must also be set.
privileges: a bit mask of privileges to grant, created by ORing
together (using the | operator) the following privileges:
SELECT
INSERT
UPDATE
DELETE
CREATE
DROP
RELOAD
SHUTDOWN
PROCESS
FILE
GRANT
REFERENCES
INDEX
ALTER
SHOW_DATABASES
SUPER
CREATE_TEMP_TABLE
LOCK_TABLES
EXECUTE
REPL_SLAVE
REPL_CLIENT
CREATE_VIEW
SHOW_VIEW
CREATE_ROUTINE
ALTER_ROUTINE
CREATE_USER
The meta-privilege ALL_PRIVILEGES is also available.
Not all privileges are valid at all levels.
For details, see:
http://dev.mysql.com/doc/refman/5.0/en/privileges-provided.html
Returns:
self (allows method call chaining)
"""
self._FindEntity(database, table, column).GrantPrivileges(privileges)
return self
@_KeywordArgumentsOnly
def RevokePrivileges(self, database=None, table=None, column=None,
privileges=0):
"""Revoke privileges at the user, database, column, or table level.
See GrantPrivileges().
Calling RevokePrivileges() with the same options as a previous
GrantPrivileges() reverses the effects of the previous call, but
additionally overrides any redundant inherited permissions.
Args: see GrantPrivileges()
Returns:
self (allows method call chaining)
"""
self._FindEntity(database, table, column).RevokePrivileges(privileges)
return self
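  # A typical wide-grant / specific-revoke sequence (a sketch; 'app' and
  # 'secrets' are hypothetical names):
  #   account.GrantPrivileges(database='app', privileges=SELECT)
  #   account.RevokePrivileges(database='app', table='secrets',
  #                            privileges=SELECT)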
def SetFields(self, **kwargs):
"""Set key/value data that is exported to the user table or comments.
The keys must previously have been defined with either SetAllowedFields() or
SetAllowedComments(). The function is called with the key/value pairs as
named arguments, e.g. SetFields(max_connections=10). Fields supported by
the Ads MySQL build at the time of writing:
ssl_type
ssl_cipher (required)
x509_issuer (required)
x509_subject (required)
max_questions
max_updates
max_connections
max_user_connections
Raises:
InvalidKey: Raised when a parameter has not been defined with
SetAllowedFields() or SetAllowedComments().
Returns:
self (allows method call chaining)
"""
for key, value in kwargs.iteritems():
if key in _ALLOWED_FIELDS:
self._extra_fields[key] = value
elif key in _ALLOWED_COMMENTS:
self._comments[key] = value
else:
raise InvalidKey('%s is not a valid field name' % key)
return self
@_KeywordArgumentsOnly
def Export(self, set_name):
"""Export the Account to a permissions set.
This marks the account to be pushed during push/publish/dump operations.
An account may be exported more than once, to different sets. A single
permissions file can contain multiple permissions sets. A set represents a
group of accounts and their privileges that is pushed to a host and controls
all access to that host; only one set can be in effect on a host at any
given time.
Note that a copy of the Account is made for each call to Export(). Changes
made to the Account afterward will not affect any previous Export() calls,
but will affect future Export() calls.
Args:
set_name: the set name (e.g. "primary", "secondary") to which to export
this account
Raises:
MissingFieldsError: Raised when username or password are missing.
DuplicateAccount: Raised if this account has already been exported to the
set_name.
Returns:
self (allows method call chaining)
"""
if (self._username is None or (self._password_hash is None and
self._encrypted_hash is None)):
raise MissingFieldsError(
'Username and password/password hash/encrypted hash are required to '
'Export a user')
set_obj = SETS.setdefault(set_name, Set())
set_obj.AddAccount(self)
return self
def Decrypt(self, key):
"""Decrypt an encrypted hash for this user, if there is one."""
if self._encrypted_hash is not None and self._password_hash is None:
self._password_hash = utils.DecryptHash(key, self._encrypted_hash)
if not self._password_hash:
raise DecryptionFailed(self.GetUsername())
def GetUsername(self):
"""Return the username for this account.
This method is used during push/publish/dump operations and calls to it
should not appear in a permissions file.
Returns:
The username of the account.
"""
return self._username
def GetUserPermission(self):
"""Fetch the UserPermission object behind this account."""
return self._perm
def GetTables(self, dbh=None):
"""Generate tables containing privilege data for this user.
This method is used during push/publish/dump operations and calls to it
should not appear in a permissions file.
Args:
dbh: Database handle. Only used here for read-only, schema-description
queries.
Returns:
A dictionary where the keys are a table name in the mysql database and
the value is db.VirtualTable containing the output rows.
Raises:
NeedDBAccess
DecryptionRequired
"""
if self._encrypted_hash is not None and self._password_hash is None:
raise DecryptionRequired(self.GetUsername())
self._perm.PullUpPrivileges()
self._perm.PushDownPrivileges(dbh)
self._perm.PullUpPrivileges()
ret = {
'user': _UserPermission.BuildTable(),
'db': _DatabasePermission.BuildTable(),
'tables_priv': _TablePermission.BuildTable(),
'columns_priv': _ColumnPermission.BuildTable(),
}
fixed_values = {
'Password': self._password_hash,
}
if not self._allowed_hosts:
      raise NoAllowedHosts('Account %s has no allowed hosts' %
                           self.GetUsername())
for host in sorted(self._allowed_hosts):
fixed_values['Host'] = host
self._perm.PopulateTables(ret, fixed_values)
for key in sorted(self._extra_fields.iterkeys()):
ret['user'].AddField(key, self._extra_fields[key])
if self._password_hash is None:
del ret['user']
return ret
def GetComment(self, comment_name, default=None):
"""Fetch the value for a given comment name."""
return self._comments.get(comment_name, default)
class Set(object):
"""A set of accounts with permissions.
A permissions file can contain multiple permissions sets. A set is a named
group of permissions that are active together at a given time. The name to set
mapping is kept in the global SETS variable.
"""
def __init__(self):
# username -> Account
self._accounts = {}
def GetTables(self, dbh):
"""Generate a table set for each account, then merge them all together.
Args:
dbh: Database handle. Only used here for read-only, schema-description
queries.
Returns:
A dictionary where the keys are a table name in the mysql database and
the value is db.VirtualTable containing the output rows.
"""
tables = {}
for account in self._accounts.itervalues():
      # We use a temporary account copy because GetTables() alters the
# account object, expanding out the privileges. This can result in a much
# larger object, so we don't want to keep it around.
temp_account = account.Clone()
acct_tables = temp_account.GetTables(dbh)
for table, result in acct_tables.iteritems():
if table in tables:
tables[table].Merge(result)
else:
tables[table] = result
return tables
def AddAccount(self, account):
if account.GetUsername() in self._accounts:
raise DuplicateAccount(account.GetUsername())
self._accounts[account.GetUsername()] = account.Clone()
def SetAllFields(self, **kwargs):
# Split out fields from username/password/password_hash
fields = kwargs.copy()
if 'username' in fields: del fields['username']
if 'password' in fields: del fields['password']
if 'password_hash' in fields: del fields['password_hash']
if 'encrypted_hash' in fields: del fields['encrypted_hash']
for account in self._accounts.itervalues():
account.InitUser(username=kwargs.get('username', None),
password=kwargs.get('password', None),
password_hash=kwargs.get('password_hash', None),
encrypted_hash=kwargs.get('encrypted_hash', None))
account.SetFields(**fields)
def SetAllAllowedHosts(self, hostname_patterns):
for account in self._accounts.itervalues():
account.ClearAllowedHosts()
for hostname_pattern in hostname_patterns:
account.AddAllowedHost(hostname_pattern=hostname_pattern)
def GetAccount(self, username):
return self._accounts[username]
def GetAccounts(self):
return self._accounts.copy()
def Clone(self):
new_set = Set()
for account in self._accounts.itervalues():
new_set.AddAccount(account)
return new_set
def Decrypt(self, key):
"""Decrypt hashes for all accounts in this set."""
for account in self._accounts.itervalues():
account.Decrypt(key)
def GetComments(self, comment_names, username):
"""Fetch a list of comment values for a username."""
return (self._accounts[username].GetComment(comment_name)
for comment_name in comment_names)
# For use by code defining permissions:
@_KeywordArgumentsOnlyGlobal
def FindAccount(set_name, username):
"""Retrieve a previously-created account object."""
try:
set = SETS[set_name]
except KeyError:
raise NoSuchSet('Set %s has no exported accounts' % set_name)
try:
return set.GetAccount(username)
except KeyError:
raise NoSuchAccount('No account with username %s in set %s' % (
username, set_name))
@_KeywordArgumentsOnlyGlobal
def DuplicateSet(source_set_name, destination_set_name):
"""Create a new set that contains all accounts in an existing set."""
SETS[destination_set_name] = SETS[source_set_name].Clone()
@_KeywordArgumentsOnlyGlobal
def SetAllFields(set_name, **kwargs):
"""Set field values on every account in a set."""
SETS[set_name].SetAllFields(**kwargs)
@_KeywordArgumentsOnlyGlobal
def SetAllAllowedHosts(set_name, hostname_patterns):
"""Set allowed host patterns on every account in a set."""
SETS[set_name].SetAllAllowedHosts(hostname_patterns)
|
{
"content_hash": "f06abe5f4c169cfb9d3de8f513d0b1a4",
"timestamp": "",
"source": "github",
"line_count": 1081,
"max_line_length": 80,
"avg_line_length": 32.723404255319146,
"alnum_prop": 0.6590150958330978,
"repo_name": "vincentor/google-mysql-tools",
"id": "2ee041f6c187d45ed062b68a16fc37974399ea1f",
"size": "35971",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "permissions_lib/define.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5563"
},
{
"name": "Python",
"bytes": "422962"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
import argparse
import time
import string
import random
import jinja2
import numpy as np
import pyopencl as cl
import subprocess
import os
from os.path import join
from gpuexperiments.callkernel import call_cl_kernel
from gpuexperiments.timecheck import inittime, timecheck
import lib_clgpuexp
from lib_clgpuexp import clearComputeCache, getPtx, timeKernel3d, buildKernel, initClGpu
from gpuexperiments.deviceinfo import DeviceInfo
parser = argparse.ArgumentParser()
parser.add_argument('--printptx', action='store_true', help='note that it erases your nv cache')
parser.add_argument('--exp')
args = parser.parse_args()
initClGpu()
times = []
di = DeviceInfo(lib_clgpuexp.device)
code_template = r"""
kernel void {{kernelname}} (global float *data, global float *out) {
int gid = get_global_id(0);
int blockSize = get_local_size(0);
int gridSize = get_num_groups(0);
int outerBlockSize = blockSize * gridSize;
int offset = 0;
for(int i = 0; i < {{its}}; i++) {
out[offset + gid] = 123.0f;
offset += outerBlockSize;
offset = offset >= {{maxOffset}} ? 0 : offset;
}
}
"""
experiments = [
{'name': 'gridsize{gridsize}', 'code': code_template}
]
full_occupancy_bsm = 32 # this should probably not be hard coded...
for experiment in experiments:
if args.exp is not None and args.exp not in experiment['name']:
continue
its = 200000
template = jinja2.Template(experiment['code'], undefined=jinja2.StrictUndefined)
blockSize = 32
grid_x = 1
grid_max = 64 if di.deviceSimpleName == '940m' else 128
while grid_x <= grid_max:
name = experiment['name'].format(gridsize=grid_x)
if args.printptx:
clearComputeCache()
maxOffset = 4 * 1024 * 1024
source = template.render(kernelname=name, its=its, maxOffset=maxOffset, **experiment)
# print('source', source)
try:
kernel = buildKernel(name, source)
print('built kernel')
block = (blockSize, 1, 1)
grid = (grid_x, 1, 1)
for it in range(2):
t = timeKernel3d(name, kernel, grid=grid, block=block, add_args=[
])
t_sum = 0
for it in range(3):
t_sum += timeKernel3d(name, kernel, grid=grid, block=block, add_args=[
])
# print(getPtx(name))
t = t_sum / 3
except Exception as e:
print(e)
break
        # bandwidth = bytes written / elapsed seconds; bytes written is
        # its * total threads * typeSize. No factor of 2 here: unlike a copy
        # kernel, this kernel only writes to the buffer.
typeSize = 4
bandwidth_gib = its * grid[0] * grid[1] * block[0] * block[1] * typeSize / (t/1000) / 1024 / 1024 / 1024
print('bandwidth_gib', bandwidth_gib)
times.append({'name': name, 'time': t, 'bandwidth_gib': bandwidth_gib})
# grid_x *= 2
if grid_x <= 8:
grid_x *= 2
else:
grid_x += 8
f = open('/tmp/globalwrite_gridsize_%s.tsv' % di.deviceSimpleName, 'w')
line = 'name\ttot ms\tbw gib'
print(line)
f.write(line + '\n')
for time_info in times:
line = '%s\t%.1f\t%.2f' % (time_info['name'], time_info['time'], time_info['bandwidth_gib'])
print(line)
f.write(line + '\n')
f.close()
|
{
"content_hash": "772f67a174ee30570c4c6fba0aaf3b55",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 112,
"avg_line_length": 31.287037037037038,
"alnum_prop": 0.5984018940514946,
"repo_name": "hughperkins/gpu-experiments",
"id": "fde65edefe8fa5eef526d8ccfd237aca58ceff7d",
"size": "3379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpuexperiments/globalwrite_gridsize.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20127"
},
{
"name": "Python",
"bytes": "162953"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
}
|
import pymel.core as pymel
from omtk.core.classCtrl import BaseCtrl
from omtk.core import classCtrlModel
from omtk.modules import rigFaceAvar
from omtk.modules import rigFaceAvarGrps
from omtk.libs import libRigging
class AvarEye(rigFaceAvar.AvarSimple):
"""
    Deprecated, defined for backward compatibility (so libSerialization recognizes it and we can access the ctrl shapes).
"""
pass
class CtrlEyes(BaseCtrl):
def __createNode__(self, width=1.0, height=1.0, normal=(0, 0, 1), *args, **kwargs):
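        # Eight CVs trace a flattened octagon around both eyes; the degree-2
        # curve, closed back onto p1, smooths this into a rough oval.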
p1 = [0, height, 0]
p2 = [width * 0.5, height * 0.95, 0]
p3 = [width, 0, 0]
p4 = [width * 0.5, -height * 0.95, 0]
p5 = [0, -height, 0]
p6 = [-width * 0.5, -height * 0.95, 0]
p7 = [-width, 0, 0]
p8 = [-width * 0.5, height * 0.95, 0]
node = pymel.curve(d=2, p=[p1, p2, p3, p4, p5, p6, p7, p8, p1])
return node
class CtrlEye(BaseCtrl):
def __createNode__(self, normal=(0, 0, 1), *args, **kwargs):
return super(CtrlEye, self).__createNode__(normal=normal, *args, **kwargs)
class BaseAvarCtrlModel(classCtrlModel.BaseCtrlModel):
_CLS_CTRL = BaseCtrl
def __init__(self, *args, **kwargs):
super(BaseAvarCtrlModel, self).__init__(*args, **kwargs)
self.ctrl = None
def get_default_tm_ctrl(self):
if self.jnt:
return self.jnt.getMatrix(worldSpace=True)
raise Exception("Cannot resolve ctrl transformation matrix!")
    # todo: implement a correct build method that also creates the ctrl.
def build(self, avar, ctrl_tm=None, ctrl_size=1.0, **kwargs):
super(BaseAvarCtrlModel, self).build(avar, **kwargs)
# Resolve ctrl matrix
if ctrl_tm is None:
ctrl_tm = self.get_default_tm_ctrl()
if ctrl_tm:
self.ctrl.setMatrix(ctrl_tm)
def connect(self, avar, ud=True, fb=True, lr=True, yw=True, pt=True, rl=True, sx=True, sy=True, sz=True):
raise NotImplementedError
class ModelLookAt(BaseAvarCtrlModel):
"""
    This model drives its avars from an object aim-constrained to a ctrl.
"""
_CLS_CTRL = BaseCtrl
def __init__(self, *args, **kwargs):
super(ModelLookAt, self).__init__(*args, **kwargs)
self._attr_out_lr = None
self._attr_out_ud = None
self._attr_out_fb = None
self._attr_out_yw = None
self._attr_out_pt = None
self._attr_out_rl = None
def get_default_tm_ctrl(self):
"""
Find the chin location. This is the preffered location for the jaw doritos.
"""
jnt_pos = self.jnt.getTranslation(space='world')
head_jnt = self.get_head_jnt()
head_length = self.rig.get_head_length(head_jnt)
if not head_length:
pymel.warning("Can't resolve head length! The eyes ctrl location might be erroned.")
offset_z = head_length * 2 if head_length else 0
        return pymel.datatypes.Matrix(
            1, 0, 0, 0,
            0, 1, 0, 0,
            0, 0, 1, 0,
            jnt_pos.x,
            jnt_pos.y,
            jnt_pos.z + offset_z,
            1
        )
def build(self, avar, ref=None, ref_tm=None, ctrl_tm=None, ctrl_size=1.0, **kwargs):
super(ModelLookAt, self).build(avar, ctrl_tm=ctrl_tm, ctrl_size=ctrl_size, **kwargs)
nomenclature_rig = self.get_nomenclature_rig()
# Build an aim node in-place for performance
        # This separate node allows the joints to be driven by the avars.
aim_grp_name = nomenclature_rig.resolve('lookgrp')
aim_grp = pymel.createNode('transform', name=aim_grp_name)
aim_grp.setParent(self.grp_rig)
aim_node_name = nomenclature_rig.resolve('looknode')
aim_node = pymel.createNode('transform', name=aim_node_name)
aim_node.setParent(aim_grp)
aim_grp.setTranslation(self.jnt.getTranslation(space='world'))
# aim_grp.setMatrix(self.jnt.getMatrix(worldSpace=True))
if self.parent:
pymel.parentConstraint(self.parent, aim_grp, maintainOffset=True)
aim_target_name = nomenclature_rig.resolve('target')
aim_target = pymel.createNode('transform', name=aim_target_name)
aim_target.setParent(aim_grp)
self.target = aim_target # todo: remove?
pymel.pointConstraint(self.ctrl, aim_target, maintainOffset=False)
# Build an upnode for the eyes.
        # I'm not a fan of upnodes, but in this case it's better than guessing the joint orient.
aim_upnode_name = nomenclature_rig.resolve('upnode')
aim_upnode = pymel.createNode('transform', name=aim_upnode_name)
aim_upnode.setParent(self.grp_rig)
pymel.parentConstraint(aim_grp, aim_upnode, maintainOffset=True)
pymel.aimConstraint(aim_target, aim_node,
maintainOffset=True,
aimVector=(0.0, 0.0, 1.0),
upVector=(0.0, 1.0, 0.0),
worldUpObject=aim_upnode,
worldUpType='object'
)
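        # With worldUpType='object', the aim node's up axis tracks aim_upnode,
        # which follows aim_grp (and thus the module's parent) through the
        # parentConstraints above, keeping the eye's roll stable when the
        # head moves.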
# Position objects
aim_grp.setTranslation(self.jnt.getTranslation(space='world'))
# aim_grp.setParent(self._grp_offset) # todo: add begin , end property
# aim_grp.t.set(0, 0, 0)
# aim_grp.r.set(0, 0, 0)
jnt_tm = self.jnt.getMatrix(worldSpace=True)
jnt_pos = jnt_tm.translate
aim_upnode_pos = pymel.datatypes.Point(0, 1, 0) + jnt_pos
aim_upnode.setTranslation(aim_upnode_pos, space='world')
aim_target_pos = pymel.datatypes.Point(0, 0, 1) + jnt_pos
aim_target.setTranslation(aim_target_pos, space='world')
# pymel.parentConstraint(aim_node, stack, maintainOffset=True)
        # Convert the rotation to avars so that additional values can be added.
util_decomposeMatrix = libRigging.create_utility_node('decomposeMatrix', inputMatrix=aim_node.matrix)
self._attr_out_lr = util_decomposeMatrix.outputTranslateX
self._attr_out_ud = util_decomposeMatrix.outputTranslateY
self._attr_out_fb = util_decomposeMatrix.outputTranslateZ
self._attr_out_yw = util_decomposeMatrix.outputRotateY
self._attr_out_pt = util_decomposeMatrix.outputRotateX
self._attr_out_rl = util_decomposeMatrix.outputRotateZ
def connect(self, avar, ud=True, fb=True, lr=True, yw=True, pt=True, rl=True, sx=True, sy=True, sz=True):
libRigging.connectAttr_withBlendWeighted(self._attr_out_lr, avar.attr_lr)
libRigging.connectAttr_withBlendWeighted(self._attr_out_ud, avar.attr_ud)
libRigging.connectAttr_withBlendWeighted(self._attr_out_fb, avar.attr_fb)
libRigging.connectAttr_withBlendWeighted(self._attr_out_yw, avar.attr_yw)
libRigging.connectAttr_withBlendWeighted(self._attr_out_pt, avar.attr_pt)
libRigging.connectAttr_withBlendWeighted(self._attr_out_rl, avar.attr_rl)
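        # Per its name, connectAttr_withBlendWeighted presumably routes each
        # output through a blendWeighted node, letting other systems feed the
        # same avar attribute without overwriting these connections.
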
class FaceEyes(rigFaceAvarGrps.AvarGrp):
"""
Look-at setup with avars support.
"""
IS_SIDE_SPECIFIC = False
SHOW_IN_UI = True
SINGLE_PARENT = True
_CLS_MODEL_CTRL_MICRO = ModelLookAt
_CLS_CTRL_MICRO = CtrlEye
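    # _CLS_MODEL_CTRL_MICRO swaps the per-avar ctrl model for the look-at
    # model defined above; _CLS_CTRL_MICRO selects the per-eye ctrl shape.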
def __init__(self, *args, **kwargs):
"""
        Pre-declare all the members used by this module.
"""
super(FaceEyes, self).__init__(*args, **kwargs)
self.ctrl_all = None
def validate_version(self, major_version, minor_version, patch_version):
# See internal task #67368
if major_version == 0 and minor_version == 4 and patch_version < 24:
            raise Exception("This version has issues with the space-switch 'Head' target.")
def handle_surface(self):
pass # todo: better class schema!
def get_default_name(self):
return 'Eyes'
def get_parent_obj(self, **kwargs):
result = self.get_head_jnt(strict=False)
return result or super(FaceEyes, self).get_parent_obj(**kwargs)
def build(self, *args, **kwargs):
if self.parent is None:
raise Exception("Can't build FaceEyes, no parent found!")
super(FaceEyes, self).build(parent=True, *args, **kwargs)
nomenclature_anm = self.get_nomenclature_anm()
        # Resolve the average position of the ctrls.
# This is used as the position of the main ctrl.
ctrl_default_size = 1 # TODO: Compute automatically
ctrl_pos_average = pymel.datatypes.Vector()
ctrl_positions = []
x_min = None
x_max = None
y_min = None
y_max = None
for avar in self.avars:
pos = avar.ctrl.getTranslation(space='world')
ctrl_positions.append(pos)
ctrl_pos_average += pos
if x_min is None or pos.x < x_min:
x_min = pos.x
if x_max is None or pos.x > x_max:
x_max = pos.x
if y_min is None or pos.y < y_min:
y_min = pos.y
if y_max is None or pos.y > y_max:
y_max = pos.y
        ctrl_pos_average /= len(ctrl_positions)  # average over the positions actually summed
width = max(ctrl_default_size, abs(x_max - x_min)) + ctrl_default_size
height = max(ctrl_default_size, abs(y_max - y_min)) + ctrl_default_size
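        # Pad the ctrl extents by one default ctrl size so the main oval
        # visually encloses every per-eye ctrl.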
# Define main ctrl
self.ctrl_all = self.init_ctrl(CtrlEyes, self.ctrl_all)
ctrl_all_name = nomenclature_anm.resolve()
self.ctrl_all.build(width=width, height=height)
self.ctrl_all.setTranslation(ctrl_pos_average)
jnt_head = self.get_parent_obj()
self.ctrl_all.create_spaceswitch(self, jnt_head, add_local=True, local_label='Head', local_target=jnt_head,
add_world=True)
self.ctrl_all.rename(ctrl_all_name)
self.ctrl_all.setParent(self.grp_anm)
# Make all eyes ctrls follow the main ctrl
for avar in self.avars:
avar.ctrl.setParent(self.ctrl_all)
def unbuild(self):
"""
If you are using sub-modules, you might want to clean them here.
:return:
"""
super(FaceEyes, self).unbuild()
def iter_ctrls(self):
for ctrl in super(FaceEyes, self).iter_ctrls():
yield ctrl
yield self.ctrl_all
def calibrate(self):
"""
        It is not possible to calibrate the eyes since they have no avars on a surface.
This will hide the function from the UI.
"""
        pass

def register_plugin():
return FaceEyes
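
# omtk's plugin loader is expected to pick up this module through
# register_plugin(); the returned FaceEyes class is what appears in the UI
# (SHOW_IN_UI = True above).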
|
{
"content_hash": "93d10e23330310590ccbb70f8ee7e078",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 119,
"avg_line_length": 38.17883211678832,
"alnum_prop": 0.6095975528152184,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "1f8b496d3b3a819176545bd60e1e8056170f345d",
"size": "10461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/omtk/modules/rigFaceEyes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
}
|