| code (string, lengths 3 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution(object):
    def __init__(self):
        self.ans = 0

    def dfs(self, root, path):
        if root.left is None and root.right is None:
            print path
            self.ans += int(''.join(map(str, path)))
        if root.left is not None:
            path.append(root.left.val)
            self.dfs(root.left, path)
            path.pop()
        if root.right is not None:
            path.append(root.right.val)
            self.dfs(root.right, path)
            path.pop()

    def sumNumbers(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root is not None:
            self.dfs(root, [root.val])
        return self.ans
| xingjian-f/Leetcode-solution | 129. Sum Root to Leaf Numbers.py | Python | mit | 719 |
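The solution above accumulates every root-to-leaf digit path into a running sum. A minimal usage sketch (Python 2, matching the file's `print` statement; the `TreeNode` class here is assumed, mirroring the commented definition):

```python
# Hypothetical check of the Solution above; TreeNode follows the
# commented-out definition in the file.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
# Paths 1->2 and 1->3 encode the numbers 12 and 13.
print Solution().sumNumbers(root)  # expected: 12 + 13 = 25
```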
import sys
from resources.datatables import Options

def setup(core, object):
    #object.setAttachment('radial_filename', 'object/conversation');
    #object.setAttachment('conversationFile','junk_dealer')
    object.setOptionsBitmask(Options.CONVERSABLE | Options.INVULNERABLE)
    object.setStfFilename('mob/creature_names')
    object.setStfName('heroic_tusken_javran_mu')
    return
| agry/NGECore2 | scripts/object/mobile/javran_mu.py | Python | lgpl-3.0 | 369 |
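`setOptionsBitmask` above combines option flags with bitwise OR. A tiny sketch of the idea with assumed flag values (the real constants live in NGECore2's `resources.datatables.Options` and may differ):

```python
# Sketch of flag combination via bitwise OR; bit positions are assumed.
CONVERSABLE = 1 << 0   # hypothetical value
INVULNERABLE = 1 << 1  # hypothetical value

mask = CONVERSABLE | INVULNERABLE  # 0b11 == 3
assert mask & CONVERSABLE          # individual flags remain testable
assert mask & INVULNERABLE
```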
#!/usr/bin/env python

"""Graphical interface for DropTheBeat."""

import os
import sys
import argparse
from tkinter import *  # pylint: disable=wildcard-import,unused-wildcard-import
from tkinter import messagebox, simpledialog, filedialog
from tkinter.ttk import *  # pylint: disable=wildcard-import,unused-wildcard-import
from itertools import chain
import logging

from . import GUI, __version__
from . import share, user
from . import settings
from .common import SHARED, WarningFormatter

_LAUNCH = True


class Application(Frame):  # pylint: disable=too-many-instance-attributes
    """Tkinter application for DropTheBeat."""

    def __init__(self, master=None, root=None, home=None, name=None):
        Frame.__init__(self, master)

        # Load the root sharing directory
        self.root = root or share.find(home)

        # Load the user
        self.user = user.User(os.path.join(self.root, name)) if name else None
        try:
            self.user = self.user or user.get_current(self.root)
        except EnvironmentError:
            while True:
                msg = "Enter your name in the form 'First Last':"
                text = simpledialog.askstring("Create a User", msg)
                logging.debug("text: {}".format(repr(text)))
                name = text.strip(" '") if text else None
                if not name:
                    raise KeyboardInterrupt("no user specified")
                try:
                    self.user = user.User.new(self.root, name)
                except EnvironmentError:
                    existing = user.User(os.path.join(self.root, name))
                    msg = "Is this you:"
                    for info in existing.info:
                        msg += "\n\n'{}' on '{}'".format(info[1], info[0])
                    if not existing.info or \
                            messagebox.askyesno("Add to Existing User", msg):
                        self.user = user.User.add(self.root, name)
                        break
                else:
                    break

        # Create variables
        self.path_root = StringVar(value=self.root)
        self.path_downloads = StringVar(value=self.user.path_downloads)
        self.outgoing = []
        self.incoming = []

        # Initialize the GUI
        self.listbox_outgoing = None
        self.listbox_incoming = None
        frame = self.init(master)
        frame.pack(fill=BOTH, expand=1)

        # Show the GUI
        master.deiconify()
        self.update()

    def init(self, root):
        """Initialize frames and widgets."""
        # pylint: disable=line-too-long
        mac = sys.platform == 'darwin'

        # Shared keyword arguments
        kw_f = {'padding': 5}  # constructor arguments for frames
        kw_gp = {'padx': 5, 'pady': 5}  # grid arguments for padded widgets
        kw_gs = {'sticky': NSEW}  # grid arguments for sticky widgets
        kw_gsp = dict(chain(kw_gs.items(), kw_gp.items()))  # grid arguments for sticky padded widgets

        # Configure grid
        frame = Frame(root, **kw_f)
        frame.rowconfigure(0, weight=0)
        frame.rowconfigure(2, weight=1)
        frame.rowconfigure(4, weight=1)
        frame.columnconfigure(0, weight=1)

        # Create widgets
        def frame_settings(master):
            """Frame for the settings."""
            frame = Frame(master, **kw_f)

            # Configure grid
            frame.rowconfigure(0, weight=1)
            frame.rowconfigure(1, weight=1)
            frame.columnconfigure(0, weight=0)
            frame.columnconfigure(1, weight=1)
            frame.columnconfigure(2, weight=0)

            # Place widgets
            Label(frame, text="Shared:").grid(row=0, column=0, sticky=W, **kw_gp)
            Entry(frame, state='readonly', textvariable=self.path_root).grid(row=0, column=1, columnspan=2, **kw_gsp)
            Label(frame, text="Downloads:").grid(row=1, column=0, sticky=W, **kw_gp)
            Entry(frame, state='readonly', textvariable=self.path_downloads).grid(row=1, column=1, **kw_gsp)
            Button(frame, text="...", width=0, command=self.browse_downloads).grid(row=1, column=2, ipadx=5, **kw_gp)

            return frame

        def frame_incoming(master):
            """Frame for incoming songs."""
            frame = Frame(master, **kw_f)

            # Configure grid
            frame.rowconfigure(0, weight=1)
            frame.rowconfigure(1, weight=0)
            frame.columnconfigure(0, weight=0)
            frame.columnconfigure(1, weight=1)
            frame.columnconfigure(2, weight=1)

            # Place widgets
            self.listbox_incoming = Listbox(frame, selectmode=EXTENDED if mac else MULTIPLE)
            self.listbox_incoming.grid(row=0, column=0, columnspan=3, **kw_gsp)
            scroll_incoming = Scrollbar(frame, orient=VERTICAL, command=self.listbox_incoming.yview)
            self.listbox_incoming.configure(yscrollcommand=scroll_incoming.set)
            scroll_incoming.grid(row=0, column=2, sticky=(N, E, S))
            Button(frame, text="\u21BB", width=0, command=self.update).grid(row=1, column=0, sticky=SW, ipadx=5, **kw_gp)
            Button(frame, text="Ignore Selected", command=self.do_ignore).grid(row=1, column=1, sticky=SW, ipadx=5, **kw_gp)
            Button(frame, text="Download Selected", command=self.do_download).grid(row=1, column=2, sticky=SE, ipadx=5, **kw_gp)

            return frame

        def frame_outgoing(master):
            """Frame for outgoing songs."""
            frame = Frame(master, **kw_f)

            # Configure grid
            frame.rowconfigure(0, weight=1)
            frame.rowconfigure(1, weight=0)
            frame.columnconfigure(0, weight=0)
            frame.columnconfigure(1, weight=1)
            frame.columnconfigure(2, weight=1)

            # Place widgets
            self.listbox_outgoing = Listbox(frame, selectmode=EXTENDED if mac else MULTIPLE)
            self.listbox_outgoing.grid(row=0, column=0, columnspan=3, **kw_gsp)
            scroll_outgoing = Scrollbar(frame, orient=VERTICAL, command=self.listbox_outgoing.yview)
            self.listbox_outgoing.configure(yscrollcommand=scroll_outgoing.set)
            scroll_outgoing.grid(row=0, column=2, sticky=(N, E, S))
            Button(frame, text="\u21BB", width=0, command=self.update).grid(row=1, column=0, sticky=SW, ipadx=5, **kw_gp)
            Button(frame, text="Remove Selected", command=self.do_remove).grid(row=1, column=1, sticky=SW, ipadx=5, **kw_gp)
            Button(frame, text="Share Songs...", command=self.do_share).grid(row=1, column=2, sticky=SE, ipadx=5, **kw_gp)

            return frame

        def separator(master):
            """Widget to separate frames."""
            return Separator(master)

        # Place widgets
        frame_settings(frame).grid(row=0, **kw_gs)
        separator(frame).grid(row=1, padx=10, pady=5, **kw_gs)
        frame_outgoing(frame).grid(row=2, **kw_gs)
        separator(frame).grid(row=3, padx=10, pady=5, **kw_gs)
        frame_incoming(frame).grid(row=4, **kw_gs)

        return frame

    def browse_downloads(self):
        """Browse for a new downloads directory."""
        path = filedialog.askdirectory()
        logging.debug("path: {}".format(path))
        if path:
            self.user.path_downloads = path
            self.path_downloads.set(self.user.path_downloads)

    def do_remove(self):
        """Remove selected songs."""
        for index in (int(s) for s in self.listbox_outgoing.curselection()):
            self.outgoing[index].ignore()
        self.update()

    def do_share(self):
        """Share songs."""
        paths = filedialog.askopenfilenames()
        if isinstance(paths, str):  # http://bugs.python.org/issue5712
            paths = self.master.splitlist(paths)
        logging.debug("paths: {}".format(paths))
        for path in paths:
            self.user.recommend(path)
        self.update()

    def do_ignore(self):
        """Ignore selected songs."""
        for index in (int(s) for s in self.listbox_incoming.curselection()):
            song = self.incoming[index]
            song.ignore()
        self.update()

    def do_download(self):
        """Download selected songs."""
        indices = (int(s) for s in self.listbox_incoming.curselection())
        try:
            for index in indices:
                song = self.incoming[index]
                song.download(catch=False)
        except IOError as exc:
            self.show_error_from_exception(exc, "Download Error")
        self.update()

    def update(self):
        """Update the lists of outgoing and incoming songs."""
        # Clean up outgoing songs
        self.user.cleanup()

        # Update the outgoing songs list
        logging.info("updating outgoing songs...")
        self.outgoing = list(self.user.outgoing)
        self.listbox_outgoing.delete(0, END)
        for song in self.outgoing:
            self.listbox_outgoing.insert(END, song.out_string)

        # Update the incoming songs list
        logging.info("updating incoming songs...")
        self.incoming = list(self.user.incoming)
        self.listbox_incoming.delete(0, END)
        for song in self.incoming:
            self.listbox_incoming.insert(END, song.in_string)

    @staticmethod
    def show_error_from_exception(exception, title="Error"):
        """Convert an exception to an error dialog."""
        message = str(exception)
        message = message[0].upper() + message[1:]
        if ": " in message:
            # str.replace returns a new string; the result must be assigned
            message = message.replace(": ", ":\n\n")
        else:
            message += "."
        messagebox.showerror(title, message)


def main(args=None):
    """Process command-line arguments and run the program."""
    # Main parser
    parser = argparse.ArgumentParser(prog=GUI, description=__doc__, **SHARED)
    parser.add_argument('--home', metavar='PATH', help="path to home directory")
    # Hidden argument to override the root sharing directory path
    parser.add_argument('--root', metavar="PATH", help=argparse.SUPPRESS)
    # Hidden argument to run the program as a different user
    parser.add_argument('--test', metavar='"First Last"',
                        help=argparse.SUPPRESS)

    # Parse arguments
    args = parser.parse_args(args=args)

    # Configure logging
    _configure_logging(args.verbose)

    # Run the program
    try:
        success = run(args)
    except KeyboardInterrupt:
        logging.debug("program manually closed")
    else:
        if success:
            logging.debug("program exited")
        else:
            logging.debug("program exited with error")
            sys.exit(1)


def _configure_logging(verbosity=0):
    """Configure logging using the provided verbosity level (0+)."""
    # Configure the logging level and format
    if verbosity == 0:
        level = settings.VERBOSE_LOGGING_LEVEL
        default_format = settings.DEFAULT_LOGGING_FORMAT
        verbose_format = settings.VERBOSE_LOGGING_FORMAT
    else:
        level = settings.VERBOSE2_LOGGING_LEVEL
        default_format = verbose_format = settings.VERBOSE_LOGGING_FORMAT

    # Set a custom formatter
    logging.basicConfig(level=level)
    formatter = WarningFormatter(default_format, verbose_format)
    logging.root.handlers[0].setFormatter(formatter)


def run(args):
    """Start the GUI."""
    root = Tk()
    root.title("{} (v{})".format(GUI, __version__))
    root.minsize(500, 500)

    # Map the Mac 'command' key to 'control'
    if sys.platform == 'darwin':
        root.bind_class('Listbox', '<Command-Button-1>',
                        root.bind_class('Listbox', '<Control-Button-1>'))

    # Temporarily hide the window for other dialogs
    root.withdraw()

    # Start the application
    try:
        app = Application(master=root, home=args.home,
                          root=args.root, name=args.test)
        if _LAUNCH:
            app.mainloop()
    except Exception as e:  # pylint: disable=broad-except
        Application.show_error_from_exception(e)
        return False

    return True


if __name__ == '__main__':  # pragma: no cover (manual test)
    main()
| jacebrowning/dropthebeat | dtb/gui.py | Python | lgpl-3.0 | 12,164 |
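In `init` above, `kw_gsp` merges the sticky and padded grid options with `itertools.chain`. A standalone sketch of that merge pattern (the `'nsew'` string stands in for tkinter's `NSEW` constant):

```python
# Merging two keyword-argument dicts via chain(), as init() does above.
from itertools import chain

kw_gs = {'sticky': 'nsew'}      # sticky-widget grid options
kw_gp = {'padx': 5, 'pady': 5}  # padded-widget grid options
kw_gsp = dict(chain(kw_gs.items(), kw_gp.items()))
print(kw_gsp)  # {'sticky': 'nsew', 'padx': 5, 'pady': 5}
```

On Python 3.5+ the literal `{**kw_gs, **kw_gp}` achieves the same merge; `chain` keeps the original compatible with older interpreters.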
import sys
import os
import time
from itertools import tee, imap, izip, izip_longest, product, cycle, islice, chain
from functools import partial
import traceback
from multiprocessing import Pool
import random
import string
import math
import importlib
import imp
from ctypes import cdll
from contextlib import contextmanager

import pkg_resources
import numpy as np
from numpy.lib import recfunctions as nprf

__eta__ = 1e-100


def get_shared_lib(name):
    """Cross-platform resolution of shared-object libraries, working
    around vagaries of setuptools.
    """
    try:
        # after 'python setup.py install' we should be able to do this
        lib_file = importlib.import_module(name).__file__
    except Exception as e:
        try:
            # after 'python setup.py develop' this should work
            lib_file = imp.find_module(name)[1]
        except Exception as e:
            raise ImportError('Cannot locate C library for event detection.')
        else:
            lib_file = os.path.abspath(lib_file)
    finally:
        library = cdll.LoadLibrary(lib_file)
    return library


def nanonet_resource(filename, subfolder='data'):
    return pkg_resources.resource_filename('nanonet',
        os.path.join(subfolder, filename))


comp = {
    'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'X': 'X', 'N': 'N',
    'a': 't', 't': 'a', 'c': 'g', 'g': 'c', 'x': 'x', 'n': 'n',
    '-': '-'
}


def all_kmers(alphabet='ACGT', length=5, rev_map=False):
    """Find all possible kmers of given length.

    .. Warning::
        The return value type of this function is dependent on the input
        arguments.

    :param alphabet: string from which to draw characters.
    :param length: length of kmers required.
    :param rev_map: return also a dictionary containing the reverse mapping, i.e. {'AAA':0, 'AAC':1}.

    :returns: a list of strings. kmers are sorted by the ordering of the *alphabet*. If *rev_map*
        is specified a second item is returned as noted above.
    """
    fwd_map = map(lambda x: ''.join(x), product(alphabet, repeat=length))
    if not rev_map:
        return fwd_map
    else:
        return fwd_map, dict(zip(fwd_map, xrange(len(fwd_map))))


def all_nmers(n=3, alpha='ACGT'):
    return all_kmers(length=n, alphabet=alpha)


def com(k):
    """Return the complement of a base.

    Performs the substitutions: A<=>T, C<=>G, X=>X for both upper and lower
    case. The return value is identical to the argument for all other values.
    """
    try:
        return comp[k]
    except KeyError:
        sys.stderr.write("WARNING: No reverse complement for {} found, returning argument.".format(k))
        return k


def rc_kmer(seq):
    """Return the reverse complement of a string (base) sequence."""
    return reduce(lambda x, y: x + y, map(com, seq[::-1]))


def kmers_to_annotated_sequence(kmers):
    """From a sequence of kmers calculate a contiguous symbol string
    and a list indexing the first kmer in which each symbol was observed.

    *Returns* a tuple containing:

    ================ ======================================================
    *sequence*       contiguous symbol string
    *indices*        indices of *kmers* with first occurrence of
                     corresponding symbol in *sequence*
    ================ ======================================================
    """
    overlaps = kmer_overlap(kmers)
    sequence = kmers_to_call(kmers, overlaps)
    pos = np.cumsum(overlaps, dtype=int)
    indices = [-1] * len(sequence)
    lastpos = -1
    for i, p in enumerate(pos):
        if p != lastpos:
            indices[p] = i
        lastpos = p
    return sequence, indices


def kmer_overlap(kmers, moves=None, it=False):
    """From a list of kmers return the character shifts between them.
    (Movement from i to i+1 entry, e.g. [AATC,ATCG] returns [0,1]).

    :param kmers: sequence of kmer strings.
    :param moves: allowed movements; if None all movements up to the length
        of the kmer are allowed.
    :param it: yield values instead of returning a list.

    Allowed moves may be specified in the moves argument in order of preference.
    """
    if it:
        return kmer_overlap_gen(kmers, moves)
    else:
        return list(kmer_overlap_gen(kmers, moves))


def kmer_overlap_gen(kmers, moves=None):
    """From a list of kmers return the character shifts between them.
    (Movement from i to i+1 entry, e.g. [AATC,ATCG] returns [0,1]).
    Allowed moves may be specified in the moves argument in order of preference.

    :param moves: allowed movements; if None all movements up to the length
        of the kmer are allowed.
    """
    first = True
    yield 0
    for last_kmer, this_kmer in window(kmers, 2):
        if first:
            if moves is None:
                l = len(this_kmer)
                moves = range(l + 1)
            first = False
        l = len(this_kmer)
        for j in moves:
            if j < 0:
                if last_kmer[:j] == this_kmer[-j:]:
                    yield j
                    break
            elif j > 0 and j < l:
                if last_kmer[j:l] == this_kmer[0:-j]:
                    yield j
                    break
            elif j == 0:
                if last_kmer == this_kmer:
                    yield 0
                    break
            else:
                yield l
                break


def kmers_to_call(kmers, moves):
    """From a list of kmers and movements, produce a basecall.

    :param kmers: iterable of kmers.
    :param moves: iterable of character overlaps between kmers.
    """
    # We use izip_longest to check that the iterables are the same length
    bases = None
    for kmer, move in izip_longest(kmers, moves, fillvalue=None):
        if kmer is None or move is None:
            raise RuntimeError('Lengths of kmers and moves must be equal (kmers={} and moves={}).'.format(len(kmers), len(moves)))
        if move < 0 and not math.isnan(move):
            raise RuntimeError('kmers_to_call() cannot perform call when backward moves are present.')
        if bases is None:
            bases = kmer
        else:
            if math.isnan(move):
                bases = bases + 'N' + kmer
            else:
                bases = bases + kmer[len(kmer) - int(move):len(kmer)]
    return bases


def kmers_to_sequence(kmers):
    """Convert a sequence of kmers into a contiguous symbol string.

    :param kmers: list of kmers from which to form a sequence.

    .. note:
        This is simply a convenient synthesis of :func:`kmer_overlap`
        and :func:`kmers_to_call`.
    """
    return kmers_to_call(kmers, kmer_overlap(kmers))


def random_string(length=6):
    """Return a random upper-case string of given length.

    :param length: length of string to return.
    """
    return ''.join(random.choice(string.ascii_uppercase) for _ in range(length))


def conf_line(option, value, pad=30):
    return '{} = {}\n'.format(option.ljust(pad), value)


def window(iterable, size):
    """Create an iterator returning a sliding window from another iterator.

    :param iterable: iterable object.
    :param size: size of window.
    """
    iters = tee(iterable, size)
    for i in xrange(1, size):
        for each in iters[i:]:
            next(each, None)
    return izip(*iters)


def group_by_list(iterable, group_sizes):
    """Yield successive varying-size lists from an iterator."""
    sizes = cycle(group_sizes)
    it = iter(iterable)
    while True:
        chunk_it = islice(it, sizes.next())
        try:
            first_el = next(chunk_it)
        except StopIteration:
            break
        yield list(chain((first_el,), chunk_it))


class AddFields(object):
    """Helper to add numerous fields to a numpy array. (Syntactic
    sugar around numpy.lib.recfunctions.append_fields)."""

    def __init__(self, array):
        self.array = array
        self.data = []
        self.fields = []
        self.dtypes = []

    def add(self, field, data, dtype=None):
        """Add a field.

        :param field: field name.
        :param data: column of data.
        :param dtype: dtype of data column.
        """
        if len(data) != len(self.array):
            raise TypeError('Length of additional field must be equal to base array.')
        if dtype is None:
            dtype = data.dtype
        self.fields.append(field)
        self.data.append(data)
        self.dtypes.append(dtype)

    def finalize(self):
        return nprf.append_fields(self.array, self.fields, self.data, self.dtypes, usemask=False)


def docstring_parameter(*sub):
    """Allow docstrings to contain parameters."""
    def dec(obj):
        obj.__doc__ = obj.__doc__.format(*sub)
        return obj
    return dec


class FastaWrite(object):

    def __init__(self, filename=None, fastq=False):
        """Simple Fasta writer to file or stdout. The only task this
        class achieves is formatting sequences into fixed line lengths.

        :param filename: if `None` or '-' output is written to stdout,
            else it is written to a file opened with name `filename`.
        :param fastq: write FASTQ records instead of FASTA.
        """
        self.filename = filename
        self.fastq = fastq

    def __enter__(self):
        if self.filename is not None and self.filename != '-':
            self.fh = open(self.filename, 'w', 0)
        else:
            self.fh = os.fdopen(sys.stdout.fileno(), 'w', 0)
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        if self.fh is not sys.stdout:
            self.fh.close()

    def write(self, name, seq, qual=None, meta=None, line_length=80):
        if self.fastq:
            self._write_fastq(name, seq, qual, meta)
            return
        #TODO: handle meta
        self.fh.write(">{}\n".format(name))
        for chunk in (seq[i:i + line_length] for i in xrange(0, len(seq), line_length)):
            self.fh.write('{}\n'.format(chunk))
        self.fh.flush()

    def _write_fastq(self, name, seq, qual=None, meta=None):
        if qual is None:
            qual = '!' * len(seq)
        #TODO: handle meta
        self.fh.write("@{}\n{}\n+\n{}\n".format(name, seq, qual))
        self.fh.flush()


def _try_except_pass(func, *args, **kwargs):
    """Implementation of try_except_pass below. When wrapping a function we
    would ordinarily form a closure over a (sub)set of the inputs. Such
    closures cannot be pickled, however, since the wrapper name is not
    importable. We get around this by using functools.partial (which is
    picklable). The result is that we can decorate a function to mask
    exceptions thrown by it.
    """
    # Strip out "our" arguments; this slightly perverse business allows
    # us to call the target function with multiple arguments.
    recover = kwargs.pop('recover', None)
    recover_fail = kwargs.pop('recover_fail', False)
    try:
        return func(*args, **kwargs)
    except:
        exc_info = sys.exc_info()
        try:
            if recover is not None:
                recover(*args, **kwargs)
        except Exception as e:
            sys.stderr.write("Unrecoverable error.")
            if recover_fail:
                raise e
            else:
                traceback.print_exc(sys.exc_info()[2])
        # print the original traceback
        traceback.print_tb(exc_info[2])
    return None


def try_except_pass(func, recover=None, recover_fail=False):
    """Wrap a function to mask exceptions that it may raise. This is
    equivalent to::

        def try_except_pass(func):
            def wrapped():
                try:
                    func()
                except Exception as e:
                    print str(e)
            return wrapped

    in the simplest sense, but the resulting function can be pickled.

    :param func: function to call.
    :param recover: function to call immediately after an exception is thrown in
        calling `func`. Will be passed the same args and kwargs as `func`.
    :param recover_fail: raise an exception if the recover function raises?

    ..note::
        See `_try_except_pass` for the implementation, which is not locally
        scoped here because we wish for it to be picklable.

    ..warning::
        Best practice would suggest this to be a dangerous function. Consider
        rewriting the target function to better handle its errors. The use
        case here is intended to be ignoring exceptions raised by functions
        when mapped over arguments, if failures for some arguments can be
        tolerated.
    """
    return partial(_try_except_pass, func, recover=recover, recover_fail=recover_fail)


def except_functor(function, *args, **kwargs):
    """Wrapper for worker functions, running under multiprocessing, to better
    display tracebacks when the functions raise exceptions."""
    try:
        return function(*args, **kwargs)
    except:
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))


class __NotGiven(object):
    def __init__(self):
        """Some horrible voodoo"""
        pass


def tang_imap(
    function, args, fix_args=__NotGiven(), fix_kwargs=__NotGiven(),
    threads=1, unordered=False, chunksize=1,
    pass_exception=False, recover=None, recover_fail=False,
):
    """Wrapper around various map functions.

    :param function: the function to apply; must be picklable for multiprocess
        mapping (problems will result if the function is not at the top level
        of scope).
    :param args: iterable of argument values of function to map over.
    :param fix_args: arguments to hold fixed.
    :param fix_kwargs: keyword arguments to hold fixed.
    :param threads: number of subprocesses.
    :param unordered: use unordered multiprocessing map.
    :param chunksize: multiprocessing job chunksize.
    :param pass_exception: ignore exceptions thrown by function?
    :param recover: callback for recovering from exceptions in function.
    :param recover_fail: reraise exceptions when recovery fails?

    .. note::
        This function is a generator; the caller will need to consume it.
        If fix_args or fix_kwargs are given, these are first used to create a
        partially evaluated version of function.

    The special :class:`__NotGiven` is used here to flag when optional arguments
    are to be used.
    """
    my_function = function
    if not isinstance(fix_args, __NotGiven):
        my_function = partial(my_function, *fix_args)
    if not isinstance(fix_kwargs, __NotGiven):
        my_function = partial(my_function, **fix_kwargs)
    if pass_exception:
        my_function = try_except_pass(my_function, recover=recover, recover_fail=recover_fail)
    else:
        my_function = partial(except_functor, my_function)

    if threads == 1:
        for r in imap(my_function, args):
            yield r
    else:
        pool = Pool(threads)
        if unordered:
            mapper = pool.imap_unordered
        else:
            mapper = pool.imap
        for r in mapper(my_function, args, chunksize=chunksize):
            yield r
        pool.close()
        pool.join()


def fileno(file_or_fd):
    """Return a file descriptor from a file descriptor or file object."""
    fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
    if not isinstance(fd, int):
        raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
    return fd


@contextmanager
def stderr_redirected(to=os.devnull, stderr=sys.stderr):
    """Redirect stderr (optionally something else) at the file
    descriptor level. Defaults allow ignoring of stderr.

    :param to: redirection target.
    :param stderr: stream to redirect.
    """
    stderr_fd = fileno(stderr)
    # copy stderr_fd before it is overwritten
    #NOTE: `copied` is inheritable on Windows when duplicating a standard stream
    with os.fdopen(os.dup(stderr_fd), 'wb') as copied:
        stderr.flush()  # flush library buffers that dup2 knows nothing about
        try:
            os.dup2(fileno(to), stderr_fd)  # $ exec 2>&to
        except ValueError:  # filename
            with open(to, 'wb') as to_file:
                os.dup2(to_file.fileno(), stderr_fd)  # $ exec 2> to
        try:
            yield stderr  # allow code to be run with the redirected stderr
        finally:
            # restore stderr to its previous value
            #NOTE: dup2 makes stderr_fd inheritable unconditionally
            stderr.flush()
            os.dup2(copied.fileno(), stderr_fd)  # $ exec 2>&copied
| nanoporetech/nanonet | nanonet/util.py | Python | mpl-2.0 | 16,481 |
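The kmer utilities above stitch overlapping kmers back into a basecall. A small worked example (Python 2, matching the file's `izip`/`xrange` idioms) of what `kmer_overlap` and `kmers_to_sequence` should produce:

```python
# Worked example for the kmer utilities above (Python 2 semantics).
kmers = ['AATC', 'ATCG', 'TCGA']
# Each kmer is shifted one character from its predecessor, so the
# movements are [0, 1, 1] and the call appends one base per unit move.
print kmer_overlap(kmers)       # expected: [0, 1, 1]
print kmers_to_sequence(kmers)  # expected: 'AATCGA'
```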
# Movie Model
class Movie(object):
    # Movie Constructor
    def __init__(self, title, story_line, poster_image, trailer_youtube, year):
        self.title = title
        self.storyline = story_line
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
        self.year = year
| vzool/fullstack-nanodegree-movie-trailer-website | app/model/movie.py | Python | mit | 287 |
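A minimal instantiation sketch for the `Movie` model above; every field value here is a made-up placeholder:

```python
# Hypothetical usage of the Movie model above; values are placeholders.
toy_story = Movie(
    title='Toy Story',
    story_line='A boy and his toys that come to life.',
    poster_image='https://example.com/poster.jpg',              # placeholder URL
    trailer_youtube='https://www.youtube.com/watch?v=example',  # placeholder URL
    year=1995,
)
print(toy_story.title, toy_story.year)
```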
"""VetAppDjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from VetApp import views, ajax
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^animal/$', views.AnimalView.as_view()),
url(r'^owner/$', views.OwnerView.as_view(), name='Owner'),
#url(r'^owner/(?P<id>\d+)/$', views.OwnerView.as_view(), name='Owner'),
url(r'^$', views.IndexView.as_view(), name='VetApp'),
url(r'^item/$', views.ItemView.as_view(), name='Item'),
url(r'^visit/$', views.VisitView.as_view(), name='Visit'),
url(r'^vet/$', views.VetView.as_view(), name='Vet'),
url(r'^api/items/', views.items_search, name='search_items'),
url(r'^api/operations/', views.operations_search, name='search_operations'),
url(r'ajax_url/$', ajax.ajax_view, name='ajax_view'),
url(r'ajax_animal/$', ajax.get_animals, name='ajax_animal_view'),
url(r'ajax_header/$', ajax.get_header, name='ajax_header_view'),
]
| mape90/VetAppDjango | VetAppDjango/urls.py | Python | mit | 1,565 |
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}
if "particle_0 geometry" not in marker_sets:
    s = new_marker_set('particle_0 geometry')
    marker_sets["particle_0 geometry"] = s
s = marker_sets["particle_0 geometry"]
mark = s.place_marker((11990.4, 254.385, 5253.61), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
    s = new_marker_set('particle_1 geometry')
    marker_sets["particle_1 geometry"] = s
s = marker_sets["particle_1 geometry"]
mark = s.place_marker((11363, 1874.59, 4946.84), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
    s = new_marker_set('particle_2 geometry')
    marker_sets["particle_2 geometry"] = s
s = marker_sets["particle_2 geometry"]
mark = s.place_marker((9461.01, 1821.95, 4719.62), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
    s = new_marker_set('particle_3 geometry')
    marker_sets["particle_3 geometry"] = s
s = marker_sets["particle_3 geometry"]
mark = s.place_marker((10691, 3.26048, 3974.69), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
    s = new_marker_set('particle_4 geometry')
    marker_sets["particle_4 geometry"] = s
s = marker_sets["particle_4 geometry"]
mark = s.place_marker((9300.29, -952.65, 3439.55), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
    s = new_marker_set('particle_5 geometry')
    marker_sets["particle_5 geometry"] = s
s = marker_sets["particle_5 geometry"]
mark = s.place_marker((7213.64, 55.244, 4128.7), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
    s = new_marker_set('particle_6 geometry')
    marker_sets["particle_6 geometry"] = s
s = marker_sets["particle_6 geometry"]
mark = s.place_marker((5926.94, 525.048, 5205.63), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
    s = new_marker_set('particle_7 geometry')
    marker_sets["particle_7 geometry"] = s
s = marker_sets["particle_7 geometry"]
mark = s.place_marker((6294.72, -175.962, 4952.32), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
    s = new_marker_set('particle_8 geometry')
    marker_sets["particle_8 geometry"] = s
s = marker_sets["particle_8 geometry"]
mark = s.place_marker((4933.45, 1591.46, 6330.19), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
    s = new_marker_set('particle_9 geometry')
    marker_sets["particle_9 geometry"] = s
s = marker_sets["particle_9 geometry"]
mark = s.place_marker((4471.5, 1054.92, 7841.22), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
    s = new_marker_set('particle_10 geometry')
    marker_sets["particle_10 geometry"] = s
s = marker_sets["particle_10 geometry"]
mark = s.place_marker((3969.14, 2823.43, 8334.3), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
    s = new_marker_set('particle_11 geometry')
    marker_sets["particle_11 geometry"] = s
s = marker_sets["particle_11 geometry"]
mark = s.place_marker((4572.64, 3458.85, 8109.79), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
    s = new_marker_set('particle_12 geometry')
    marker_sets["particle_12 geometry"] = s
s = marker_sets["particle_12 geometry"]
mark = s.place_marker((4223.42, 4892.44, 8266.02), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
    s = new_marker_set('particle_13 geometry')
    marker_sets["particle_13 geometry"] = s
s = marker_sets["particle_13 geometry"]
mark = s.place_marker((5478.56, 4410.27, 9039.19), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
    s = new_marker_set('particle_14 geometry')
    marker_sets["particle_14 geometry"] = s
s = marker_sets["particle_14 geometry"]
mark = s.place_marker((5698.8, 5940.23, 10722.7), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
    s = new_marker_set('particle_15 geometry')
    marker_sets["particle_15 geometry"] = s
s = marker_sets["particle_15 geometry"]
mark = s.place_marker((6556.97, 8906.69, 10582.9), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
    s = new_marker_set('particle_16 geometry')
    marker_sets["particle_16 geometry"] = s
s = marker_sets["particle_16 geometry"]
mark = s.place_marker((6732.45, 9016.32, 8613.43), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
    s = new_marker_set('particle_17 geometry')
    marker_sets["particle_17 geometry"] = s
s = marker_sets["particle_17 geometry"]
mark = s.place_marker((7424.76, 8281.25, 9021.08), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
    s = new_marker_set('particle_18 geometry')
    marker_sets["particle_18 geometry"] = s
s = marker_sets["particle_18 geometry"]
mark = s.place_marker((6542.42, 6873.96, 9056.16), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
    s = new_marker_set('particle_19 geometry')
    marker_sets["particle_19 geometry"] = s
s = marker_sets["particle_19 geometry"]
mark = s.place_marker((5361.02, 6526.23, 9957.92), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
    s = new_marker_set('particle_20 geometry')
    marker_sets["particle_20 geometry"] = s
s = marker_sets["particle_20 geometry"]
mark = s.place_marker((4710.93, 5274.68, 7954.61), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
    s = new_marker_set('particle_21 geometry')
    marker_sets["particle_21 geometry"] = s
s = marker_sets["particle_21 geometry"]
mark = s.place_marker((5591.45, 6786.53, 9092.93), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
    s = new_marker_set('particle_22 geometry')
    marker_sets["particle_22 geometry"] = s
s = marker_sets["particle_22 geometry"]
mark = s.place_marker((6184.09, 7182.18, 8376.79), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
    s = new_marker_set('particle_23 geometry')
    marker_sets["particle_23 geometry"] = s
s = marker_sets["particle_23 geometry"]
mark = s.place_marker((7207.99, 8004.92, 8426.44), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
    s = new_marker_set('particle_24 geometry')
    marker_sets["particle_24 geometry"] = s
s = marker_sets["particle_24 geometry"]
mark = s.place_marker((8533.59, 7630.77, 8461.63), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
    s = new_marker_set('particle_25 geometry')
    marker_sets["particle_25 geometry"] = s
s = marker_sets["particle_25 geometry"]
mark = s.place_marker((9782.72, 7989.03, 9150.07), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
    s = new_marker_set('particle_26 geometry')
    marker_sets["particle_26 geometry"] = s
s = marker_sets["particle_26 geometry"]
mark = s.place_marker((8419.16, 7372.11, 8973.82), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
    s = new_marker_set('particle_27 geometry')
    marker_sets["particle_27 geometry"] = s
s = marker_sets["particle_27 geometry"]
mark = s.place_marker((7557.9, 5527.62, 8273.73), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
    s = new_marker_set('particle_28 geometry')
    marker_sets["particle_28 geometry"] = s
s = marker_sets["particle_28 geometry"]
mark = s.place_marker((6226.34, 6220.05, 8101.75), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
    s = new_marker_set('particle_29 geometry')
    marker_sets["particle_29 geometry"] = s
s = marker_sets["particle_29 geometry"]
mark = s.place_marker((5001.88, 5813.06, 7924.5), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
    s = new_marker_set('particle_30 geometry')
    marker_sets["particle_30 geometry"] = s
s = marker_sets["particle_30 geometry"]
mark = s.place_marker((5050.36, 6354.37, 7176.96), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
    s = new_marker_set('particle_31 geometry')
    marker_sets["particle_31 geometry"] = s
s = marker_sets["particle_31 geometry"]
mark = s.place_marker((4768.31, 4752.8, 7775.71), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
    s = new_marker_set('particle_32 geometry')
    marker_sets["particle_32 geometry"] = s
s = marker_sets["particle_32 geometry"]
mark = s.place_marker((4178.84, 6310.02, 8377.3), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
    s = new_marker_set('particle_33 geometry')
    marker_sets["particle_33 geometry"] = s
s = marker_sets["particle_33 geometry"]
mark = s.place_marker((5033.39, 7241.19, 7599.64), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
    s = new_marker_set('particle_34 geometry')
    marker_sets["particle_34 geometry"] = s
s = marker_sets["particle_34 geometry"]
mark = s.place_marker((5853.26, 7577.53, 8510.35), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
    s = new_marker_set('particle_35 geometry')
    marker_sets["particle_35 geometry"] = s
s = marker_sets["particle_35 geometry"]
mark = s.place_marker((5717.08, 8527.29, 9493.39), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
    s = new_marker_set('particle_36 geometry')
    marker_sets["particle_36 geometry"] = s
s = marker_sets["particle_36 geometry"]
mark = s.place_marker((5039.29, 6549.44, 8137.49), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
    s = new_marker_set('particle_37 geometry')
    marker_sets["particle_37 geometry"] = s
s = marker_sets["particle_37 geometry"]
mark = s.place_marker((4859.39, 8389.29, 8276.48), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
    s = new_marker_set('particle_38 geometry')
    marker_sets["particle_38 geometry"] = s
s = marker_sets["particle_38 geometry"]
mark = s.place_marker((5537.02, 7981.6, 7322.7), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
    s = new_marker_set('particle_39 geometry')
    marker_sets["particle_39 geometry"] = s
s = marker_sets["particle_39 geometry"]
mark = s.place_marker((4252.62, 8280.3, 8228.37), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
    s = new_marker_set('particle_40 geometry')
    marker_sets["particle_40 geometry"] = s
s = marker_sets["particle_40 geometry"]
mark = s.place_marker((3476.76, 7171.93, 7111.49), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
    s = new_marker_set('particle_41 geometry')
    marker_sets["particle_41 geometry"] = s
s = marker_sets["particle_41 geometry"]
mark = s.place_marker((1826.56, 6274.27, 7168.57), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
    s = new_marker_set('particle_42 geometry')
    marker_sets["particle_42 geometry"] = s
s = marker_sets["particle_42 geometry"]
mark = s.place_marker((-150.678, 8052.59, 6283.17), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
    s = new_marker_set('particle_43 geometry')
    marker_sets["particle_43 geometry"] = s
s = marker_sets["particle_43 geometry"]
mark = s.place_marker((34.5497, 6199.2, 5964.71), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
    s = new_marker_set('particle_44 geometry')
    marker_sets["particle_44 geometry"] = s
s = marker_sets["particle_44 geometry"]
mark = s.place_marker((1457.29, 7077.15, 6351.98), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
    s = new_marker_set('particle_45 geometry')
    marker_sets["particle_45 geometry"] = s
s = marker_sets["particle_45 geometry"]
mark = s.place_marker((3031.49, 5896.32, 6299.37), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
    s = new_marker_set('particle_46 geometry')
    marker_sets["particle_46 geometry"] = s
s = marker_sets["particle_46 geometry"]
mark = s.place_marker((3455.89, 7189.82, 4905.98), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
    s = new_marker_set('particle_47 geometry')
    marker_sets["particle_47 geometry"] = s
s = marker_sets["particle_47 geometry"]
mark = s.place_marker((2248.37, 8589.85, 5808.56), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
    s = new_marker_set('particle_48 geometry')
    marker_sets["particle_48 geometry"] = s
s = marker_sets["particle_48 geometry"]
mark = s.place_marker((2931.78, 7272.24, 4672.18), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
    s = new_marker_set('particle_49 geometry')
    marker_sets["particle_49 geometry"] = s
s = marker_sets["particle_49 geometry"]
mark = s.place_marker((3587.8, 5469.14, 4411.58), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
    s = new_marker_set('particle_50 geometry')
    marker_sets["particle_50 geometry"] = s
s = marker_sets["particle_50 geometry"]
mark = s.place_marker((2131.13, 5564.54, 4224.64), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
    s = new_marker_set('particle_51 geometry')
    marker_sets["particle_51 geometry"] = s
s = marker_sets["particle_51 geometry"]
mark = s.place_marker((3177.41, 4520.43, 5076.09), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
    s = new_marker_set('particle_52 geometry')
    marker_sets["particle_52 geometry"] = s
s = marker_sets["particle_52 geometry"]
mark = s.place_marker((4642.44, 3665.29, 5718.79), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
    s = new_marker_set('particle_53 geometry')
    marker_sets["particle_53 geometry"] = s
s = marker_sets["particle_53 geometry"]
mark = s.place_marker((3911.53, 2322.65, 5519.29), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
    s = new_marker_set('particle_54 geometry')
    marker_sets["particle_54 geometry"] = s
s = marker_sets["particle_54 geometry"]
mark = s.place_marker((3302.62, 2429.46, 4958.19), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
    s = new_marker_set('particle_55 geometry')
    marker_sets["particle_55 geometry"] = s
s = marker_sets["particle_55 geometry"]
mark = s.place_marker((3991.37, 4397.91, 4527.72), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
    s = new_marker_set('particle_56 geometry')
    marker_sets["particle_56 geometry"] = s
s = marker_sets["particle_56 geometry"]
mark = s.place_marker((5618.82, 4174.55, 3079.06), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
    s = new_marker_set('particle_57 geometry')
    marker_sets["particle_57 geometry"] = s
s = marker_sets["particle_57 geometry"]
mark = s.place_marker((6963.25, 5564.69, 1440.85), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
    s = new_marker_set('particle_58 geometry')
    marker_sets["particle_58 geometry"] = s
s = marker_sets["particle_58 geometry"]
mark = s.place_marker((7274.89, 5956.6, 1103.2), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
    s = new_marker_set('particle_59 geometry')
    marker_sets["particle_59 geometry"] = s
s = marker_sets["particle_59 geometry"]
mark = s.place_marker((7291.68, 5227.8, 704.912), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
    s = new_marker_set('particle_60 geometry')
    marker_sets["particle_60 geometry"] = s
s = marker_sets["particle_60 geometry"]
mark = s.place_marker((6652.66, 5440.09, 1277.04), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
    s = new_marker_set('particle_61 geometry')
    marker_sets["particle_61 geometry"] = s
s = marker_sets["particle_61 geometry"]
mark = s.place_marker((6313.5, 5285.87, 485.456), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
    s = new_marker_set('particle_62 geometry')
    marker_sets["particle_62 geometry"] = s
s = marker_sets["particle_62 geometry"]
mark = s.place_marker((6219.56, 4830.35, 2327.15), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
    s = new_marker_set('particle_63 geometry')
    marker_sets["particle_63 geometry"] = s
s = marker_sets["particle_63 geometry"]
mark = s.place_marker((6344.49, 4613.45, 468.573), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
    s = new_marker_set('particle_64 geometry')
    marker_sets["particle_64 geometry"] = s
s = marker_sets["particle_64 geometry"]
mark = s.place_marker((6863.52, 5003.19, -1472.43), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
    s = new_marker_set('particle_65 geometry')
    marker_sets["particle_65 geometry"] = s
s = marker_sets["particle_65 geometry"]
mark = s.place_marker((5204.91, 5427.9, -895.179), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
    s = new_marker_set('particle_66 geometry')
    marker_sets["particle_66 geometry"] = s
s = marker_sets["particle_66 geometry"]
mark = s.place_marker((5713.3, 6955.91, -1353.84), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
    s = new_marker_set('particle_67 geometry')
    marker_sets["particle_67 geometry"] = s
s = marker_sets["particle_67 geometry"]
mark = s.place_marker((5533.12, 5517.04, 622.389), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
    s = new_marker_set('particle_68 geometry')
    marker_sets["particle_68 geometry"] = s
s = marker_sets["particle_68 geometry"]
mark = s.place_marker((6681.25, 5733.91, -1015.43), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
    s = new_marker_set('particle_69 geometry')
    marker_sets["particle_69 geometry"] = s
s = marker_sets["particle_69 geometry"]
mark = s.place_marker((6607.3, 7308.39, -1550.97), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
    s = new_marker_set('particle_70 geometry')
    marker_sets["particle_70 geometry"] = s
s = marker_sets["particle_70 geometry"]
mark = s.place_marker((7347.45, 7701.46, -434.686), (0.7, 0.7, 0.7), 764.488)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models46742.py | Python | gpl-3.0 | 17,581 |
import itertools


def main():
    list_a = list(map(int, input().split(' ')))
    list_b = list(map(int, input().split(' ')))
    return list(itertools.product(list_a, list_b))


if __name__ == '__main__':
    print(*main())
| FireClaw/HackerRank | Python/itertools-product.py | Python | mit | 224 |
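A worked run of the script above, simulating the two space-separated input lines from the HackerRank "itertools.product()" sample:

```python
import itertools

# Simulating the script above on the classic sample input lines "1 2" and "3 4".
list_a = list(map(int, "1 2".split(' ')))
list_b = list(map(int, "3 4".split(' ')))
print(*itertools.product(list_a, list_b))
# -> (1, 3) (1, 4) (2, 3) (2, 4)
```

`print(*main())` unpacks the list of tuples, so the pairs appear space-separated on one line.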
"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print __doc__
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD
import numpy as np
import pylab as pl
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
pl.figure(figsize=(12, 6))
pl.subplot(1, 2, 1)
pl.title('Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
pl.legend(loc='upper right')
pl.xlabel('Boosting Iterations')
pl.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, boston.feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
| seckcoder/lang-learn | python/sklearn/examples/ensemble/plot_gradient_boosting_regression.py | Python | unlicense | 2,480 |
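The script above targets an old scikit-learn. On recent versions (roughly 1.2 and later, an assumption about the reader's environment) `load_boston` has been removed and `loss='ls'` is spelled `'squared_error'`, so an equivalent staged test-error curve might be sketched like this, with the California housing dataset as a stand-in:

```python
# Hedged modernization sketch of the example above for recent scikit-learn.
import numpy as np
from sklearn import ensemble
from sklearn.datasets import fetch_california_housing  # replaces load_boston
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

X, y = fetch_california_housing(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=13)

clf = ensemble.GradientBoostingRegressor(
    n_estimators=500, max_depth=4, learning_rate=0.01, loss='squared_error')
clf.fit(X_train, y_train)

# staged_predict yields predictions after each boosting stage, standing in
# for the older staged_decision_function / clf.loss_ pattern.
test_score = np.array([
    mean_squared_error(y_test, y_pred)
    for y_pred in clf.staged_predict(X_test)
])
print("final MSE: %.4f" % test_score[-1])
```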
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
# pylint: disable=unused-import, too-many-lines
import asyncio
from concurrent.futures import ThreadPoolExecutor
import enum
import functools as ft
import logging
import os
import re
import signal
import sys
import threading
import time
from types import MappingProxyType
from typing import Optional, Any, Callable, List # NOQA
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import (
ATTR_DOMAIN, ATTR_FRIENDLY_NAME, ATTR_NOW, ATTR_SERVICE,
ATTR_SERVICE_CALL_ID, ATTR_SERVICE_DATA, EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_EXECUTED, EVENT_SERVICE_REGISTERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL, RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART, SERVICE_HOMEASSISTANT_STOP, __version__)
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
import homeassistant.util as util
import homeassistant.util.dt as dt_util
import homeassistant.util.location as location
from homeassistant.util.unit_system import UnitSystem, METRIC_SYSTEM # NOQA
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
DOMAIN = "homeassistant"
# How often time_changed event should fire
TIMER_INTERVAL = 1 # seconds
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Define number of MINIMUM worker threads.
# During bootstrap of HA (see bootstrap._setup_component()) worker threads
# will be added for each component that polls devices.
MIN_WORKER_THREAD = 2
# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(\w+)\.(\w+)$")
# Interval at which we check if the pool is getting busy
MONITOR_POOL_INTERVAL = 30
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
"""Split a state entity_id into domain, object_id."""
return entity_id.split(".", 1)
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format."""
return ENTITY_ID_PATTERN.match(entity_id) is not None
def callback(func: Callable[..., None]) -> Callable[..., None]:
"""Annotation to mark method as safe to call from within the event loop."""
# pylint: disable=protected-access
func._hass_callback = True
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return '_hass_callback' in func.__dict__
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
not_running = "NOT_RUNNING"
starting = "STARTING"
running = "RUNNING"
stopping = "STOPPING"
def __str__(self) -> str:
"""Return the event."""
return self.value
class JobPriority(util.OrderedEnum):
"""Provides job priorities for event bus jobs."""
EVENT_CALLBACK = 0
EVENT_SERVICE = 1
EVENT_STATE = 2
EVENT_TIME = 3
EVENT_DEFAULT = 4
@staticmethod
def from_event_type(event_type):
"""Return a priority based on event type."""
if event_type == EVENT_TIME_CHANGED:
return JobPriority.EVENT_TIME
elif event_type == EVENT_STATE_CHANGED:
return JobPriority.EVENT_STATE
elif event_type == EVENT_CALL_SERVICE:
return JobPriority.EVENT_SERVICE
elif event_type == EVENT_SERVICE_EXECUTED:
return JobPriority.EVENT_CALLBACK
return JobPriority.EVENT_DEFAULT
class HomeAssistant(object):
"""Root object of the Home Assistant home automation."""
# pylint: disable=too-many-instance-attributes
def __init__(self, loop=None):
"""Initialize new Home Assistant object."""
self.loop = loop or asyncio.get_event_loop()
self.executor = ThreadPoolExecutor(max_workers=5)
self.loop.set_default_executor(self.executor)
self.pool = pool = create_worker_pool()
self.bus = EventBus(pool, self.loop)
self.services = ServiceRegistry(self.bus, self.add_job, self.loop)
self.states = StateMachine(self.bus, self.loop)
self.config = Config() # type: Config
self.state = CoreState.not_running
self.exit_code = None
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
def start(self) -> None:
"""Start home assistant."""
_LOGGER.info(
"Starting Home Assistant (%d threads)", self.pool.worker_count)
self.state = CoreState.starting
# Register the async start
self.loop.create_task(self.async_start())
def stop_homeassistant(*args):
"""Stop Home Assistant."""
self.exit_code = 0
self.async_add_job(self.async_stop)
def restart_homeassistant(*args):
"""Restart Home Assistant."""
self.exit_code = RESTART_EXIT_CODE
self.async_add_job(self.async_stop)
# Register the restart/stop event
self.loop.call_soon(
self.services.async_register,
DOMAIN, SERVICE_HOMEASSISTANT_STOP, stop_homeassistant
)
self.loop.call_soon(
self.services.async_register,
DOMAIN, SERVICE_HOMEASSISTANT_RESTART, restart_homeassistant
)
# Setup signal handling
if sys.platform != 'win32':
try:
self.loop.add_signal_handler(
signal.SIGTERM,
stop_homeassistant
)
except ValueError:
_LOGGER.warning('Could not bind to SIGTERM.')
try:
self.loop.add_signal_handler(
signal.SIGHUP,
restart_homeassistant
)
except ValueError:
_LOGGER.warning('Could not bind to SIGHUP.')
# Run forever and catch keyboard interrupt
try:
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.call_soon(stop_homeassistant)
self.loop.run_forever()
@asyncio.coroutine
def async_start(self):
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
# pylint: disable=protected-access
self.loop._thread_ident = threading.get_ident()
async_create_timer(self)
async_monitor_worker_pool(self)
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
yield from self.loop.run_in_executor(None, self.pool.block_till_done)
self.state = CoreState.running
def add_job(self,
target: Callable[..., None],
*args: Any,
priority: JobPriority=JobPriority.EVENT_DEFAULT) -> None:
"""Add job to the worker pool.
target: target to call.
args: parameters for method to call.
"""
self.pool.add_job(priority, (target,) + args)
def async_add_job(self, target: Callable[..., None], *args: Any):
"""Add a job from within the eventloop.
target: target to call.
args: parameters for method to call.
"""
if is_callback(target):
self.loop.call_soon(target, *args)
elif asyncio.iscoroutinefunction(target):
self.loop.create_task(target(*args))
else:
self.add_job(target, *args)
def async_run_job(self, target: Callable[..., None], *args: Any):
"""Run a job from within the event loop.
target: target to call.
args: parameters for method to call.
"""
if is_callback(target):
target(*args)
else:
self.async_add_job(target, *args)
def _loop_empty(self):
"""Python 3.4.2 empty loop compatibility function."""
# pylint: disable=protected-access
if sys.version_info < (3, 4, 3):
return len(self.loop._scheduled) == 0 and \
len(self.loop._ready) == 0
else:
return self.loop._current_handle is None and \
len(self.loop._ready) == 0
def block_till_done(self):
"""Block till all pending work is done."""
complete = threading.Event()
@asyncio.coroutine
def sleep_wait():
"""Sleep in thread pool."""
yield from self.loop.run_in_executor(None, time.sleep, 0)
def notify_when_done():
"""Notify event loop when pool done."""
count = 0
while True:
# Wait for the work queue to empty
self.pool.block_till_done()
# Verify the loop is empty
if self._loop_empty():
count += 1
if count == 2:
break
# sleep in the loop executor, this forces execution back into
# the event loop to avoid the block thread from starving the
# async loop
run_coroutine_threadsafe(
sleep_wait(),
self.loop
).result()
complete.set()
threading.Thread(name="BlockThread", target=notify_when_done).start()
complete.wait()
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
run_coroutine_threadsafe(self.async_stop(), self.loop)
@asyncio.coroutine
def async_stop(self) -> None:
"""Stop Home Assistant and shuts down all threads.
This method is a coroutine.
"""
self.state = CoreState.stopping
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
yield from self.loop.run_in_executor(None, self.pool.block_till_done)
yield from self.loop.run_in_executor(None, self.pool.stop)
self.executor.shutdown()
self.state = CoreState.not_running
self.loop.stop()
class EventOrigin(enum.Enum):
"""Represent the origin of an event."""
local = "LOCAL"
remote = "REMOTE"
def __str__(self):
"""Return the event."""
return self.value
class Event(object):
# pylint: disable=too-few-public-methods
"""Represents an event within the Bus."""
__slots__ = ['event_type', 'data', 'origin', 'time_fired']
def __init__(self, event_type, data=None, origin=EventOrigin.local,
time_fired=None):
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = time_fired or dt_util.utcnow()
def as_dict(self):
"""Create a dict representation of this Event."""
return {
'event_type': self.event_type,
'data': dict(self.data),
'origin': str(self.origin),
'time_fired': self.time_fired,
}
def __repr__(self):
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}[{}]: {}>".format(
self.event_type, str(self.origin)[0],
util.repr_helper(self.data))
else:
return "<Event {}[{}]>".format(self.event_type,
str(self.origin)[0])
def __eq__(self, other):
"""Return the comparison."""
return (self.__class__ == other.__class__ and
self.event_type == other.event_type and
self.data == other.data and
self.origin == other.origin and
self.time_fired == other.time_fired)
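# A minimal usage sketch for Event (illustrative only; the event type and
# data below are hypothetical, not constants from this module):
#
#   >>> event = Event('demo_event', {'answer': 42})
#   >>> event.as_dict()['event_type']
#   'demo_event'
#   >>> str(event.origin)
#   'LOCAL'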
class EventBus(object):
"""Allows firing of and listening for events."""
def __init__(self, pool: util.ThreadPool,
loop: asyncio.AbstractEventLoop) -> None:
"""Initialize a new event bus."""
self._listeners = {}
self._pool = pool
self._loop = loop
def async_listeners(self):
"""Dict with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(self._listeners[key])
for key in self._listeners}
@property
def listeners(self):
"""Dict with events and the number of listeners."""
return run_callback_threadsafe(
self._loop, self.async_listeners
).result()
def fire(self, event_type: str, event_data=None, origin=EventOrigin.local):
"""Fire an event."""
if not self._pool.running:
raise HomeAssistantError('Home Assistant has shut down.')
self._loop.call_soon_threadsafe(self.async_fire, event_type,
event_data, origin)
def async_fire(self, event_type: str, event_data=None,
origin=EventOrigin.local, wait=False):
"""Fire an event.
This method must be run in the event loop.
"""
# Copy the list of the current listeners because some listeners
# remove themselves as a listener while being executed which
# causes the iterator to be confused.
get = self._listeners.get
listeners = get(MATCH_ALL, []) + get(event_type, [])
event = Event(event_type, event_data, origin)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.info("Bus:Handling %s", event)
if not listeners:
return
job_priority = JobPriority.from_event_type(event_type)
sync_jobs = []
for func in listeners:
if asyncio.iscoroutinefunction(func):
self._loop.create_task(func(event))
elif is_callback(func):
self._loop.call_soon(func, event)
else:
sync_jobs.append((job_priority, (func, event)))
# Send all the sync jobs at once
if sync_jobs:
self._pool.add_many_jobs(sync_jobs)
def listen(self, event_type, listener):
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
future = run_callback_threadsafe(
self._loop, self.async_listen, event_type, listener)
future.result()
def remove_listener():
"""Remove the listener."""
self._remove_listener(event_type, listener)
return remove_listener
def async_listen(self, event_type, listener):
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
This method must be run in the event loop.
"""
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def remove_listener():
"""Remove the listener."""
self.async_remove_listener(event_type, listener)
return remove_listener
def listen_once(self, event_type, listener):
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
@ft.wraps(listener)
def onetime_listener(event):
"""Remove listener from eventbus and then fire listener."""
if hasattr(onetime_listener, 'run'):
return
# Set variable so that we will never run twice.
# Because the event bus might have to wait till a thread comes
# available to execute this listener it might occur that the
# listener gets lined up twice to be executed.
# This will make sure the second time it does nothing.
setattr(onetime_listener, 'run', True)
remove_listener()
listener(event)
remove_listener = self.listen(event_type, onetime_listener)
return remove_listener
def async_listen_once(self, event_type, listener):
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
@ft.wraps(listener)
@asyncio.coroutine
def onetime_listener(event):
"""Remove listener from eventbus and then fire listener."""
if hasattr(onetime_listener, 'run'):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
            # times, it's possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(onetime_listener, 'run', True)
self.async_remove_listener(event_type, onetime_listener)
if asyncio.iscoroutinefunction(listener):
yield from listener(event)
else:
job_priority = JobPriority.from_event_type(event.event_type)
self._pool.add_job(job_priority, (listener, event))
self.async_listen(event_type, onetime_listener)
return onetime_listener
def remove_listener(self, event_type, listener):
"""Remove a listener of a specific event_type. (DEPRECATED 0.28)."""
_LOGGER.warning('bus.remove_listener has been deprecated. Please use '
'the function returned from calling listen.')
self._remove_listener(event_type, listener)
def _remove_listener(self, event_type, listener):
"""Remove a listener of a specific event_type."""
future = run_callback_threadsafe(
self._loop,
self.async_remove_listener, event_type, listener
)
future.result()
def async_remove_listener(self, event_type, listener):
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
            # KeyError if no listeners are registered for event_type
            # ValueError if listener is not registered for event_type
_LOGGER.warning('Unable to remove unknown listener %s',
listener)
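# A minimal usage sketch for EventBus (hedged: `bus` is an EventBus wired to
# a running pool and event loop, and the event type and handler are made up):
#
#   >>> def handler(event):
#   ...     print('got', event.event_type)
#   >>> unsub = bus.listen('demo_event', handler)
#   >>> bus.fire('demo_event', {'some': 'data'})
#   >>> unsub()  # detach the listener again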
class State(object):
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
"""
__slots__ = ['entity_id', 'state', 'attributes',
'last_changed', 'last_updated']
# pylint: disable=too-many-arguments
def __init__(self, entity_id, state, attributes=None, last_changed=None,
last_updated=None):
"""Initialize a new state."""
if not valid_entity_id(entity_id):
raise InvalidEntityFormatError((
"Invalid entity id encountered: {}. "
"Format should be <domain>.<object_id>").format(entity_id))
self.entity_id = entity_id.lower()
self.state = str(state)
self.attributes = MappingProxyType(attributes or {})
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
@property
def domain(self):
"""Domain of this state."""
return split_entity_id(self.entity_id)[0]
@property
def object_id(self):
"""Object id of this state."""
return split_entity_id(self.entity_id)[1]
@property
def name(self):
"""Name of this state."""
return (
self.attributes.get(ATTR_FRIENDLY_NAME) or
self.object_id.replace('_', ' '))
def as_dict(self):
"""Return a dict representation of the State.
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
return {'entity_id': self.entity_id,
'state': self.state,
'attributes': dict(self.attributes),
'last_changed': self.last_changed,
'last_updated': self.last_updated}
@classmethod
def from_dict(cls, json_dict):
"""Initialize a state from a dict.
        Ensures: state == State.from_dict(state.as_dict())
"""
if not (json_dict and 'entity_id' in json_dict and
'state' in json_dict):
return None
last_changed = json_dict.get('last_changed')
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
last_updated = json_dict.get('last_updated')
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
return cls(json_dict['entity_id'], json_dict['state'],
json_dict.get('attributes'), last_changed, last_updated)
def __eq__(self, other):
"""Return the comparison of the state."""
return (self.__class__ == other.__class__ and
self.entity_id == other.entity_id and
self.state == other.state and
self.attributes == other.attributes)
def __repr__(self):
"""Return the representation of the states."""
attr = "; {}".format(util.repr_helper(self.attributes)) \
if self.attributes else ""
return "<state {}={}{} @ {}>".format(
self.entity_id, self.state, attr,
dt_util.as_local(self.last_changed).isoformat())
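# A minimal usage sketch for State (entity id and attributes are made up).
# Note the round-trip guarantee documented on as_dict()/from_dict():
#
#   >>> state = State('light.kitchen', 'on', {'brightness': 180})
#   >>> state.domain, state.object_id
#   ('light', 'kitchen')
#   >>> State.from_dict(state.as_dict()) == state
#   True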
class StateMachine(object):
"""Helper class that tracks the state of different entities."""
def __init__(self, bus, loop):
"""Initialize state machine."""
self._states = {}
self._bus = bus
self._loop = loop
def entity_ids(self, domain_filter=None):
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
return future.result()
def async_entity_ids(self, domain_filter=None):
"""List of entity ids that are being tracked."""
if domain_filter is None:
return list(self._states.keys())
domain_filter = domain_filter.lower()
return [state.entity_id for state in self._states.values()
if state.domain == domain_filter]
def all(self):
"""Create a list of all states."""
return run_callback_threadsafe(self._loop, self.async_all).result()
def async_all(self):
"""Create a list of all states.
This method must be run in the event loop.
"""
return list(self._states.values())
def get(self, entity_id):
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
return self._states.get(entity_id.lower())
def is_state(self, entity_id, state):
"""Test if entity exists and is specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj and state_obj.state == state
def is_state_attr(self, entity_id, name, value):
"""Test if entity exists and has a state attribute set to value.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj and state_obj.attributes.get(name, None) == value
def remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return run_callback_threadsafe(
self._loop, self.async_remove, entity_id).result()
def async_remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
if old_state is None:
return False
event_data = {
'entity_id': entity_id,
'old_state': old_state,
'new_state': None,
}
self._bus.async_fire(EVENT_STATE_CHANGED, event_data)
return True
def set(self, entity_id, new_state, attributes=None, force_update=False):
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
self.async_set, entity_id, new_state, attributes, force_update,
).result()
def async_set(self, entity_id, new_state, attributes=None,
force_update=False):
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
old_state = self._states.get(entity_id)
is_existing = old_state is not None
same_state = (is_existing and old_state.state == new_state and
not force_update)
same_attr = is_existing and old_state.attributes == attributes
if same_state and same_attr:
return
# If state did not exist or is different, set it
last_changed = old_state.last_changed if same_state else None
state = State(entity_id, new_state, attributes, last_changed)
self._states[entity_id] = state
event_data = {
'entity_id': entity_id,
'old_state': old_state,
'new_state': state,
}
self._bus.async_fire(EVENT_STATE_CHANGED, event_data)
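# A minimal usage sketch for StateMachine (hedged: `states` is the machine
# owned by a running HomeAssistant instance; the entity id is hypothetical):
#
#   >>> states.set('switch.heater', 'off')
#   >>> states.is_state('switch.heater', 'off')
#   True
#   >>> states.entity_ids('switch')
#   ['switch.heater']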
# pylint: disable=too-few-public-methods
class Service(object):
"""Represents a callable service."""
__slots__ = ['func', 'description', 'fields', 'schema',
'is_callback', 'is_coroutinefunction']
def __init__(self, func, description, fields, schema):
"""Initialize a service."""
self.func = func
self.description = description or ''
self.fields = fields or {}
self.schema = schema
self.is_callback = is_callback(func)
self.is_coroutinefunction = asyncio.iscoroutinefunction(func)
def as_dict(self):
"""Return dictionary representation of this service."""
return {
'description': self.description,
'fields': self.fields,
}
# pylint: disable=too-few-public-methods
class ServiceCall(object):
"""Represents a call to a service."""
__slots__ = ['domain', 'service', 'data', 'call_id']
def __init__(self, domain, service, data=None, call_id=None):
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.call_id = call_id
def __repr__(self):
"""Return the represenation of the service."""
if self.data:
return "<ServiceCall {}.{}: {}>".format(
self.domain, self.service, util.repr_helper(self.data))
else:
return "<ServiceCall {}.{}>".format(self.domain, self.service)
class ServiceRegistry(object):
"""Offers services over the eventbus."""
def __init__(self, bus, add_job, loop):
"""Initialize a service registry."""
self._services = {}
self._add_job = add_job
self._bus = bus
self._loop = loop
self._cur_id = 0
run_callback_threadsafe(
loop,
bus.async_listen, EVENT_CALL_SERVICE, self._event_to_service_call,
)
@property
def services(self):
"""Dict with per domain a list of available services."""
return run_callback_threadsafe(
self._loop, self.async_services,
).result()
def async_services(self):
"""Dict with per domain a list of available services."""
return {domain: {key: value.as_dict() for key, value
in self._services[domain].items()}
for domain in self._services}
def has_service(self, domain, service):
"""Test if specified service exists."""
return service.lower() in self._services.get(domain.lower(), [])
# pylint: disable=too-many-arguments
def register(self, domain, service, service_func, description=None,
schema=None):
"""
Register a service.
Description is a dict containing key 'description' to describe
the service and a key 'fields' to describe the fields.
Schema is called to coerce and validate the service data.
"""
run_callback_threadsafe(
self._loop,
self.async_register, domain, service, service_func, description,
schema
).result()
def async_register(self, domain, service, service_func, description=None,
schema=None):
"""
Register a service.
Description is a dict containing key 'description' to describe
the service and a key 'fields' to describe the fields.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
description = description or {}
service_obj = Service(service_func, description.get('description'),
description.get('fields', {}), schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
self._bus.async_fire(
EVENT_SERVICE_REGISTERED,
{ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def call(self, domain, service, service_data=None, blocking=False):
"""
Call a service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
        If blocking=True, returns a boolean indicating whether the service
        executed successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
"""
return run_coroutine_threadsafe(
self.async_call(domain, service, service_data, blocking),
self._loop
).result()
    @asyncio.coroutine
def async_call(self, domain, service, service_data=None, blocking=False):
"""
Call a service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
        If blocking=True, returns a boolean indicating whether the service
        executed successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
call_id = self._generate_unique_id()
event_data = {
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
ATTR_SERVICE_CALL_ID: call_id,
}
if blocking:
fut = asyncio.Future(loop=self._loop)
@callback
def service_executed(event):
"""Callback method that is called when service is executed."""
if event.data[ATTR_SERVICE_CALL_ID] == call_id:
fut.set_result(True)
unsub = self._bus.async_listen(EVENT_SERVICE_EXECUTED,
service_executed)
self._bus.async_fire(EVENT_CALL_SERVICE, event_data)
if blocking:
done, _ = yield from asyncio.wait([fut], loop=self._loop,
timeout=SERVICE_CALL_LIMIT)
success = bool(done)
unsub()
return success
@asyncio.coroutine
def _event_to_service_call(self, event):
"""Callback for SERVICE_CALLED events from the event bus."""
service_data = event.data.get(ATTR_SERVICE_DATA) or {}
domain = event.data.get(ATTR_DOMAIN).lower()
service = event.data.get(ATTR_SERVICE).lower()
call_id = event.data.get(ATTR_SERVICE_CALL_ID)
if not self.has_service(domain, service):
if event.origin == EventOrigin.local:
_LOGGER.warning('Unable to find service %s/%s',
domain, service)
return
service_handler = self._services[domain][service]
def fire_service_executed():
"""Fire service executed event."""
if not call_id:
return
data = {ATTR_SERVICE_CALL_ID: call_id}
if (service_handler.is_coroutinefunction or
service_handler.is_callback):
self._bus.async_fire(EVENT_SERVICE_EXECUTED, data)
else:
self._bus.fire(EVENT_SERVICE_EXECUTED, data)
try:
if service_handler.schema:
service_data = service_handler.schema(service_data)
except vol.Invalid as ex:
_LOGGER.error('Invalid service data for %s.%s: %s',
domain, service, humanize_error(service_data, ex))
fire_service_executed()
return
service_call = ServiceCall(domain, service, service_data, call_id)
if service_handler.is_callback:
service_handler.func(service_call)
fire_service_executed()
elif service_handler.is_coroutinefunction:
yield from service_handler.func(service_call)
fire_service_executed()
else:
def execute_service():
"""Execute a service and fires a SERVICE_EXECUTED event."""
service_handler.func(service_call)
fire_service_executed()
self._add_job(execute_service, priority=JobPriority.EVENT_SERVICE)
def _generate_unique_id(self):
"""Generate a unique service call id."""
self._cur_id += 1
return "{}-{}".format(id(self), self._cur_id)
class Config(object):
"""Configuration settings for Home Assistant."""
# pylint: disable=too-many-instance-attributes
def __init__(self):
"""Initialize a new config object."""
self.latitude = None # type: Optional[float]
self.longitude = None # type: Optional[float]
self.elevation = None # type: Optional[int]
self.location_name = None # type: Optional[str]
self.time_zone = None # type: Optional[str]
self.units = METRIC_SYSTEM # type: UnitSystem
# If True, pip install is skipped for requirements on startup
self.skip_pip = False # type: bool
# List of loaded components
self.components = []
# Remote.API object pointing at local API
self.api = None
# Directory that holds the configuration
self.config_dir = None
    def distance(self, lat: float, lon: float) -> float:
"""Calculate distance from Home Assistant."""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), 'm')
def path(self, *path):
"""Generate path to the file within the config dir."""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def as_dict(self):
"""Create a dict representation of this dict."""
time_zone = self.time_zone or dt_util.UTC
return {
'latitude': self.latitude,
'longitude': self.longitude,
'unit_system': self.units.as_dict(),
'location_name': self.location_name,
'time_zone': time_zone.zone,
'components': self.components,
'config_dir': self.config_dir,
'version': __version__
}
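# A minimal usage sketch for Config.path() (the directory is hypothetical):
#
#   >>> config = Config()
#   >>> config.config_dir = '/home/user/.homeassistant'
#   >>> config.path('automations.yaml')
#   '/home/user/.homeassistant/automations.yaml'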
def async_create_timer(hass, interval=TIMER_INTERVAL):
"""Create a timer that will start on HOMEASSISTANT_START."""
stop_event = asyncio.Event(loop=hass.loop)
    # Set the stop event from a callback running inside the event loop
@callback
def stop_timer(event):
"""Stop the timer."""
stop_event.set()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
@asyncio.coroutine
def timer(interval, stop_event):
"""Create an async timer."""
_LOGGER.info("Timer:starting")
last_fired_on_second = -1
calc_now = dt_util.utcnow
while not stop_event.is_set():
now = calc_now()
            # The first check verifies that we are not on a second matching
            # the timer interval; the second verifies that we did not
            # already fire during this interval.
if now.second % interval or \
now.second == last_fired_on_second:
# Sleep till it is the next time that we have to fire an event.
# Aim for halfway through the second that fits TIMER_INTERVAL.
# If TIMER_INTERVAL is 10 fire at .5, 10.5, 20.5, etc seconds.
                # This yields the best results because time.sleep() is not
                # 100% accurate on non-realtime operating systems.
slp_seconds = interval - now.second % interval + \
.5 - now.microsecond/1000000.0
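                # Worked example with hypothetical values: interval=10,
                # now.second=7, now.microsecond=250000 gives
                # slp_seconds = 10 - 7 + 0.5 - 0.25 = 3.25, i.e. we wake
                # halfway through second 10 (at 10.5s into the minute).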
yield from asyncio.sleep(slp_seconds, loop=hass.loop)
now = calc_now()
last_fired_on_second = now.second
# Event might have been set while sleeping
if not stop_event.is_set():
try:
# Schedule the bus event
hass.loop.call_soon(
hass.bus.async_fire,
EVENT_TIME_CHANGED,
{ATTR_NOW: now}
)
except HomeAssistantError:
# HA raises error if firing event after it has shut down
break
@asyncio.coroutine
def start_timer(event):
"""Start our async timer."""
hass.loop.create_task(timer(interval, stop_event))
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_timer)
def create_worker_pool(worker_count=None):
"""Create a worker pool."""
if worker_count is None:
worker_count = MIN_WORKER_THREAD
def job_handler(job):
"""Called whenever a job is available to do."""
try:
func, *args = job
func(*args)
except Exception: # pylint: disable=broad-except
# Catch any exception our service/event_listener might throw
# We do not want to crash our ThreadPool
_LOGGER.exception("BusHandler:Exception doing job")
return util.ThreadPool(job_handler, worker_count)
def async_monitor_worker_pool(hass):
"""Create a monitor for the thread pool to check if pool is misbehaving."""
busy_threshold = hass.pool.worker_count * 3
handle = None
def schedule():
"""Schedule the monitor."""
nonlocal handle
handle = hass.loop.call_later(MONITOR_POOL_INTERVAL,
check_pool_threshold)
def check_pool_threshold():
"""Check pool size."""
nonlocal busy_threshold
pending_jobs = hass.pool.queue_size
if pending_jobs < busy_threshold:
schedule()
return
_LOGGER.warning(
"WorkerPool:All %d threads are busy and %d jobs pending",
hass.pool.worker_count, pending_jobs)
for start, job in hass.pool.current_jobs:
_LOGGER.warning("WorkerPool:Current job started at %s: %s",
dt_util.as_local(start).isoformat(), job)
busy_threshold *= 2
schedule()
schedule()
@callback
def stop_monitor(event):
"""Stop the monitor."""
handle.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_monitor)
|
varunr047/homefile
|
homeassistant/core.py
|
Python
|
mit
| 41,923
|
from __future__ import unicode_literals
from django.db import models
from django.core.validators import RegexValidator
class WebPage(models.Model):
feed_name = models.CharField(
max_length=50,
unique=True,
validators=[RegexValidator(regex="[\w\-]+")]
)
url = models.URLField()
selector = models.TextField()
interval = models.PositiveIntegerField(default=5)
max_results = models.PositiveIntegerField(default=100)
is_enabled = models.BooleanField(default=True)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["-updated_on", "-created_on"]
def __str__(self):
return "{0} - {1}".format(self.url, self.selector)
class PageScrapeResult(models.Model):
page = models.ForeignKey(WebPage)
output = models.TextField(null=True)
hash = models.TextField(null=True)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["-updated_on", "-created_on"]
def __str__(self):
return "{0} - {1}".format(self.page.url, self.updated_on)
|
theju/atifier
|
server/core/models.py
|
Python
|
mit
| 1,199
|
#!/usr/bin/env python
"""
Project:
Date: 10/27/17 12:40 PM
Author: Demian D. Gomez
"""
import os
import traceback
import platform
import datetime
# deps
from tqdm import tqdm
# app
import dbConnection
import pyOptions
import pyArchiveStruct
import pyRinex
import pyDate
import pyJobServer
from Utils import file_append
class callback_class():
def __init__(self, pbar):
self.errors = None
self.pbar = pbar
def callbackfunc(self, args):
msg = args
self.errors = msg
self.pbar.update(1)
def verify_rinex_date_multiday(date, rinexinfo, Config):
# function to verify if rinex is multiday or if the file is from the date it was stored in the archive
# returns true if parent process can continue with insert
# returns false if file had to be moved from the archive (date != rinex.date or multiday file)
# check if rinex is a multiday file (rinex with more than one day of observations)
if rinexinfo.multiday:
# move all the files to the repository, delete the crinex from the archive, log the event
rnxlist = []
for rnx in rinexinfo.multiday_rnx_list:
rnxlist.append(rnx.rinex)
# some other file, move it to the repository
retry_folder = os.path.join(Config.repository_data_in_retry, 'multidays_found/%s/%s' % (rnx.date.yyyy(), rnx.date.ddd()))
rnx.compress_local_copyto(retry_folder)
# remove crinex from archive
os.remove(rinexinfo.origin_file)
return False
# compare the date of the rinex with the date in the archive
elif not date == rinexinfo.date:
# move the file out of the archive because it's in the wrong spot (wrong folder, wrong name, etc)
# let pyArchiveService fix the issue
retry_folder = os.path.join(Config.repository_data_in_retry, 'wrong_date_found/%s/%s' % (date.yyyy(), date.ddd()))
# move the crinex out of the archive
rinexinfo.move_origin_file(retry_folder)
return False
else:
return True
def UpdateRecord(rinex, path):
cnn = dbConnection.Cnn('gnss_data.cfg')
Config = pyOptions.ReadOptions('gnss_data.cfg')
try:
rnxobj = pyRinex.ReadRinex(rinex['NetworkCode'],
rinex['StationCode'],
path)
date = pyDate.Date(year = rinex['ObservationYear'],
doy = rinex['ObservationDOY'])
if not verify_rinex_date_multiday(date, rnxobj, Config):
cnn.begin_transac()
# propagate the deletes
cnn.query(
'DELETE FROM gamit_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND "Year" = %i AND "DOY" = %i'
% (rinex['NetworkCode'], rinex['StationCode'], rinex['ObservationYear'], rinex['ObservationDOY']))
cnn.query(
'DELETE FROM ppp_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND "Year" = %i AND "DOY" = %i'
% (rinex['NetworkCode'], rinex['StationCode'], rinex['ObservationYear'], rinex['ObservationDOY']))
cnn.query(
'DELETE FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND "ObservationYear" = %i AND "ObservationDOY" = %i'
% (rinex['NetworkCode'], rinex['StationCode'], rinex['ObservationYear'], rinex['ObservationDOY']))
cnn.commit_transac()
return 'Multiday rinex file moved out of the archive: ' + rinex['NetworkCode'] + '.' + rinex['StationCode'] + ' ' + str(rinex['ObservationYear']) + ' ' + str(rinex['ObservationDOY']) + ' using node ' + platform.node()
else:
cnn.update('rinex', rinex, Completion=rnxobj.completion)
except pyRinex.pyRinexExceptionBadFile:
# empty file or problem with crinex format, move out
archive = pyArchiveStruct.RinexStruct(cnn)
archive.remove_rinex(rinex, os.path.join(Config.repository_data_reject, 'bad_rinex/%i/%03i' % (rinex['ObservationYear'], rinex['ObservationDOY'])))
except Exception:
return traceback.format_exc() + ' processing rinex: ' + rinex['NetworkCode'] + '.' + rinex['StationCode'] + ' ' + str(rinex['ObservationYear']) + ' ' + str(rinex['ObservationDOY']) + ' using node ' + platform.node()
def output_handle(callback):
messages = [outmsg.errors for outmsg in callback]
if len([out_msg for out_msg in messages if out_msg]) > 0:
tqdm.write(
' >> There were unhandled errors during this batch. Please check errors_pyScanArchive.log for details')
    # print any errors that were encountered during parallel execution
for msg in messages:
if msg:
file_append('errors_amend.log',
'ON ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' an unhandled error occurred:\n' +
msg + '\n' +
'END OF ERROR =================== \n\n')
return []
cnn = dbConnection.Cnn('gnss_data.cfg')
options = pyOptions.ReadOptions('gnss_data.cfg')
JobServer = pyJobServer.JobServer(options)
archive = pyArchiveStruct.RinexStruct(cnn)
for table in ['rinex']:
print(" >> Processing " + table)
tbl = cnn.query('SELECT * FROM ' + table + ' WHERE "Completion" is null')
rnx = tbl.dictresult()
callback = []
pbar = tqdm(total=len(rnx), ncols=80)
depfuncs = (verify_rinex_date_multiday,)
modules = ('pyRinex', 'dbConnection', 'traceback', 'platform', 'pyDate', 'pyOptions', 'pyArchiveStruct')
for rinex in rnx:
path = archive.build_rinex_path(rinex['NetworkCode'],
rinex['StationCode'],
rinex['ObservationYear'],
rinex['ObservationDOY'])
rfile = os.path.join(options.archive_path, path)
callback.append(callback_class(pbar))
arguments = (rinex, rfile)
JobServer.SubmitJob(UpdateRecord, arguments, depfuncs, modules, callback, callback_class(pbar), 'callbackfunc')
if JobServer.process_callback:
# handle any output messages during this batch
callback = output_handle(callback)
JobServer.process_callback = False
tqdm.write(' >> waiting for jobs to finish...')
JobServer.job_server.wait()
tqdm.write(' >> Done.')
pbar.close()
output_handle(callback)
print('\n')
JobServer.job_server.print_stats()
|
demiangomez/Parallel.GAMIT
|
com/amend.py
|
Python
|
gpl-3.0
| 6,532
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
# Tests for v2 features.
import atom_tests.core_test
import atom_tests.data_test
import atom_tests.http_core_test
import atom_tests.auth_test
import atom_tests.mock_http_core_test
import atom_tests.client_test
import gdata_tests.client_test
import gdata_tests.core_test
import gdata_tests.data_test
import gdata_tests.data_smoke_test
import gdata_tests.client_smoke_test
import gdata_tests.live_client_test
import gdata_tests.gauth_test
import gdata_tests.blogger.data_test
import gdata_tests.blogger.live_client_test
import gdata_tests.maps.data_test
import gdata_tests.maps.live_client_test
import gdata_tests.spreadsheets.data_test
import gdata_tests.spreadsheets.live_client_test
import gdata_tests.projecthosting.data_test
import gdata_tests.projecthosting.live_client_test
import gdata_tests.sites.data_test
import gdata_tests.sites.live_client_test
import gdata_tests.analytics.data_test
import gdata_tests.analytics.live_client_test
import gdata_tests.contacts.live_client_test
import gdata_tests.calendar_resource.live_client_test
import gdata_tests.calendar_resource.data_test
import gdata_tests.apps.emailsettings.data_test
import gdata_tests.apps.emailsettings.live_client_test
def suite():
return unittest.TestSuite((
atom_tests.core_test.suite(),
atom_tests.data_test.suite(),
atom_tests.http_core_test.suite(),
atom_tests.auth_test.suite(),
atom_tests.mock_http_core_test.suite(),
atom_tests.client_test.suite(),
gdata_tests.client_test.suite(),
gdata_tests.core_test.suite(),
gdata_tests.data_test.suite(),
gdata_tests.data_smoke_test.suite(),
gdata_tests.client_smoke_test.suite(),
gdata_tests.live_client_test.suite(),
gdata_tests.gauth_test.suite(),
gdata_tests.blogger.data_test.suite(),
gdata_tests.blogger.live_client_test.suite(),
gdata_tests.maps.data_test.suite(),
gdata_tests.maps.live_client_test.suite(),
gdata_tests.spreadsheets.data_test.suite(),
gdata_tests.spreadsheets.live_client_test.suite(),
gdata_tests.projecthosting.data_test.suite(),
gdata_tests.projecthosting.live_client_test.suite(),
gdata_tests.sites.data_test.suite(),
gdata_tests.sites.live_client_test.suite(),
gdata_tests.analytics.data_test.suite(),
gdata_tests.analytics.live_client_test.suite(),
gdata_tests.contacts.live_client_test.suite(),
gdata_tests.calendar_resource.live_client_test.suite(),
gdata_tests.calendar_resource.data_test.suite(),
gdata_tests.apps.emailsettings.data_test.suite(),
gdata_tests.apps.emailsettings.live_client_test.suite(),))
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
dekom/threepress-bookworm-read-only
|
bookworm/gdata/tests/all_tests.py
|
Python
|
bsd-3-clause
| 3,464
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.http import Http404
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from pootle.core.browser import ItemTypes
from pootle.core.decorators import get_path_obj, permission_required
from pootle.core.views import (
PootleBrowseView, PootleTranslateView, PootleExportView)
from pootle.i18n.gettext import tr_lang
from pootle_app.views.admin.permissions import admin_permissions
from .forms import LanguageSpecialCharsForm
from .models import Language
class LanguageMixin(object):
model = Language
browse_url_path = "pootle-language-browse"
export_url_path = "pootle-language-export"
translate_url_path = "pootle-language-translate"
template_extends = 'languages/base.html'
@property
def language(self):
return self.object
@property
def permission_context(self):
return self.get_object().directory
@property
def url_kwargs(self):
return {"language_code": self.object.code}
@lru_cache()
def get_object(self):
lang = Language.get_canonical(self.kwargs["language_code"])
if lang is None:
raise Http404
return lang
def get(self, *args, **kwargs):
self.object = self.get_object()
if self.object.code != kwargs["language_code"]:
return redirect(
self.url_pattern_name,
self.object.code,
permanent=True)
return super(LanguageMixin, self).get(*args, **kwargs)
class LanguageBrowseView(LanguageMixin, PootleBrowseView):
url_pattern_name = "pootle-language-browse"
@property
def stats(self):
return self.object.get_stats_for_user(self.request.user)
@cached_property
def items(self):
return self.object.get_children_for_user(self.request.user)
@property
def language(self):
return {
'code': self.object.code,
'name': tr_lang(self.object.fullname)}
def get(self, *args, **kwargs):
response = super(LanguageBrowseView, self).get(*args, **kwargs)
response.set_cookie('pootle-language', self.object.code)
return response
def get_item_type(self, path_obj):
return ItemTypes.PROJECT
def get_item_title(self, path_obj):
return path_obj.project.name
class LanguageTranslateView(LanguageMixin, PootleTranslateView):
url_pattern_name = "pootle-language-translate"
class LanguageExportView(LanguageMixin, PootleExportView):
url_pattern_name = "pootle-language-export"
source_language = "en"
@get_path_obj
@permission_required('administrate')
def language_admin(request, language):
ctx = {
'page': 'admin-permissions',
'browse_url': reverse('pootle-language-browse', kwargs={
'language_code': language.code,
}),
'translate_url': reverse('pootle-language-translate', kwargs={
'language_code': language.code,
}),
'language': language,
'directory': language.directory,
}
return admin_permissions(request, language.directory,
'languages/admin/permissions.html', ctx)
@get_path_obj
@permission_required('administrate')
def language_characters_admin(request, language):
form = LanguageSpecialCharsForm(request.POST
if request.method == 'POST'
else None,
instance=language)
if form.is_valid():
form.save()
return redirect('pootle-language-browse', language.code)
ctx = {
'page': 'admin-characters',
'browse_url': reverse('pootle-language-browse', kwargs={
'language_code': language.code,
}),
'translate_url': reverse('pootle-language-translate', kwargs={
'language_code': language.code,
}),
'language': language,
'directory': language.directory,
'form': form,
}
return render(request, 'languages/admin/characters.html', ctx)
|
iafan/zing
|
pootle/apps/pootle_language/views.py
|
Python
|
gpl-3.0
| 4,465
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.translation import ugettext as _
from django.template.context import RequestContext
from user_profile.models import Manager
from callcenter.models import Queue, Tier
from callcenter.constants import QUEUE_COLUMN_NAME, TIER_COLUMN_NAME
from callcenter.forms import QueueFrontEndForm, TierFrontEndForm
from dialer_campaign.function_def import user_dialer_setting_msg
from common.common_functions import get_pagination_vars
from survey.models import Section_template
redirect_url_to_queue_list = '/module/queue/'
redirect_url_to_tier_list = '/module/tier/'
@permission_required('callcenter.view_queue', login_url='/')
@login_required
def queue_list(request):
"""Queue list for the logged in Manager
**Attributes**:
* ``template`` - frontend/queue/list.html
**Logic Description**:
        * List all queues which belong to the logged in manager.
"""
sort_col_field_list = ['name', 'strategy', 'time_base_score', 'updated_date']
default_sort_field = 'id'
pagination_data = get_pagination_vars(
request, sort_col_field_list, default_sort_field)
PAGE_SIZE = pagination_data['PAGE_SIZE']
sort_order = pagination_data['sort_order']
queue_list = Queue.objects\
.filter(manager=request.user).order_by(sort_order)
template = 'frontend/queue/list.html'
data = {
'msg': request.session.get('msg'),
'error_msg': request.session.get('error_msg'),
'queue_list': queue_list,
'total_queue': queue_list.count(),
'PAGE_SIZE': PAGE_SIZE,
'QUEUE_COLUMN_NAME': QUEUE_COLUMN_NAME,
'col_name_with_order': pagination_data['col_name_with_order'],
'dialer_setting_msg': user_dialer_setting_msg(request.user),
}
request.session['msg'] = ''
request.session['error_msg'] = ''
return render_to_response(template, data,
context_instance=RequestContext(request))
@permission_required('callcenter.add_queue', login_url='/')
@login_required
def queue_add(request):
"""Add new queue for the logged in manager
**Attributes**:
* ``form`` - QueueFrontEndForm
* ``template`` - frontend/queue/change.html
**Logic Description**:
        * Add a new queue which will belong to the logged in manager
          via the QueueFrontEndForm & get redirected to the queue list
"""
form = QueueFrontEndForm()
if request.method == 'POST':
form = QueueFrontEndForm(request.POST)
if form.is_valid():
obj = form.save(commit=False)
obj.manager = Manager.objects.get(username=request.user)
obj.save()
request.session["msg"] = _('"%(name)s" queue is added.') %\
{'name': obj.name}
return HttpResponseRedirect(redirect_url_to_queue_list)
template = 'frontend/queue/change.html'
data = {
'form': form,
'action': 'add',
'dialer_setting_msg': user_dialer_setting_msg(request.user),
}
return render_to_response(template, data,
context_instance=RequestContext(request))
def queue_delete_allow(queue_id):
"""Check queue is attached to any survey section or not"""
try:
section_count = Section_template.objects.filter(queue_id=queue_id).count()
if section_count > 0:
return False
else:
return True
    except Exception:
return True
@permission_required('callcenter.delete_queue', login_url='/')
@login_required
def queue_del(request, object_id):
"""Delete queue for the logged in Manager
**Attributes**:
* ``object_id`` - Selected queue object
* ``object_list`` - Selected queue objects
**Logic Description**:
* Delete selected queue from the queue list
"""
if int(object_id) != 0:
# When object_id is not 0
queue = get_object_or_404(
Queue, pk=object_id, manager=request.user)
if queue_delete_allow(object_id):
# Delete queue
request.session["msg"] = _('"%(name)s" is deleted.')\
% {'name': queue.name}
queue.delete()
else:
request.session["error_msg"] = \
_('"%(name)s" is not allowed to delete because it is being used with survey.')\
% {'name': queue.name}
else:
# When object_id is 0 (Multiple records delete)
values = request.POST.getlist('select')
values = ", ".join(["%s" % el for el in values])
deleted_list = []
not_deleted_list = []
try:
queue_list = Queue.objects.extra(where=['id IN (%s)' % values])
if queue_list:
for queue_obj in queue_list:
if queue_delete_allow(queue_obj.id):
deleted_list.append(str(queue_obj.name))
queue_obj.delete()
else:
not_deleted_list.append(str(queue_obj.name))
if deleted_list:
request.session["msg"] =\
_('%s queue(s) are deleted.') % deleted_list
if not_deleted_list:
request.session["error_msg"] =\
_('%s queue(s) are not deleted because they are being used with surveys.')\
% not_deleted_list
        except Exception:
raise Http404
return HttpResponseRedirect(redirect_url_to_queue_list)
@permission_required('callcenter.change_queue', login_url='/')
@login_required
def queue_change(request, object_id):
"""Update/Delete queue for the logged in manager
**Attributes**:
* ``object_id`` - Selected queue object
* ``form`` - QueueFrontEndForm
* ``template`` - frontend/queue/change.html
**Logic Description**:
* Update/delete selected queue from the queue list
via QueueFrontEndForm & get redirected to the queue list
"""
queue = get_object_or_404(
Queue, pk=object_id, manager=request.user)
form = QueueFrontEndForm(instance=queue)
if request.method == 'POST':
# Delete queue
if request.POST.get('delete'):
queue_del(request, object_id)
return HttpResponseRedirect(redirect_url_to_queue_list)
else:
# Update queue
form = QueueFrontEndForm(request.POST, instance=queue)
if form.is_valid():
obj = form.save()
request.session["msg"] = _('"%(name)s" is updated.') \
% {'name': obj.name}
return HttpResponseRedirect(redirect_url_to_queue_list)
template = 'frontend/queue/change.html'
data = {
'form': form,
'action': 'update',
'dialer_setting_msg': user_dialer_setting_msg(request.user),
}
return render_to_response(template, data,
context_instance=RequestContext(request))
@permission_required('callcenter.view_tier', login_url='/')
@login_required
def tier_list(request):
"""Tier list for the logged in Manager
**Attributes**:
* ``template`` - frontend/tier/list.html
**Logic Description**:
        * List all tiers which belong to the logged in manager.
"""
sort_col_field_list = ['agent', 'queue', 'level', 'position', 'updated_date']
default_sort_field = 'id'
pagination_data = get_pagination_vars(request, sort_col_field_list, default_sort_field)
PAGE_SIZE = pagination_data['PAGE_SIZE']
sort_order = pagination_data['sort_order']
tier_list = Tier.objects\
.filter(manager=request.user).order_by(sort_order)
template = 'frontend/tier/list.html'
data = {
'msg': request.session.get('msg'),
'tier_list': tier_list,
'total_tier': tier_list.count(),
'PAGE_SIZE': PAGE_SIZE,
'TIER_COLUMN_NAME': TIER_COLUMN_NAME,
'col_name_with_order': pagination_data['col_name_with_order'],
'dialer_setting_msg': user_dialer_setting_msg(request.user),
}
request.session['msg'] = ''
request.session['error_msg'] = ''
return render_to_response(template, data,
context_instance=RequestContext(request))
@permission_required('callcenter.add_tier', login_url='/')
@login_required
def tier_add(request):
"""Add new tier for the logged in manager
**Attributes**:
* ``form`` - TierFrontEndForm
* ``template`` - frontend/tier/change.html
**Logic Description**:
* Add a new tier which will belong to the logged in manager
via the TierFrontEndForm & get redirected to the tier list
"""
form = TierFrontEndForm(request.user.id)
if request.method == 'POST':
form = TierFrontEndForm(request.user.id, request.POST)
if form.is_valid():
obj = form.save(commit=False)
obj.manager = Manager.objects.get(username=request.user)
obj.save()
request.session["msg"] = _('"%(name)s" tier is added.') %\
{'name': obj.id}
return HttpResponseRedirect(redirect_url_to_tier_list)
template = 'frontend/tier/change.html'
data = {
'form': form,
'action': 'add',
'dialer_setting_msg': user_dialer_setting_msg(request.user),
}
return render_to_response(template, data,
context_instance=RequestContext(request))
@permission_required('callcenter.delete_tier', login_url='/')
@login_required
def tier_del(request, object_id):
"""Delete tier for the logged in Manager
**Attributes**:
* ``object_id`` - Selected tier object
* ``object_list`` - Selected tier objects
**Logic Description**:
* Delete selected tier from the tier list
"""
if int(object_id) != 0:
# When object_id is not 0
tier = get_object_or_404(
Tier, pk=object_id, manager=request.user)
# Delete tier
request.session["msg"] = _('"%(name)s" is deleted.')\
% {'name': tier.id}
tier.delete()
else:
# When object_id is 0 (Multiple records delete)
values = request.POST.getlist('select')
values = ", ".join(["%s" % el for el in values])
try:
tier_list = Tier.objects.extra(where=['id IN (%s)' % values])
if tier_list:
request.session["msg"] =\
_('%(count)s tier(s) are deleted.')\
% {'count': tier_list.count()}
tier_list.delete()
        except Exception:
raise Http404
return HttpResponseRedirect(redirect_url_to_tier_list)
@permission_required('callcenter.change_tier', login_url='/')
@login_required
def tier_change(request, object_id):
"""Update/Delete tier for the logged in manager
**Attributes**:
* ``object_id`` - Selected tier object
* ``form`` - TierFrontEndForm
* ``template`` - frontend/tier/change.html
**Logic Description**:
* Update/delete selected tier from the tier list
via TierFrontEndForm & get redirected to the tier list
"""
tier = get_object_or_404(
Tier, pk=object_id, manager=request.user)
form = TierFrontEndForm(request.user.id, instance=tier)
if request.method == 'POST':
# Delete tier
if request.POST.get('delete'):
tier_del(request, object_id)
return HttpResponseRedirect(redirect_url_to_tier_list)
else:
# Update tier
form = TierFrontEndForm(request.user.id, request.POST, instance=tier)
if form.is_valid():
form.save()
request.session["msg"] = _('"%(id)s" tier is updated.') \
% {'id': tier.id}
return HttpResponseRedirect(redirect_url_to_tier_list)
template = 'frontend/tier/change.html'
data = {
'form': form,
'action': 'update',
'dialer_setting_msg': user_dialer_setting_msg(request.user),
}
return render_to_response(template, data,
context_instance=RequestContext(request))
|
gale320/newfies-dialer
|
newfies/callcenter/views.py
|
Python
|
mpl-2.0
| 12,784
|
# $Id$
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" code for dealing with composite models
For a model to be useable here, it should support the following API:
- _ClassifyExample(example)_, returns a classification
Other compatibility notes:
1) To use _Composite.Grow_ there must be some kind of builder
functionality which returns a 2-tuple containing (model,percent accuracy).
2) The models should be pickleable
   3) It would be helpful if the models supported the __cmp__ method so that
      the membership tests used to make sure models are unique will work.
"""
from __future__ import print_function
import math
import numpy
from rdkit.six.moves import cPickle
from rdkit.ML.Data import DataUtils
class Composite(object):
"""a composite model
**Notes**
- adding a model which is already present just results in its count
field being incremented and the errors being averaged.
- typical usage:
1) grow the composite with AddModel until happy with it
2) call AverageErrors to calculate the average error values
3) call SortModels to put things in order by either error or count
- Composites can support individual models requiring either quantized or
nonquantized data. This is done by keeping a set of quantization bounds
(_QuantBounds_) in the composite and quantizing data passed in when required.
Quantization bounds can be set and interrogated using the
_Get/SetQuantBounds()_ methods. When models are added to the composite,
it can be indicated whether or not they require quantization.
- Composites are also capable of extracting relevant variables from longer lists.
This is accessible using _SetDescriptorNames()_ to register the descriptors about
which the composite cares and _SetInputOrder()_ to tell the composite what the
ordering of input vectors will be. **Note** there is a limitation on this: each
model needs to take the same set of descriptors as inputs. This could be changed.
"""
def __init__(self):
self.modelList = []
self.errList = []
self.countList = []
self.modelVotes = []
self.quantBounds = None
self.nPossibleVals = None
self.quantizationRequirements = []
self._descNames = []
self._mapOrder = None
self.activityQuant = []
def SetModelFilterData(self, modelFilterFrac=0.0, modelFilterVal=0.0):
self._modelFilterFrac = modelFilterFrac
self._modelFilterVal = modelFilterVal
def SetDescriptorNames(self, names):
""" registers the names of the descriptors this composite uses
**Arguments**
- names: a list of descriptor names (strings).
**NOTE**
the _names_ list is not
copied, so if you modify it later, the composite itself will also be modified.
"""
self._descNames = names
def GetDescriptorNames(self):
""" returns the names of the descriptors this composite uses
"""
return self._descNames
def SetQuantBounds(self, qBounds, nPossible=None):
""" sets the quantization bounds that the composite will use
**Arguments**
- qBounds: a list of quantization bounds, each quantbound is a
list of boundaries
- nPossible: a list of integers indicating how many possible values
each descriptor can take on.
**NOTE**
- if the two lists are of different lengths, this will assert out
- neither list is copied, so if you modify it later, the composite
itself will also be modified.
"""
if nPossible is not None:
assert len(qBounds) == len(nPossible), 'qBounds/nPossible mismatch'
self.quantBounds = qBounds
self.nPossibleVals = nPossible
def GetQuantBounds(self):
""" returns the quantization bounds
**Returns**
a 2-tuple consisting of:
1) the list of quantization bounds
2) the nPossibleVals list
"""
return self.quantBounds, self.nPossibleVals
def GetActivityQuantBounds(self):
if not hasattr(self, 'activityQuant'):
self.activityQuant = []
return self.activityQuant
def SetActivityQuantBounds(self, bounds):
self.activityQuant = bounds
def QuantizeActivity(self, example, activityQuant=None, actCol=-1):
if activityQuant is None:
activityQuant = self.activityQuant
if activityQuant:
example = example[:]
act = example[actCol]
for box in range(len(activityQuant)):
if act < activityQuant[box]:
act = box
break
else:
act = box + 1
example[actCol] = act
return example
def QuantizeExample(self, example, quantBounds=None):
""" quantizes an example
**Arguments**
- example: a data point (list, tuple or numpy array)
- quantBounds: a list of quantization bounds, each quantbound is a
list of boundaries. If this argument is not provided, the composite
will use its own quantBounds
**Returns**
the quantized example as a list
**Notes**
- If _example_ is different in length from _quantBounds_, this will
assert out.
- This is primarily intended for internal use
"""
if quantBounds is None:
quantBounds = self.quantBounds
assert len(example) == len(quantBounds), 'example/quantBounds mismatch'
quantExample = [None] * len(example)
for i in range(len(quantBounds)):
bounds = quantBounds[i]
p = example[i]
if len(bounds):
for box in range(len(bounds)):
if p < bounds[box]:
p = box
break
else:
p = box + 1
else:
if i != 0:
p = int(p)
quantExample[i] = p
return quantExample
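  # A worked example for QuantizeExample (hypothetical bounds and data):
  # with quantBounds = [[0.5], [1.0, 2.0]], the example [0.7, 1.5] maps to
  # [1, 1]: 0.7 is above the single boundary 0.5 (box 1), and 1.5 falls
  # between 1.0 and 2.0 (box 1).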
def MakeHistogram(self):
""" creates a histogram of error/count pairs
**Returns**
the histogram as a series of (error, count) 2-tuples
"""
nExamples = len(self.modelList)
histo = []
i = 1
lastErr = self.errList[0]
countHere = self.countList[0]
eps = 0.001
while i < nExamples:
if self.errList[i] - lastErr > eps:
histo.append((lastErr, countHere))
lastErr = self.errList[i]
countHere = self.countList[i]
else:
countHere = countHere + self.countList[i]
i = i + 1
return histo
def CollectVotes(self, example, quantExample, appendExample=0, onlyModels=None):
""" collects votes across every member of the composite for the given example
**Arguments**
- example: the example to be voted upon
- quantExample: the quantized form of the example
- appendExample: toggles saving the example on the models
- onlyModels: if provided, this should be a sequence of model
indices. Only the specified models will be used in the
prediction.
**Returns**
a list with a vote from each member
"""
if not onlyModels:
onlyModels = range(len(self))
nModels = len(onlyModels)
votes = [-1] * len(self)
for i in onlyModels:
if self.quantizationRequirements[i]:
votes[i] = int(
round(self.modelList[i].ClassifyExample(quantExample, appendExamples=appendExample)))
else:
votes[i] = int(
round(self.modelList[i].ClassifyExample(example, appendExamples=appendExample)))
return votes
def ClassifyExample(self, example, threshold=0, appendExample=0, onlyModels=None):
""" classifies the given example using the entire composite
**Arguments**
- example: the data to be classified
- threshold: if this is a number greater than zero, then a
classification will only be returned if the confidence is
above _threshold_. Anything lower is returned as -1.
- appendExample: toggles saving the example on the models
- onlyModels: if provided, this should be a sequence of model
indices. Only the specified models will be used in the
prediction.
**Returns**
a (result,confidence) tuple
**FIX:**
statistics sucks... I'm not seeing an obvious way to get
the confidence intervals. For that matter, I'm not seeing
an unobvious way.
For now, this is just treated as a voting problem with the confidence
measure being the percent of models which voted for the winning result.
"""
if self._mapOrder is not None:
example = self._RemapInput(example)
if self.GetActivityQuantBounds():
example = self.QuantizeActivity(example)
if self.quantBounds is not None and 1 in self.quantizationRequirements:
quantExample = self.QuantizeExample(example, self.quantBounds)
else:
quantExample = []
if not onlyModels:
onlyModels = range(len(self))
self.modelVotes = self.CollectVotes(example, quantExample, appendExample=appendExample,
onlyModels=onlyModels)
votes = [0] * self.nPossibleVals[-1]
for i in onlyModels:
res = self.modelVotes[i]
votes[res] = votes[res] + self.countList[i]
totVotes = sum(votes)
res = numpy.argmax(votes)
conf = float(votes[res]) / float(totVotes)
if conf > threshold:
return res, conf
else:
return -1, conf
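  # A minimal usage sketch for ClassifyExample (hedged: `composite` is a
  # grown composite and `example` a data point in the descriptor order
  # registered via SetInputOrder()):
  #
  #   >>> res, conf = composite.ClassifyExample(example)
  #   >>> # res is the winning class (or -1 if conf fell below threshold);
  #   >>> # conf is the fraction of count-weighted votes for the winner.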
def GetVoteDetails(self):
""" returns the votes from the last classification
This will be _None_ if nothing has yet be classified
"""
return self.modelVotes
def _RemapInput(self, inputVect):
""" remaps the input so that it matches the expected internal ordering
**Arguments**
- inputVect: the input to be reordered
**Returns**
- a list with the reordered (and possible shorter) data
**Note**
- you must call _SetDescriptorNames()_ and _SetInputOrder()_ for this to work
- this is primarily intended for internal use
"""
order = self._mapOrder
if order is None:
return inputVect
remappedInput = [None] * len(order)
for i in range(len(order) - 1):
remappedInput[i] = inputVect[order[i]]
if order[-1] == -1:
remappedInput[-1] = 0
else:
remappedInput[-1] = inputVect[order[-1]]
return remappedInput
def GetInputOrder(self):
""" returns the input order (used in remapping inputs)
"""
return self._mapOrder
def SetInputOrder(self, colNames):
""" sets the input order
**Arguments**
- colNames: a list of the names of the data columns that will be passed in
**Note**
- you must call _SetDescriptorNames()_ first for this to work
- if the local descriptor names do not appear in _colNames_, this will
raise an _IndexError_ exception.
"""
if type(colNames) != list:
colNames = list(colNames)
descs = [x.upper() for x in self.GetDescriptorNames()]
self._mapOrder = [None] * len(descs)
colNames = [x.upper() for x in colNames]
# FIX: I believe that we're safe assuming that field 0
# is always the label, and therefore safe to ignore errors,
# but this may not be the case
try:
self._mapOrder[0] = colNames.index(descs[0])
except ValueError:
self._mapOrder[0] = 0
for i in range(1, len(descs) - 1):
try:
self._mapOrder[i] = colNames.index(descs[i])
except ValueError:
raise ValueError('cannot find descriptor name: %s in set %s' %
(repr(descs[i]), repr(colNames)))
try:
self._mapOrder[-1] = colNames.index(descs[-1])
except ValueError:
# ok, there's no obvious match for the final column (activity)
# We'll take the last one:
#self._mapOrder[-1] = len(descs)-1
self._mapOrder[-1] = -1
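  # Illustrative remap (worked example): with descriptor names
  # ('Name', 'MW', 'Act') and incoming columns ['Name', 'LogP', 'MW', 'Act'],
  # SetInputOrder() yields _mapOrder == [0, 2, 3], so _RemapInput() reorders a
  # row [name, logp, mw, act] into [name, mw, act].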
def Grow(self, examples, attrs, nPossibleVals, buildDriver, pruner=None, nTries=10, pruneIt=0,
needsQuantization=1, progressCallback=None, **buildArgs):
""" Grows the composite
**Arguments**
- examples: a list of examples to be used in training
- attrs: a list of the variables to be used in training
- nPossibleVals: this is used to provide a list of the number
of possible values for each variable. It is used if the
local quantBounds have not been set (for example for when you
are working with data which is already quantized).
- buildDriver: the function to call to build the new models
- pruner: a function used to "prune" (reduce the complexity of)
the resulting model.
- nTries: the number of new models to add
- pruneIt: toggles whether or not pruning is done
- needsQuantization: used to indicate whether or not this type of model
requires quantized data
- **buildArgs: all other keyword args are passed to _buildDriver_
**Note**
- new models are *added* to the existing ones
"""
silent = buildArgs.get('silent', 0)
buildArgs['silent'] = 1
buildArgs['calcTotalError'] = 1
if self._mapOrder is not None:
      examples = list(map(self._RemapInput, examples))
if self.GetActivityQuantBounds():
for i in range(len(examples)):
examples[i] = self.QuantizeActivity(examples[i])
nPossibleVals[-1] = len(self.GetActivityQuantBounds()) + 1
if self.nPossibleVals is None:
self.nPossibleVals = nPossibleVals[:]
if needsQuantization:
trainExamples = [None] * len(examples)
nPossibleVals = self.nPossibleVals
for i in range(len(examples)):
trainExamples[i] = self.QuantizeExample(examples[i], self.quantBounds)
else:
trainExamples = examples
for i in range(nTries):
trainSet = None
if (hasattr(self, '_modelFilterFrac')) and (self._modelFilterFrac != 0):
trainIdx, temp = DataUtils.FilterData(trainExamples, self._modelFilterVal,
self._modelFilterFrac, -1, indicesOnly=1)
trainSet = [trainExamples[x] for x in trainIdx]
else:
trainSet = trainExamples
#print("Training model %i with %i out of %i examples"%(i, len(trainSet), len(trainExamples)))
model, frac = buildDriver(*(trainSet, attrs, nPossibleVals), **buildArgs)
if pruneIt:
model, frac2 = pruner(model, model.GetTrainingExamples(), model.GetTestExamples(),
minimizeTestErrorOnly=0)
frac = frac2
if hasattr(self, '_modelFilterFrac') and self._modelFilterFrac!=0 and \
hasattr(model,'_trainIndices'):
# correct the model's training indices:
trainIndices = [trainIdx[x] for x in model._trainIndices]
model._trainIndices = trainIndices
self.AddModel(model, frac, needsQuantization)
      if not silent and (nTries < 10 or i % (nTries // 10) == 0):
print('Cycle: % 4d' % (i))
if progressCallback is not None:
progressCallback(i)
def ClearModelExamples(self):
for i in range(len(self)):
m = self.GetModel(i)
try:
m.ClearExamples()
except AttributeError:
pass
def Pickle(self, fileName='foo.pkl', saveExamples=0):
""" Writes this composite off to a file so that it can be easily loaded later
**Arguments**
- fileName: the name of the file to be written
- saveExamples: if this is zero, the individual models will have
their stored examples cleared.
"""
if not saveExamples:
self.ClearModelExamples()
pFile = open(fileName, 'wb+')
cPickle.dump(self, pFile, 1)
pFile.close()
def AddModel(self, model, error, needsQuantization=1):
""" Adds a model to the composite
**Arguments**
- model: the model to be added
- error: the model's error
- needsQuantization: a toggle to indicate whether or not this model
requires quantized inputs
**NOTE**
- this can be used as an alternative to _Grow()_ if you already have
some models constructed
      - the errList is run as an accumulator; you probably want to call
        _AverageErrors_ after finishing the forest
"""
if model in self.modelList:
try:
idx = self.modelList.index(model)
except ValueError:
# FIX: we should never get here, but sometimes we do anyway
self.modelList.append(model)
self.errList.append(error)
self.countList.append(1)
self.quantizationRequirements.append(needsQuantization)
else:
self.errList[idx] = self.errList[idx] + error
self.countList[idx] = self.countList[idx] + 1
else:
self.modelList.append(model)
self.errList.append(error)
self.countList.append(1)
self.quantizationRequirements.append(needsQuantization)
def AverageErrors(self):
""" convert local summed error to average error
"""
self.errList = list(map(lambda x, y: x / y, self.errList, self.countList))
def SortModels(self, sortOnError=1):
""" sorts the list of models
**Arguments**
sortOnError: toggles sorting on the models' errors rather than their counts
"""
if sortOnError:
order = numpy.argsort(self.errList)
else:
order = numpy.argsort(self.countList)
# these elaborate contortions are required because, at the time this
# code was written, Numeric arrays didn't unpickle so well...
#print(order,sortOnError,self.errList,self.countList)
self.modelList = [self.modelList[x] for x in order]
self.countList = [self.countList[x] for x in order]
self.errList = [self.errList[x] for x in order]
def GetModel(self, i):
""" returns a particular model
"""
return self.modelList[i]
def SetModel(self, i, val):
""" replaces a particular model
**Note**
This is included for the sake of completeness, but you need to be
*very* careful when you use it.
"""
self.modelList[i] = val
def GetCount(self, i):
""" returns the count of the _i_th model
"""
return self.countList[i]
def SetCount(self, i, val):
""" sets the count of the _i_th model
"""
self.countList[i] = val
def GetError(self, i):
""" returns the error of the _i_th model
"""
return self.errList[i]
def SetError(self, i, val):
""" sets the error of the _i_th model
"""
self.errList[i] = val
def GetDataTuple(self, i):
""" returns all relevant data about a particular model
**Arguments**
i: an integer indicating which model should be returned
**Returns**
a 3-tuple consisting of:
1) the model
2) its count
3) its error
"""
return (self.modelList[i], self.countList[i], self.errList[i])
def SetDataTuple(self, i, tup):
""" sets all relevant data for a particular tree in the forest
**Arguments**
      - i: an integer indicating which model should be replaced
- tup: a 3-tuple consisting of:
1) the model
2) its count
3) its error
**Note**
This is included for the sake of completeness, but you need to be
*very* careful when you use it.
"""
self.modelList[i], self.countList[i], self.errList[i] = tup
def GetAllData(self):
""" Returns everything we know
**Returns**
a 3-tuple consisting of:
1) our list of models
2) our list of model counts
3) our list of model errors
"""
return (self.modelList, self.countList, self.errList)
def __len__(self):
""" allows len(composite) to work
"""
return len(self.modelList)
def __getitem__(self, which):
""" allows composite[i] to work, returns the data tuple
"""
return self.GetDataTuple(which)
def __str__(self):
""" returns a string representation of the composite
"""
outStr = 'Composite\n'
for i in range(len(self.modelList)):
outStr = outStr + \
        ' Model % 4d: % 5d occurrences %%% 5.2f average error\n'%(i,self.countList[i],
100.*self.errList[i])
return outStr
if __name__ == '__main__':
if 0:
from rdkit.ML.DecTree import DecTree
c = Composite()
n = DecTree.DecTreeNode(None, 'foo')
c.AddModel(n, 0.5)
c.AddModel(n, 0.5)
c.AverageErrors()
c.SortModels()
print(c)
qB = [[], [.5, 1, 1.5]]
exs = [['foo', 0], ['foo', .4], ['foo', .6], ['foo', 1.1], ['foo', 2.0]]
print('quantBounds:', qB)
for ex in exs:
q = c.QuantizeExample(ex, qB)
print(ex, q)
else:
pass
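  # A minimal, hedged voting sketch using stub models. `_StubModel` is
  # hypothetical (real members would be e.g. trained DecTree objects), and the
  # sketch assumes a fresh Composite() starts with no input remapping, no
  # quantization bounds and empty activity quant bounds.
  class _StubModel(object):
    """ illustrative stand-in for a trained model; always predicts `label` """

    def __init__(self, label):
      self._label = label

    def ClassifyExample(self, example, appendExamples=0):
      return self._label

  demo = Composite()
  demo.nPossibleVals = [0, 2]  # last entry: number of result classes
  for label, err in ((0, 0.10), (0, 0.20), (1, 0.40)):
    demo.AddModel(_StubModel(label), err, needsQuantization=0)
  demo.AverageErrors()
  # two of the three equally weighted members vote 0 -> (0, 0.666...)
  print(demo.ClassifyExample(['foo', 0.3]))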
|
jandom/rdkit
|
rdkit/ML/Composite/Composite.py
|
Python
|
bsd-3-clause
| 20,782
|
__author__ = 'cruser42'
a_i = 5
b_i = 7
a_f = 5.0
b_f = 7.0
print("integer division")
print("a: {}, b: {}, a/b: {}".format(a_i, b_i, a_i/b_i))
print("float division")
print("a: {}, b: {}, a/b: {}".format(a_f, b_f, a_f/b_f))
print("mixed num float")
print("a_f: {}, b_i: {}, a/b: {}".format(a_f, b_i, a_f/b_i))
print("mixed denom float")
print("a_i: {}, b_f: {}, a/b: {}".format(a_i, b_f, a_i/b_f))
print("mixed denom float")
print("a_i: {}, b_i: {}, a_f: {}, a_i/b_i * a_f: {}".format(a_i, b_f, a_f, a_i/b_i * a_f))
print("mixed denom float")
print("a_i: {}, b_i: {}, a_f: {}, a_f * a_i/b_i {}".format(a_i, b_f, a_f, a_f * a_i/b_i))
print("mixed denom float")
print("a_i: {}, b_i: {}, a_f: {}, a_i/(b_i * a_f): {}".format(a_i, b_f, a_f, a_i/(b_i * a_f)))
print(a_i)
print(float(a_i))
# BIG NUMBER Experiments
# 101
# 1*4 + 0*2 + 1*1 = 5
#
# 123
# 1*100 + 2*10 + 3*1 = 123
# little endian vs big endian
# 2.22044604925e-16
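# Hedged follow-up sketch: the 2.22044604925e-16 noted above is the
# double-precision machine epsilon, which the runtime can report directly:
import sys
print(sys.float_info.epsilon)  # 2.220446049250313e-16
# Place-value check for the binary example above: 101 (base 2) == 1*4 + 0*2 + 1*1
print(int('101', 2))  # 5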
|
cruser42/python_course
|
alex/ints_floats.py
|
Python
|
gpl-2.0
| 943
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from itest_support import IntegrationTestSupport
from pybuilder.errors import CircularTaskDependencyException
class Test(IntegrationTestSupport):
def test(self):
self.write_build_file("""
from pybuilder.core import task, depends, dependents
@task
@depends("task_c")
def task_a(project):
project.set_property("a", False)
@task
@depends("task_a")
def task_b(project):
project.set_property("a", True)
@task
@depends("task_a", "task_b")
def task_c(project):
project.set_property("c", True)
""")
reactor = self.prepare_reactor()
self.assertRaises(CircularTaskDependencyException, reactor.build, ["task_c", "task_a", "task_b"])
if __name__ == "__main__":
unittest.main()
|
pybuilder/pybuilder
|
src/integrationtest/python/should_raise_exception_when_detect_task_cycle_tests.py
|
Python
|
apache-2.0
| 1,410
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: osx_defaults
author: Franck Nijhof (@frenck)
short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible
description:
- osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts.
Mac OS X applications and other programs use the defaults system to record user preferences and other
information that must be maintained when the applications aren't running (such as default font for new
documents, or the position of an Info panel).
version_added: "2.0"
options:
domain:
description:
- The domain is a domain name of the form com.companyname.appname.
required: false
default: NSGlobalDomain
host:
description:
- The host on which the preference should apply. The special value "currentHost" corresponds to the
"-currentHost" switch of the defaults commandline tool.
required: false
default: null
version_added: "2.1"
key:
description:
- The key of the user preference
required: true
type:
description:
- The type of value to write.
required: false
default: string
choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ]
array_add:
description:
- Add new elements to the array for a key which has an array as its value.
required: false
default: false
choices: [ "true", "false" ]
value:
description:
- The value to write. Only required when state = present.
required: false
default: null
state:
description:
- The state of the user defaults
required: false
default: present
choices: [ "present", "absent" ]
notes:
  - Apple Mac caches defaults. You may need to log out and log back in to apply the changes.
'''
EXAMPLES = '''
- osx_defaults:
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
state: present
- osx_defaults:
domain: NSGlobalDomain
key: AppleMeasurementUnits
type: string
value: Centimeters
state: present
- osx_defaults:
domain: com.apple.screensaver
host: currentHost
key: showClock
type: int
value: 1
- osx_defaults:
key: AppleMeasurementUnits
type: string
value: Centimeters
- osx_defaults:
key: AppleLanguages
type: array
value:
- en
- nl
- osx_defaults:
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
'''
import datetime
import re
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
pass
# /exceptions -------------------------------------------------------------- }}}
# class MacDefaults -------------------------------------------------------- {{{
class OSXDefaults(object):
""" Class to manage Mac OS user defaults """
# init ---------------------------------------------------------------- {{{
""" Initialize this module. Finds 'defaults' executable and preps the parameters """
def __init__(self, **kwargs):
# Initial var for storing current defaults value
self.current_value = None
# Just set all given parameters
for key, val in kwargs.items():
setattr(self, key, val)
# Try to find the defaults executable
self.executable = self.module.get_bin_path(
'defaults',
required=False,
opt_dirs=self.path.split(':'),
)
if not self.executable:
raise OSXDefaultsException("Unable to locate defaults executable.")
# When state is present, we require a parameter
if self.state == "present" and self.value is None:
raise OSXDefaultsException("Missing value parameter")
# Ensure the value is the correct type
self.value = self._convert_type(self.type, self.value)
# /init --------------------------------------------------------------- }}}
# tools --------------------------------------------------------------- {{{
""" Converts value to given type """
def _convert_type(self, type, value):
if type == "string":
return str(value)
elif type in ["bool", "boolean"]:
if isinstance(value, basestring):
value = value.lower()
if value in [True, 1, "true", "1", "yes"]:
return True
elif value in [False, 0, "false", "0", "no"]:
return False
raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
elif type == "date":
try:
return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
except ValueError:
raise OSXDefaultsException(
"Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
)
elif type in ["int", "integer"]:
if not str(value).isdigit():
raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
return int(value)
elif type == "float":
try:
value = float(value)
except ValueError:
raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
return value
elif type == "array":
if not isinstance(value, list):
raise OSXDefaultsException("Invalid value. Expected value to be an array")
return value
raise OSXDefaultsException('Type is not supported: {0}'.format(type))
""" Returns a normalized list of commandline arguments based on the "host" attribute """
def _host_args(self):
if self.host is None:
return []
elif self.host == 'currentHost':
return ['-currentHost']
else:
return ['-host', self.host]
""" Returns a list containing the "defaults" executable and any common base arguments """
def _base_command(self):
return [self.executable] + self._host_args()
""" Converts array output from defaults to an list """
@staticmethod
def _convert_defaults_str_to_list(value):
# Split output of defaults. Every line contains a value
value = value.splitlines()
# Remove first and last item, those are not actual values
value.pop(0)
value.pop(-1)
# Remove extra spaces and comma (,) at the end of values
value = [re.sub(',$', '', x.strip(' ')) for x in value]
return value
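    # Worked example (illustrative of `defaults read` array output):
    #
    #     (
    #         en,
    #         nl
    #     )
    #
    # splitlines() gives ['(', '    en,', '    nl', ')']; dropping the first
    # and last lines and stripping spaces/trailing commas yields ['en', 'nl'].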
# /tools -------------------------------------------------------------- }}}
# commands ------------------------------------------------------------ {{{
""" Reads value of this domain & key from defaults """
def read(self):
# First try to find out the type
rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
        # If RC is 1, the key does not exist
if rc == 1:
return None
        # If the RC is not 0, then something terrible happened! Ooooh nooo!
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out)
        # Ok, let's parse the type from output
type = out.strip().replace('Type is ', '')
# Now get the current value
rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
# Strip output
out = out.strip()
        # A non-zero RC at this point is kinda strange...
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out)
# Convert string to list when type is array
if type == "array":
out = self._convert_defaults_str_to_list(out)
# Store the current_value
self.current_value = self._convert_type(type, out)
""" Writes value to this domain & key to defaults """
def write(self):
        # We need to convert some values so the defaults commandline understands them
if isinstance(self.value, bool):
if self.value:
value = "TRUE"
else:
value = "FALSE"
elif isinstance(self.value, (int, float)):
value = str(self.value)
elif self.array_add and self.current_value is not None:
value = list(set(self.value) - set(self.current_value))
elif isinstance(self.value, datetime.datetime):
value = self.value.strftime('%Y-%m-%d %H:%M:%S')
else:
value = self.value
# When the type is array and array_add is enabled, morph the type :)
if self.type == "array" and self.array_add:
self.type = "array-add"
# All values should be a list, for easy passing it to the command
if not isinstance(value, list):
value = [value]
rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
if rc != 0:
raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)
""" Deletes defaults key from domain """
def delete(self):
rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
if rc != 0:
raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
# /commands ----------------------------------------------------------- }}}
# run ----------------------------------------------------------------- {{{
""" Does the magic! :) """
def run(self):
# Get the current value from defaults
self.read()
# Handle absent state
if self.state == "absent":
if self.current_value is None:
return False
if self.module.check_mode:
return True
self.delete()
return True
# There is a type mismatch! Given type does not match the type in defaults
value_type = type(self.value)
if self.current_value is not None and not isinstance(self.current_value, value_type):
raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)
        # Current value matches the given value. Nothing needs to be done. Arrays need extra care
if self.type == "array" and self.current_value is not None and not self.array_add and \
set(self.current_value) == set(self.value):
return False
elif self.type == "array" and self.current_value is not None and self.array_add and \
len(list(set(self.value) - set(self.current_value))) == 0:
return False
elif self.current_value == self.value:
return False
if self.module.check_mode:
return True
# Change/Create/Set given key/value for domain in defaults
self.write()
return True
# /run ---------------------------------------------------------------- }}}
# /class MacDefaults ------------------------------------------------------ }}}
# main -------------------------------------------------------------------- {{{
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(
default="NSGlobalDomain",
required=False,
),
host=dict(
default=None,
required=False,
),
key=dict(
default=None,
),
type=dict(
default="string",
required=False,
choices=[
"array",
"bool",
"boolean",
"date",
"float",
"int",
"integer",
"string",
],
),
array_add=dict(
default=False,
required=False,
type='bool',
),
value=dict(
default=None,
required=False,
),
state=dict(
default="present",
required=False,
choices=[
"absent", "present"
],
),
path=dict(
default="/usr/bin:/usr/local/bin",
required=False,
)
),
supports_check_mode=True,
)
domain = module.params['domain']
host = module.params['host']
key = module.params['key']
type = module.params['type']
array_add = module.params['array_add']
value = module.params['value']
state = module.params['state']
path = module.params['path']
try:
defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
array_add=array_add, value=value, state=state, path=path)
changed = defaults.run()
module.exit_json(changed=changed)
except OSXDefaultsException:
e = get_exception()
module.fail_json(msg=e.message)
# /main ------------------------------------------------------------------- }}}
if __name__ == '__main__':
main()
|
kaarolch/ansible
|
lib/ansible/modules/system/osx_defaults.py
|
Python
|
gpl-3.0
| 14,430
|
"""This script generates nginx configure file for Tor."""
# -*- coding: utf-8 -*-
import codecs # UTF-8 support for the text files
def text2file(txt, filename):
"""Write the txt to the file."""
outputfile = codecs.open(filename, "w", "utf-8")
outputfile.write(txt)
outputfile.close()
def main():
"""Main function."""
port = 10000
nginx_conf = """log_format mycustomformat '$host $remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" $server_port';\n\n"""
for id_num in range(0,100):
nginx_conf = nginx_conf + "server {\n"
id_str = str(id_num).zfill(6)
nginx_conf = nginx_conf + "\tlisten 127.0.0.1:" + str(port+21) + ";\n"
nginx_conf = nginx_conf + "\tlisten 127.0.0.1:" + str(port+22) + ";\n"
nginx_conf = nginx_conf + "\tlisten 127.0.0.1:" + str(port+80) + ";\n"
nginx_conf = nginx_conf + "\troot /usr/share/nginx/html;\n"
nginx_conf = nginx_conf + "\tindex index.html index.htm;\n"
nginx_conf = nginx_conf + "\tserver_name " + id_str + ";\n"
nginx_conf = nginx_conf + "\taccess_log /var/log/nginx/hs_logs/" + id_str + ".access mycustomformat;\n"
nginx_conf = nginx_conf + "\tlocation / {\n"
nginx_conf = nginx_conf + "\t\ttry_files $uri $uri/ =404;\n"
nginx_conf = nginx_conf + "\t}\n"
nginx_conf = nginx_conf + "\terror_log /var/log/nginx/hs_logs/" + id_str + ".error error;\n"
nginx_conf = nginx_conf + "}\n\n"
port = port + 100
    text2file(nginx_conf, "hs")
if __name__ == '__main__':
main()
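# For id_num == 0 (port == 10000) the loop above emits a server block like the
# following (tabs shown as spaces):
#
# server {
#     listen 127.0.0.1:10021;
#     listen 127.0.0.1:10022;
#     listen 127.0.0.1:10080;
#     root /usr/share/nginx/html;
#     index index.html index.htm;
#     server_name 000000;
#     access_log /var/log/nginx/hs_logs/000000.access mycustomformat;
#     location / {
#         try_files $uri $uri/ =404;
#     }
#     error_log /var/log/nginx/hs_logs/000000.error error;
# }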
|
juhanurmi/hs-honeypot
|
tools/generate_nginx_conf.py
|
Python
|
gpl-2.0
| 1,663
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DRAWNS, STYLENS, PRESENTATIONNS
from element import Element
def StyleRefElement(stylename=None, classnames=None, **args):
qattrs = {}
if stylename is not None:
f = stylename.getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS,u'style-name')]= stylename
elif f == 'presentation':
qattrs[(PRESENTATIONNS,u'style-name')]= stylename
else:
raise ValueError, "Style's family must be either 'graphic' or 'presentation'"
if classnames is not None:
f = classnames[0].getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS,u'class-names')]= classnames
elif f == 'presentation':
qattrs[(PRESENTATIONNS,u'class-names')]= classnames
else:
raise ValueError, "Style's family must be either 'graphic' or 'presentation'"
return Element(qattributes=qattrs, **args)
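# Hedged usage sketch: StyleRefElement dispatches on the style's family, so a
# graphic-family style lands in draw:style-name (values below are illustrative;
# Style comes from odf.style):
#
#   gstyle = Style(name="gr1", family="graphic")
#   frame = Frame(stylename=gstyle, width="10cm", height="5cm")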
def DrawElement(name=None, **args):
e = Element(name=name, **args)
if not args.has_key('displayname'):
e.setAttrNS(DRAWNS,'display-name', name)
return e
# Autogenerated
def A(**args):
args.setdefault('type', 'simple')
return Element(qname = (DRAWNS,'a'), **args)
def Applet(**args):
return Element(qname = (DRAWNS,'applet'), **args)
def AreaCircle(**args):
return Element(qname = (DRAWNS,'area-circle'), **args)
def AreaPolygon(**args):
return Element(qname = (DRAWNS,'area-polygon'), **args)
def AreaRectangle(**args):
return Element(qname = (DRAWNS,'area-rectangle'), **args)
def Caption(**args):
return StyleRefElement(qname = (DRAWNS,'caption'), **args)
def Circle(**args):
return StyleRefElement(qname = (DRAWNS,'circle'), **args)
def Connector(**args):
return StyleRefElement(qname = (DRAWNS,'connector'), **args)
def ContourPath(**args):
return Element(qname = (DRAWNS,'contour-path'), **args)
def ContourPolygon(**args):
return Element(qname = (DRAWNS,'contour-polygon'), **args)
def Control(**args):
return StyleRefElement(qname = (DRAWNS,'control'), **args)
def CustomShape(**args):
return StyleRefElement(qname = (DRAWNS,'custom-shape'), **args)
def Ellipse(**args):
return StyleRefElement(qname = (DRAWNS,'ellipse'), **args)
def EnhancedGeometry(**args):
return Element(qname = (DRAWNS,'enhanced-geometry'), **args)
def Equation(**args):
return Element(qname = (DRAWNS,'equation'), **args)
def FillImage(**args):
args.setdefault('type', 'simple')
return DrawElement(qname = (DRAWNS,'fill-image'), **args)
def FloatingFrame(**args):
args.setdefault('type', 'simple')
return Element(qname = (DRAWNS,'floating-frame'), **args)
def Frame(**args):
return StyleRefElement(qname = (DRAWNS,'frame'), **args)
def G(**args):
return StyleRefElement(qname = (DRAWNS,'g'), **args)
def GluePoint(**args):
return Element(qname = (DRAWNS,'glue-point'), **args)
def Gradient(**args):
return DrawElement(qname = (DRAWNS,'gradient'), **args)
def Handle(**args):
return Element(qname = (DRAWNS,'handle'), **args)
def Hatch(**args):
return DrawElement(qname = (DRAWNS,'hatch'), **args)
def Image(**args):
return Element(qname = (DRAWNS,'image'), **args)
def ImageMap(**args):
return Element(qname = (DRAWNS,'image-map'), **args)
def Layer(**args):
return Element(qname = (DRAWNS,'layer'), **args)
def LayerSet(**args):
return Element(qname = (DRAWNS,'layer-set'), **args)
def Line(**args):
return StyleRefElement(qname = (DRAWNS,'line'), **args)
def Marker(**args):
return DrawElement(qname = (DRAWNS,'marker'), **args)
def Measure(**args):
return StyleRefElement(qname = (DRAWNS,'measure'), **args)
def Object(**args):
return Element(qname = (DRAWNS,'object'), **args)
def ObjectOle(**args):
return Element(qname = (DRAWNS,'object-ole'), **args)
def Opacity(**args):
return DrawElement(qname = (DRAWNS,'opacity'), **args)
def Page(**args):
return Element(qname = (DRAWNS,'page'), **args)
def PageThumbnail(**args):
return StyleRefElement(qname = (DRAWNS,'page-thumbnail'), **args)
def Param(**args):
return Element(qname = (DRAWNS,'param'), **args)
def Path(**args):
return StyleRefElement(qname = (DRAWNS,'path'), **args)
def Plugin(**args):
args.setdefault('type', 'simple')
return Element(qname = (DRAWNS,'plugin'), **args)
def Polygon(**args):
return StyleRefElement(qname = (DRAWNS,'polygon'), **args)
def Polyline(**args):
return StyleRefElement(qname = (DRAWNS,'polyline'), **args)
def Rect(**args):
return StyleRefElement(qname = (DRAWNS,'rect'), **args)
def RegularPolygon(**args):
return StyleRefElement(qname = (DRAWNS,'regular-polygon'), **args)
def StrokeDash(**args):
return DrawElement(qname = (DRAWNS,'stroke-dash'), **args)
def TextBox(**args):
return Element(qname = (DRAWNS,'text-box'), **args)
|
pacoqueen/odfpy
|
odf/draw.py
|
Python
|
gpl-2.0
| 5,750
|
from utils.header import MagicField, Field
from load_command import LoadCommandHeader, LoadCommandCommand
class PrebindCksumCommand(LoadCommandHeader):
ENDIAN = None
FIELDS = (
        MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_PREBIND_CKSUM']: 'LC_PREBIND_CKSUM'}),
Field('cmdsize', 'I'),
Field('cksum', 'I'),
)
def __init__(self, bytes_=None, **kwargs):
self.cksum = None
super(PrebindCksumCommand, self).__init__(bytes_, **kwargs)
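# Layout note (hedged): with each 'I' read as a 32-bit unsigned integer, an
# LC_PREBIND_CKSUM load command occupies 12 bytes: cmd, cmdsize, cksum.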
|
hkkwok/MachOTool
|
mach_o/headers/prebind_cksum_command.py
|
Python
|
apache-2.0
| 488
|
class InstanceResource(object):
def __init__(self, version):
"""
:param Version version:
"""
self._version = version
""" :type: Version """
|
twilio/twilio-python
|
twilio/base/instance_resource.py
|
Python
|
mit
| 185
|
import src.lib.modules.memes
import src.lib.modules.comics
import src.lib.modules.plusplus
import src.lib.modules.flipcoin
import src.lib.modules.calc
import src.lib.modules.poll
import src.lib.modules.magicball
import src.lib.modules.define
import src.lib.modules.help
import src.lib.modules.clear
import src.lib.modules.weather
import src.lib.runtimes.games.mathwars
import src.lib.runtimes.games.reaction
import src.lib.runtimes.games.scrabble
import importlib
def handle(payload, mds, iph):
raw = mds.get()[str(payload.server.id)]
i2 = 0
i4 = 0
io = None
external_command = False
for i in raw:
for i3 in raw[i]:
if i3[0] == payload.content:
external_command = True
if not iph.has(i):
iph.add(importlib.import_module("src.lib.modules.ext." + i), i)
io = iph.get()[0][iph.get()[1].index(i)]
break
if not external_command:
if payload.content.lower() == ";help":
return ["Single", src.lib.modules.help.main(payload, mds)]
elif payload.content.lower() == ";flipcoin":
return ["Single", src.lib.modules.flipcoin.main()]
elif payload.content.lower() == ";comic":
return ["Single", src.lib.modules.comics.main()]
elif payload.content.lower().startswith(";poll"):
return ["Single", src.lib.modules.poll.main(payload)]
elif payload.content.lower().startswith(";define"):
return ["Single", src.lib.modules.define.main(payload)]
elif payload.content.lower().startswith(";weather"):
return ["Single", src.lib.modules.weather.main(payload)]
elif payload.content.lower() == ";meme":
return ["Single", src.lib.modules.memes.main()]
elif payload.content.lower() == ";stats":
return ["Single", src.lib.modules.plusplus.main_stats(payload)]
elif payload.content.lower().startswith(";++") or payload.content.lower().startswith(";--"):
return ["Single", src.lib.modules.plusplus.main_alter(payload)]
elif payload.content.lower().startswith(";calc "):
return ["Single", src.lib.modules.calc.main(payload)]
elif payload.content.lower().startswith(";scrabble"):
return ["Socket", "Scrabble"]
elif payload.content.lower().startswith(";music"):
return ["Socket", "Music"]
elif payload.content.lower().startswith(";8ball"):
return ["Single", src.lib.modules.magicball.main()]
elif payload.content.lower().startswith(";clear"):
return ["Single", src.lib.modules.clear.main(payload)]
else:
return ["Single", io.main(payload)]
|
ubidiscordbot/ubi
|
src/lib/essentials/commandHandler.py
|
Python
|
mit
| 2,712
|
'''
Written by JT Fuchs, UNC.
PURPOSE: This program takes ZZ Ceti observations with Goodman and runs the full pipeline on a night. Uses ReduceSpec.py, spectral_extraction.py, Wavelength_Calibration.py, continuum_normalization.py, flux_calibration.py, and diagnostics.py (and all dependencies therein).
DIRECTORY FILES THAT SHOULD EXIST:
listZero - text file containing list of bias images to combine
listFlat - text file containing list of flat field images to combine. If both blue and red set, give all blue files first, then all red files.
listSpec - text file containing list of spectra to combine. Organize by target.
listFe - text file containing list of Iron lamps to combine. If both blue and red set, give all blue files first, then all red files.
'''
import ReduceSpec
import spectral_extraction
import Wavelength_Calibration
import continuum_normalization
import flux_calibration
import diagnostics
from glob import glob
import config
import os
if config.cautious:
print "config.cautious ==True, so you're gonna have to be paying attention."
#=========================
#Begin FITS Reduction
#=========================
ReduceSpec.reduce_now(['script_name','listZero','listFlat','listSpec','listFe'])
#========================
#Begin Spectral Extraction
#========================
print 'Beginning spectral extraction.'
spec_files = sorted(glob('cftb*fits'))
single_spec_list = []
for x in spec_files:
if ('cftb.0' in x) or ('cftb.1' in x) or ('cftb.2' in x):
single_spec_list.append(x)
for x in single_spec_list:
spec_files.remove(x)
spec_files = sorted(spec_files)
lamp_file_blue = sorted(glob('tFe*blue*fits'))
lamp_file_red = sorted(glob('tFe*red*fits'))
print "lamp_file_blue: ", lamp_file_blue
print "lamp_file_red: ", lamp_file_red
#Search for FWHM and trace file for each spectrum. If it does not exist, these go to None and will be fit and saved during the extraction.
trace_files = []
FWHM_files = []
for x in spec_files:
trace_name = '*' + x[5:-5] + '*trace.npy'
new_trace = glob(trace_name)
if len(new_trace) == 0:
trace_files.append(None)
else:
trace_files.append(new_trace[0])
fwhm_name = '*' + x[5:-5] + '*poly.npy'
new_fwhm = glob(fwhm_name)
if len(new_fwhm) == 0:
FWHM_files.append(None)
else:
FWHM_files.append(new_fwhm[0])
for x in spec_files:
if 'blue' in x.lower():
lamp_file = lamp_file_blue[0]
elif 'red' in x.lower():
lamp_file = lamp_file_red[0]
else:
print "no colors in spec name... again"
print "using this file:"
print sorted(glob('tFe*fits'))
lamp_file_a= sorted(glob('tFe*fits')+glob('t*_fe*fits'))
print "lamp_file_a: ", lamp_file_a
lamp_file = lamp_file_a[0]
print lamp_file
FWHM_thisfile = FWHM_files[spec_files.index(x)]
trace_thisfile = trace_files[spec_files.index(x)]
if trace_thisfile != None:
trace_exist_file = True
else:
trace_exist_file = False
print ''
print x, lamp_file,trace_thisfile, FWHM_thisfile
    #Must add in option of not having a trace file or FWHM file
#if no FWHMfile, FWHMfile=None
spectral_extraction.extract_now(x,lamp_file,FWHMfile=FWHM_thisfile,tracefile=trace_thisfile,trace_exist=trace_exist_file)
#=========================
# Begin Wavelength Calibration
#=========================
print '\n Beginning Wavelength Calibration'
spec_files = sorted(glob('cftb*ms.fits'))
lamp_files = sorted(glob('tFe*ms.fits')+glob("t*_fe*ms.fits"))
def check_offsets():
print "checking for offsets in this directory: ", os.getcwd()
offset_file = glob('offsets.txt') #Offset file must be structured as blue, then red
print "offset_file: ", offset_file
if len(offset_file) == 0:
offset_file = None
else:
offset_file = offset_file[0]
return offset_file
offset_file = check_offsets() #check for offset files once before going through this.
starting_offset_file = offset_file #records whether an offsets.txt existed before this run
#print spec_files
#print lamp_files
counter_b = 0
counter_r = 0
#Need to carefully match up the correct lamp and spectrum files. This seems to work well.
#current setup as of 2017-08-15 relies on blue calibration before red
for x in lamp_files:
if 'blue' in x.lower():
lamp_color = 'blue'
elif 'red' in x.lower():
lamp_color = 'red'
for y in spec_files:
###if (y[5:y.find('_930')] in x) and (y[y.find('_930'):y.find('_930')+8] in x):
try:
if (lamp_color in y.lower()) and (y[5:y.find('_930')] in x):
print x, y, offset_file
if (lamp_color== 'blue'):
if counter_b > 0:
print "recognized counter_b"
offset_file =check_offsets() #will now know that there is an offset file after the first run.
print "counter_b: ", counter_b
counter_b+=1
if (lamp_color == 'red'):
if counter_r > 0:
print "recognized counter_r"
offset_file = check_offsets()
elif (starting_offset_file == None):
offset_file = starting_offset_file
print "counter_r: ", counter_r
counter_r += 1
if offset_file == None:
plotalot = True
else:
plotalot = False
print x,y, offset_file
Wavelength_Calibration.calibrate_now(x,y,'no',config.zzceti,offset_file,plotall=plotalot)
except NameError as nameerror:
#protects from the lamp_color not getting assigned in those if statements up there, but it also catches errors where one of the variables isn't defined too, doesn't it?
print "NameError: ", nameerror
print "still no colors in files for like the 200th time."
print "Filename that has no colors: ", y
if offset_file== None:
plotalot= True
else:
plotalot= False
Wavelength_Calibration.calibrate_now(x,y,'no',config.zzceti,offset_file,plotall=plotalot) #changedthisvalue The 4th arg 'no' is whether or not we're looking at a zzceti, and the 'no' setting is kind of a roll of the dice.
#=========================
#Begin Continuum Normalization
#=========================
print '\n Begin continuum normalization.'
continuum_files = sorted(glob('wcftb*ms.fits'))
#print continuum_files
x = 0
while x < len(continuum_files):
if x == len(continuum_files)-1:
#print continuum_files[x]
continuum_normalization.normalize_now(continuum_files[x],None,False,plotall=False)
x += 1
elif continuum_files[x][0:continuum_files[x].find('930')] == continuum_files[x+1][0:continuum_files[x].find('930')]:
#print continuum_files[x],continuum_files[x+1]
continuum_normalization.normalize_now(continuum_files[x],continuum_files[x+1],True,plotall=False)
x += 2
else:
#print continuum_files[x]
continuum_normalization.normalize_now(continuum_files[x],None,False,plotall=False)
x += 1
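#Pairing sketch (filenames illustrative): 'wcftb.target_930_blue.ms.fits' and
#'wcftb.target_930_red.ms.fits' share the prefix before '930', so the loop
#above normalizes them together; a file with no partner is normalized alone.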
#=========================
#Begin Flux Calibration
#=========================
print '\nBegin flux calibration.'
#We should use the same files as for the continuum normalization. But if you want to change that for some reason, adjust below.
'''
continuum_files = sorted(glob('wcftb*ms.fits'))
single_spec_list = []
for x in continuum_files:
if 'flux' in x:
single_spec_list.append(x)
for x in single_spec_list:
continuum_files.remove(x)
continuum_files = sorted(continuum_files)
#print continuum_files
'''
stdlist = None
fluxlist = None
if config.to_flux:
flux_calibration.flux_calibrate_now(stdlist,fluxlist,continuum_files,extinct_correct=True,masterresp=True)
if not config.to_flux:
print "Not Flux calibrating since to_flux: " , config.to_flux
#=========================
#Begin Diagnostics
#=========================
print 'Running diagnostics.'
diagnostics.diagnostic_now()
|
bkaiser94/red_cam_pipeline
|
reduceall.py
|
Python
|
mit
| 8,368
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from askbot.migrations_api import safe_add_column
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding fields
safe_add_column('auth_user', 'new_response_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
safe_add_column('auth_user', 'seen_response_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting fields
db.delete_column('auth_user', 'new_response_count')
db.delete_column('auth_user', 'seen_response_count')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'through': "'ActivityAuditStatus'", 'to': "orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'askbot.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
PearsonIOKI/compose-forum
|
askbot/migrations/0026_add_seen_and_new_response_counts_to_user.py
|
Python
|
gpl-3.0
| 27,178
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import importlib, os
from glob import glob
from cStringIO import StringIO
from common import write_if_changed
def discover():
# find packages
packages = {'horton': []}
for fn in glob('../horton/*/__init__.py'):
subpackage = fn.split('/')[2]
if subpackage == 'test':
continue
packages['horton.%s' % subpackage] = []
# find modules
for package, modules in packages.iteritems():
stub = package.replace('.', '/')
for fn in sorted(glob('../%s/*.py' % stub) + glob('../%s/*.so' % stub)):
module = fn.split('/')[-1][:-3]
if module == '__init__':
continue
modules.append(module)
for fn in sorted(glob('../%s/*.h' % stub)):
module = fn.split('/')[-1]
modules.append(module)
return packages
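# Note: discover() returns a mapping like {'horton': [module, ...],
# 'horton.<subpackage>': [module or .h header, ...]}; the exact names depend
# on the source tree being scanned, so none are hard-coded here.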
def get_first_docline(module):
m = importlib.import_module(module)
if m.__doc__ is not None:
lines = m.__doc__.split('\n')
if len(lines) > 0:
return lines[0]
return 'FIXME! Write module docstring.'
def get_first_doxygenline(fn_h):
with open('../%s' % fn_h) as f:
for line in f:
if line.startswith('// UPDATELIBDOCTITLE:'):
return line[21:].strip()
raise IOError('UPDATELIBDOCTITLE missing in %s' % fn_h)
def underline(line, char, f):
print >> f, line
print >> f, char*len(line)
print >> f
def write_disclaimer(f):
print >> f, '..'
print >> f, ' This file is automatically generated. Do not make '
print >> f, ' changes as these will be overwritten. Rather edit '
print >> f, ' the documentation in the source code.'
print >> f
def main():
packages = discover()
# Write new/updated rst files if needed
fns_rst = []
for package, modules in sorted(packages.iteritems()):
# write the new file to a StringIO
f1 = StringIO()
write_disclaimer(f1)
underline('``%s`` -- %s' % (package, get_first_docline(package)), '#', f1)
print >> f1
print >> f1, '.. automodule::', package
print >> f1, ' :members:'
print >> f1
print >> f1, '.. toctree::'
print >> f1, ' :maxdepth: 1'
print >> f1, ' :numbered:'
print >> f1
for module in modules:
f2 = StringIO()
write_disclaimer(f2)
if module.endswith('.h'):
#full = package + '/' + module
fn_h = package.replace('.', '/') + '/' + module
underline('``%s`` -- %s' % (fn_h, get_first_doxygenline(fn_h)), '#', f2)
print >> f2, '.. doxygenfile::', fn_h
print >> f2, ' :project: horton'
print >> f2
print >> f2
else:
full = package + '.' + module
underline('``%s`` -- %s' % (full, get_first_docline(full)), '#', f2)
print >> f2, '.. automodule::', full
print >> f2, ' :members:'
print >> f2
print >> f2
# write if the contents have changed
rst_name = 'mod_%s_%s' % (package.replace('.', '_'), module.replace('.', '_'))
fn2_rst = 'lib/%s.rst' % rst_name
fns_rst.append(fn2_rst)
write_if_changed(fn2_rst, f2.getvalue())
print >> f1, ' %s' % rst_name
# write if the contents have changed
fn1_rst = 'lib/pck_%s.rst' % package.replace('.', '_')
fns_rst.append(fn1_rst)
write_if_changed(fn1_rst, f1.getvalue())
# Remove other rst files
for fn_rst in glob('lib/*.rst'):
if fn_rst not in fns_rst:
print 'Removing %s' % fn_rst
os.remove(fn_rst)
if __name__ == '__main__':
main()
|
crisely09/horton
|
doc/update_lib_doc.py
|
Python
|
gpl-3.0
| 4,657
|
import urllib2, sys
import xml.etree.ElementTree as etree
try: zipcode = sys.argv[1]
except IndexError: zipcode = '1700003'  # default zip code when no CLI argument is given
resp = urllib2.urlopen('http://zip.cgis.biz/xml/zip.php?zn=%s'%zipcode).read()
output = {}
tree = etree.fromstring(resp)
for e in tree[-1]:
output[e.attrib.keys()[0]] = e.attrib.values()[0]
print output
print output['state'] + output['city'] + output['address']
|
yk-tanigawa/2015gci
|
webdata/sample.py
|
Python
|
mit
| 383
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures as fx
from oslo_log import log as logging
import testtools
from cinder.tests import fixtures
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
|
Hybrid-Cloud/cinder
|
cinder/tests/unit/test_fixtures.py
|
Python
|
apache-2.0
| 2,439
|
from __future__ import absolute_import
import difflib
from functools import wraps, partial
import re
from flask import request, url_for, current_app
from flask import abort as original_flask_abort
from flask.views import MethodView
from flask.signals import got_request_exception
from werkzeug.exceptions import HTTPException, MethodNotAllowed, NotFound
from werkzeug.http import HTTP_STATUS_CODES
from werkzeug.wrappers import Response as ResponseBase
from flask.ext.restful.utils import error_data, unpack
from flask.ext.restful.representations.json import output_json
import sys
from flask.helpers import _endpoint_from_view_func
from types import MethodType
try:
#noinspection PyUnresolvedReferences
from collections import OrderedDict
except ImportError:
from .utils.ordereddict import OrderedDict
__all__ = ('Api', 'Resource', 'marshal', 'marshal_with', 'abort')
def abort(http_status_code, **kwargs):
"""Raise a HTTPException for the given http_status_code. Attach any keyword
arguments to the exception for later processing.
"""
#noinspection PyUnresolvedReferences
try:
original_flask_abort(http_status_code)
except HTTPException as e:
if len(kwargs):
e.data = kwargs
raise e
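# Hedged usage sketch: any attached kwargs become part of the error payload,
# e.g. abort(404, message="Resource 42 does not exist")  # the 'message' key is illustrative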
DEFAULT_REPRESENTATIONS = {'application/json': output_json}
class Api(object):
"""
The main entry point for the application.
You need to initialize it with a Flask Application: ::
>>> app = Flask(__name__)
>>> api = restful.Api(app)
Alternatively, you can use :meth:`init_app` to set the Flask application
after it has been constructed.
:param app: the Flask application object
:type app: flask.Flask
:param prefix: Prefix all routes with a value, eg v1 or 2010-04-01
:type prefix: str
:param default_mediatype: The default media type to return
:type default_mediatype: str
:param decorators: Decorators to attach to every resource
:type decorators: list
    :param catch_all_404s: Use :meth:`handle_error`
        to handle 404 errors throughout your app
    :type catch_all_404s: bool
    :param url_part_order: A string that controls the order that the pieces
        of the url are concatenated when the full url is constructed. 'b'
        is the blueprint (or blueprint registration) prefix, 'a' is the api
        prefix, and 'e' is the path component the endpoint is added with
    :type url_part_order: str
    """
def __init__(self, app=None, prefix='',
default_mediatype='application/json', decorators=None,
catch_all_404s=False, url_part_order='bae'):
self.representations = dict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.url_part_order = url_part_order
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
if app is not None:
self.app = app
self.init_app(app)
def init_app(self, app):
"""Initialize this class with the given :class:`flask.Flask`
application or :class:`flask.Blueprint` object.
:param app: the Flask application or blueprint object
:type app: flask.Flask
:type app: flask.Blueprint
Examples::
api = Api()
api.init_app(app)
api.add_resource(...)
"""
self.blueprint = None
# If app is a blueprint, defer the initialization
try:
app.record(self._deferred_blueprint_init)
# Flask.Blueprint has a 'record' attribute, Flask.Api does not
except AttributeError:
self._init_app(app)
else:
self.blueprint = app
def _complete_url(self, url_part, registration_prefix):
"""This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
"""
parts = {'b' : registration_prefix,
'a' : self.prefix,
'e' : url_part}
return ''.join(parts[key] for key in self.url_part_order if parts[key])
@staticmethod
def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):
"""Method used to patch BlueprintSetupState.add_url_rule for setup
state instance corresponding to this Api instance. Exists primarily
to enable _complete_url's function.
:param blueprint_setup: The BlueprintSetupState instance (self)
:param rule: A string or callable that takes a string and returns a
string(_complete_url) that is the url rule for the endpoint
being registered
:param endpoint: See BlueprintSetupState.add_url_rule
:param view_func: See BlueprintSetupState.add_url_rule
:param **options: See BlueprintSetupState.add_url_rule
"""
if callable(rule):
rule = rule(blueprint_setup.url_prefix)
elif blueprint_setup.url_prefix:
rule = blueprint_setup.url_prefix + rule
options.setdefault('subdomain', blueprint_setup.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = blueprint_setup.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
def _deferred_blueprint_init(self, setup_state):
"""Synchronize prefix between blueprint/api and registration options, then
perform initialization with setup_state.app :class:`flask.Flask` object.
When a :class:`flask_restful.Api` object is initialized with a blueprint,
this method is recorded on the blueprint to be run when the blueprint is later
registered to a :class:`flask.Flask` object. This method also monkeypatches
BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.
:param setup_state: The setup state object passed to deferred functions
during blueprint registration
:type setup_state: flask.blueprints.BlueprintSetupState
"""
self.blueprint_setup = setup_state
if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
setup_state._original_add_url_rule = setup_state.add_url_rule
setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch,
setup_state)
if not setup_state.first_registration:
raise ValueError('flask-restful blueprints can only be registered once.')
self._init_app(setup_state.app)
def _init_app(self, app):
"""Perform initialization actions with the given :class:`flask.Flask`
object.
:param app: The flask application object
:type app: flask.Flask
"""
app.handle_exception = partial(self.error_router, app.handle_exception)
app.handle_user_exception = partial(self.error_router, app.handle_user_exception)
if len(self.resources) > 0:
for resource, urls, kwargs in self.resources:
self._register_view(app, resource, *urls, **kwargs)
def owns_endpoint(self, endpoint):
"""Tests if an endpoint name (not path) belongs to this Api. Takes
        into account the Blueprint name part of the endpoint name.
:param endpoint: The name of the endpoint being checked
:return: bool
"""
if self.blueprint:
if endpoint.startswith(self.blueprint.name):
endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
else:
return False
return endpoint in self.endpoints
def _should_use_fr_error_handler(self):
""" Determine if error should be handled with FR or default Flask
The goal is to return Flask error handlers for non-FR-related routes,
and FR errors (with the correct media type) for FR endpoints. This
method currently handles 404 and 405 errors.
:return: bool
"""
adapter = current_app.create_url_adapter(request)
try:
adapter.match()
except MethodNotAllowed as e:
# Check if the other HTTP methods at this url would hit the Api
valid_route_method = e.valid_methods[0]
rule, _ = adapter.match(method=valid_route_method, return_rule=True)
return self.owns_endpoint(rule.endpoint)
except NotFound:
return self.catch_all_404s
except:
# Werkzeug throws other kinds of exceptions, such as Redirect
pass
def _has_fr_route(self):
"""Encapsulating the rules for whether the request was to a Flask endpoint"""
# 404's, 405's, which might not have a url_rule
if self._should_use_fr_error_handler():
return True
# for all other errors, just check if FR dispatched the route
if not request.url_rule:
return False
return self.owns_endpoint(request.url_rule.endpoint)
def error_router(self, original_handler, e):
"""This function decides whether the error occured in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
"""
if self._has_fr_route():
return self.handle_error(e)
return original_handler(e)
def handle_error(self, e):
"""Error handler for the API transforms a raised exception into a Flask
response, with the appropriate HTTP status code and body.
:param e: the raised Exception object
:type e: Exception
"""
got_request_exception.send(current_app._get_current_object(), exception=e)
if not hasattr(e, 'code') and current_app.propagate_exceptions:
exc_type, exc_value, tb = sys.exc_info()
if exc_value is e:
raise
else:
raise e
code = getattr(e, 'code', 500)
data = getattr(e, 'data', error_data(code))
if code >= 500:
# There's currently a bug in Python3 that disallows calling
            # logging.exception() when an exception hasn't actually been raised
if sys.exc_info() == (None, None, None):
current_app.logger.error("Internal Error")
else:
current_app.logger.exception("Internal Error")
help_on_404 = current_app.config.get("ERROR_404_HELP", True)
if code == 404 and help_on_404 and ('message' not in data or
data['message'] == HTTP_STATUS_CODES[404]):
rules = dict([(re.sub('(<.*>)', '', rule.rule), rule.rule)
for rule in current_app.url_map.iter_rules()])
close_matches = difflib.get_close_matches(request.path, rules.keys())
if close_matches:
# If we already have a message, add punctuation and continue it.
if "message" in data:
data["message"] += ". "
else:
data["message"] = ""
data['message'] += 'You have requested this URI [' + request.path + \
'] but did you mean ' + \
' or '.join((rules[match]
for match in close_matches)) + ' ?'
resp = self.make_response(data, code)
if code == 401:
resp = self.unauthorized(resp)
return resp
def mediatypes_method(self):
"""Return a method that returns a list of mediatypes
"""
return lambda resource_cls: self.mediatypes() + [self.default_mediatype]
def add_resource(self, resource, *urls, **kwargs):
"""Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Resource`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
        :param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`).
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
"""
if self.app is not None:
self._register_view(self.app, resource, *urls, **kwargs)
else:
self.resources.append((resource, urls, kwargs))
def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
self.endpoints.add(endpoint)
if endpoint in app.view_functions.keys():
previous_view_class = app.view_functions[endpoint].__dict__['view_class']
# if you override the endpoint with a different class, avoid the collision by raising an exception
if previous_view_class != resource:
raise ValueError('This endpoint (%s) is already set to the class %s.' % (endpoint, previous_view_class.__name__))
resource.mediatypes = self.mediatypes_method() # Hacky
resource.endpoint = endpoint
resource_func = self.output(resource.as_view(endpoint))
for decorator in self.decorators:
resource_func = decorator(resource_func)
for url in urls:
# If this Api has a blueprint
if self.blueprint:
# And this Api has been setup
if self.blueprint_setup:
# Set the rule to a string directly, as the blueprint is already
# set up.
rule = self._complete_url(url, self.blueprint_setup.url_prefix)
else:
# Set the rule to a function that expects the blueprint prefix
# to construct the final url. Allows deferment of url finalization
# in the case that the associated Blueprint has not yet been
# registered to an application, so we can wait for the registration
# prefix
rule = partial(self._complete_url, url)
else:
# If we've got no Blueprint, just build a url with no prefix
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_url_rule(rule, view_func=resource_func, **kwargs)
def output(self, resource):
"""Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
resp = resource(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
data, code, headers = unpack(resp)
return self.make_response(data, code, headers=headers)
return wrapper
def url_for(self, resource, **values):
"""Generates a URL to the given resource."""
return url_for(resource.endpoint, **values)
def make_response(self, data, *args, **kwargs):
"""Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to (application/json) if no transformer is found for the
requested mediatype.
:param data: Python object containing response data to be transformed
"""
for mediatype in self.mediatypes() + [self.default_mediatype]:
if mediatype in self.representations:
resp = self.representations[mediatype](data, *args, **kwargs)
resp.headers['Content-Type'] = mediatype
return resp
def mediatypes(self):
"""Returns a list of requested mediatypes sent in the Accept header"""
return [h for h, q in request.accept_mimetypes]
def representation(self, mediatype):
"""Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
"""
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
def unauthorized(self, response):
""" Given a response, change it to ask for credentials """
realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restful")
challenge = u"{0} realm=\"{1}\"".format("Basic", realm)
response.headers['WWW-Authenticate'] = challenge
return response
class Resource(MethodView):
"""
Represents an abstract RESTful resource. Concrete resources should
extend from this class and expose methods for each supported HTTP
method. If a resource is invoked with an unsupported HTTP method,
the API will return a response with status 405 Method Not Allowed.
Otherwise the appropriate method is called and passed all arguments
from the url rule used when adding the resource to an Api instance. See
:meth:`~flask.ext.restful.Api.add_resource` for details.
"""
representations = None
method_decorators = []
def dispatch_request(self, *args, **kwargs):
# Taken from flask
#noinspection PyUnresolvedReferences
meth = getattr(self, request.method.lower(), None)
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
for decorator in self.method_decorators:
meth = decorator(meth)
resp = meth(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
representations = self.representations or {}
#noinspection PyUnresolvedReferences
for mediatype in self.mediatypes():
if mediatype in representations:
data, code, headers = unpack(resp)
resp = representations[mediatype](data, code, headers)
resp.headers['Content-Type'] = mediatype
return resp
return resp
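# A minimal concrete resource, sketched for illustration only (assumes an Api
# instance named `api` already exists):
#   class HelloWorld(Resource):
#       def get(self):
#           return {'hello': 'world'}
#   api.add_resource(HelloWorld, '/')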
def marshal(data, fields):
"""Takes raw data (in the form of a dict, list, object) and a dict of
fields to output and filters the data based on those fields.
    :param fields: a dict whose keys will make up the final serialized
                   response output
    :param data: the actual object(s) from which the fields are taken
>>> from flask.ext.restful import fields, marshal
>>> data = { 'a': 100, 'b': 'foo' }
>>> mfields = { 'a': fields.Raw }
>>> marshal(data, mfields)
OrderedDict([('a', 100)])
"""
def make(cls):
if isinstance(cls, type):
return cls()
return cls
if isinstance(data, (list, tuple)):
return [marshal(d, fields) for d in data]
items = ((k, marshal(data, v) if isinstance(v, dict)
else make(v).output(k, data))
for k, v in fields.items())
return OrderedDict(items)
class marshal_with(object):
"""A decorator that apply marshalling to the return values of your methods.
>>> from flask.ext.restful import fields, marshal_with
>>> mfields = { 'a': fields.Raw }
>>> @marshal_with(mfields)
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('a', 100)])
see :meth:`flask.ext.restful.marshal`
"""
def __init__(self, fields):
""":param fields: a dict of whose keys will make up the final
serialized response output"""
self.fields = fields
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return marshal(data, self.fields), code, headers
else:
return marshal(resp, self.fields)
return wrapper
|
pyblish/pyblish-endpoint
|
pyblish_endpoint/vendor/flask_restful/__init__.py
|
Python
|
lgpl-3.0
| 22,183
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import versionutils
from keystone.conf import utils
_DEPRECATE_EVENTLET_MSG = utils.fmt("""
Support for running keystone under eventlet has been removed in the Newton
release. These options remain for backwards compatibility because they are used
for URL substitutions.
""")
public_bind_host = cfg.HostAddressOpt(
'public_bind_host',
default='0.0.0.0', # nosec : Bind to all interfaces by default for
# backwards compatibility.
deprecated_opts=[
cfg.DeprecatedOpt('bind_host', group='DEFAULT'),
cfg.DeprecatedOpt('public_bind_host', group='DEFAULT'),
],
deprecated_for_removal=True,
deprecated_reason=_DEPRECATE_EVENTLET_MSG,
deprecated_since=versionutils.deprecated.KILO,
help=utils.fmt("""
The IP address of the network interface for the public service to listen on.
"""))
public_port = cfg.PortOpt(
'public_port',
default=5000,
deprecated_name='public_port',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason=_DEPRECATE_EVENTLET_MSG,
deprecated_since=versionutils.deprecated.KILO,
help=utils.fmt("""
The port number for the public service to listen on.
"""))
admin_bind_host = cfg.HostAddressOpt(
'admin_bind_host',
default='0.0.0.0', # nosec : Bind to all interfaces by default for
# backwards compatibility.
deprecated_opts=[
cfg.DeprecatedOpt('bind_host', group='DEFAULT'),
cfg.DeprecatedOpt('admin_bind_host', group='DEFAULT'),
],
deprecated_for_removal=True,
deprecated_reason=_DEPRECATE_EVENTLET_MSG,
deprecated_since=versionutils.deprecated.KILO,
help=utils.fmt("""
The IP address of the network interface for the admin service to listen on.
"""))
admin_port = cfg.PortOpt(
'admin_port',
default=35357,
deprecated_name='admin_port',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason=_DEPRECATE_EVENTLET_MSG,
deprecated_since=versionutils.deprecated.KILO,
help=utils.fmt("""
The port number for the admin service to listen on.
"""))
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
public_bind_host,
public_port,
admin_bind_host,
admin_port,
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
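# Hypothetical usage sketch (GROUP_NAME resolves to 'eventlet_server' from the
# module name; calling conf(args=[]) just loads the defaults declared above):
#   conf = cfg.ConfigOpts()
#   register_opts(conf)
#   conf(args=[])
#   assert conf.eventlet_server.public_port == 5000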
|
openstack/keystone
|
keystone/conf/eventlet_server.py
|
Python
|
apache-2.0
| 2,959
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import time
import traceback
from datetime import datetime, timedelta
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.deprecated_logging import log, OutputTee
class TerminateQueue(Exception):
pass
class QueueEngineDelegate:
def queue_log_path(self):
raise NotImplementedError, "subclasses must implement"
def work_item_log_path(self, work_item):
raise NotImplementedError, "subclasses must implement"
def begin_work_queue(self):
raise NotImplementedError, "subclasses must implement"
def should_continue_work_queue(self):
raise NotImplementedError, "subclasses must implement"
def next_work_item(self):
raise NotImplementedError, "subclasses must implement"
def should_proceed_with_work_item(self, work_item):
# returns (safe_to_proceed, waiting_message, patch)
raise NotImplementedError, "subclasses must implement"
def process_work_item(self, work_item):
raise NotImplementedError, "subclasses must implement"
def handle_unexpected_error(self, work_item, message):
raise NotImplementedError, "subclasses must implement"
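# A minimal delegate sketch (illustrative names only); a concrete delegate
# overrides every hook above in the same way:
#   class NullQueueDelegate(QueueEngineDelegate):
#       def queue_log_path(self):
#           return "/tmp/queue.log"
#       def should_continue_work_queue(self):
#           return False
#       # remaining hooks omitted; each must be overridden the same way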
class QueueEngine:
def __init__(self, name, delegate, wakeup_event):
self._name = name
self._delegate = delegate
self._wakeup_event = wakeup_event
self._output_tee = OutputTee()
log_date_format = "%Y-%m-%d %H:%M:%S"
sleep_duration_text = "2 mins" # This could be generated from seconds_to_sleep
seconds_to_sleep = 120
handled_error_code = 2
    # Child processes exit with a special code so the parent queue process can detect that the error was handled.
@classmethod
def exit_after_handled_error(cls, error):
log(error)
exit(cls.handled_error_code)
def run(self):
self._begin_logging()
self._delegate.begin_work_queue()
while (self._delegate.should_continue_work_queue()):
try:
self._ensure_work_log_closed()
work_item = self._delegate.next_work_item()
if not work_item:
self._sleep("No work item.")
continue
if not self._delegate.should_proceed_with_work_item(work_item):
self._sleep("Not proceeding with work item.")
continue
                # FIXME: Work logs should not depend on bug_id specifically.
# This looks fixed, no?
self._open_work_log(work_item)
try:
if not self._delegate.process_work_item(work_item):
log("Unable to process work item.")
continue
except ScriptError, e:
# Use a special exit code to indicate that the error was already
# handled in the child process and we should just keep looping.
if e.exit_code == self.handled_error_code:
continue
message = "Unexpected failure when processing patch! Please file a bug against webkit-patch.\n%s" % e.message_with_output()
self._delegate.handle_unexpected_error(work_item, message)
except TerminateQueue, e:
self._stopping("TerminateQueue exception received.")
return 0
except KeyboardInterrupt, e:
self._stopping("User terminated queue.")
return 1
except Exception, e:
traceback.print_exc()
                # Don't try to tell the status bot, in case telling it causes an exception.
self._sleep("Exception while preparing queue")
self._stopping("Delegate terminated queue.")
return 0
def _stopping(self, message):
log("\n%s" % message)
self._delegate.stop_work_queue(message)
# Be careful to shut down our OutputTee or the unit tests will be unhappy.
self._ensure_work_log_closed()
self._output_tee.remove_log(self._queue_log)
def _begin_logging(self):
self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path())
self._work_log = None
def _open_work_log(self, work_item):
work_item_log_path = self._delegate.work_item_log_path(work_item)
if not work_item_log_path:
return
self._work_log = self._output_tee.add_log(work_item_log_path)
def _ensure_work_log_closed(self):
# If we still have a bug log open, close it.
if self._work_log:
self._output_tee.remove_log(self._work_log)
self._work_log = None
def _now(self):
"""Overriden by the unit tests to allow testing _sleep_message"""
return datetime.now()
def _sleep_message(self, message):
wake_time = self._now() + timedelta(seconds=self.seconds_to_sleep)
return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), self.sleep_duration_text)
def _sleep(self, message):
log(self._sleep_message(message))
self._wakeup_event.wait(self.seconds_to_sleep)
self._wakeup_event.clear()
|
mogoweb/webkit_for_android5.1
|
webkit/Tools/Scripts/webkitpy/tool/bot/queueengine.py
|
Python
|
apache-2.0
| 6,810
|
#-*- coding: utf-8 -*-
def factorial(n):
"""Return the factorial of n"""
if n < 2:
return 1
return n * factorial(n - 1)
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
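# A memoized variant, sketched as a comment since the naive recursion above is
# exponential (the mutable default argument is a deliberate cache trick):
# def fibonacci_memo(n, _cache={0: 0, 1: 1}):
#     if n not in _cache:
#         _cache[n] = fibonacci_memo(n - 1) + fibonacci_memo(n - 2)
#     return _cache[n]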
def fib_fac(x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac()
|
ealogar/curso-python
|
advanced/fib_fac.py
|
Python
|
apache-2.0
| 1,159
|
from trueskill import TrueSkill, Rating, rate
import argparse
from pytba import api as tba
import math
class FrcTrueSkill:
def __init__(self):
self.env = TrueSkill(draw_probability=0.02)
self.trueskills = {}
self.events = {}
def update(self, red_alliance, red_score, blue_alliance, blue_score):
# Calculate teams per alliance
for alliance in [red_alliance, blue_alliance]:
for team in alliance:
if not team in self.trueskills:
self.trueskills[team] = self.env.Rating()
# Update ratings based on result
if red_score == blue_score: # Tied
if red_score == -1:
return # No result yet
ranks = [0, 0]
elif red_score > blue_score: # Red beat blue
ranks = [0, 1] # Lower is better
else:
ranks = [1, 0]
        new_red, new_blue = self.env.rate([[self.trueskills[number] for number in red_alliance],
                                           [self.trueskills[number] for number in blue_alliance]], ranks)
# Store the new values
new_ratings = new_red + new_blue
for rating, team_number in zip(new_ratings, red_alliance + blue_alliance):
self.trueskills[team_number] = rating
def predict(self, red_alliance, blue_alliance):
        proba = self.env.quality([[self.trueskills[number] for number in red_alliance],
                                  [self.trueskills[number] for number in blue_alliance]])
        return round((1.0 - proba) * 100)  # built-in round(); the math module has no round()
def skill(self, team):
        return self.env.expose(self.trueskills[team])
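# Hypothetical usage sketch (team keys and scores are illustrative):
#   ts = FrcTrueSkill()
#   ts.update(['frc4774', 'frc1234'], 120, ['frc5678', 'frc9999'], 95)
#   print(ts.predict(['frc4774', 'frc1234'], ['frc5678', 'frc9999']))
#   print(ts.skill('frc4774'))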
def parse_matches(matches, env, predict=False):
count = 0.0
draws = 0.0
# Initialise our trueskills dictionary
trueskills = {}
for row in matches:
alliances = row['alliances']
red_alliance = alliances['red']['teams']
blue_alliance = alliances['blue']['teams']
# Calculate teams per alliance
for alliance in [red_alliance, blue_alliance]:
for team in alliance:
if not team in trueskills:
trueskills[team] = env.Rating()
# Update ratings based on result
if alliances['red']['score'] == alliances['blue']['score']: # Tied
if alliances['red']['score'] == -1:
if predict:
                    proba = env.quality([[trueskills[number] for number in red_alliance],
                                         [trueskills[number] for number in blue_alliance]])
print(row['match_number'], [str(number)[3:] for number in red_alliance], [str(number)[3:] for number in blue_alliance], "Win probability: %2.0f:%2.0f" %((1.0-proba)*100,proba*100))
else:
continue # No result yet
ranks = [0, 0]
draws = draws + 1
elif alliances['red']['score'] > alliances['blue']['score']: # Red beat blue
ranks = [0, 1] # Lower is better
else:
ranks = [1, 0]
new_red, new_blue = env.rate([[trueskills[number] for number in red_alliance],
[trueskills[number] for number in blue_alliance]], ranks)
count = count + 1
# Store the new values
new_ratings = new_red + new_blue
for rating, team_number in zip(new_ratings, red_alliance + blue_alliance):
trueskills[team_number] = rating
if not predict:
if count > 0:
print("Draw rate: %f" % (draws / count))
print("Matches: %i" % count)
return trueskills
def get_all_matches(year):
matches = []
events = tba.tba_get('events/%s' % year)
for event in events:
matches += tba.event_get(event['key']).matches
return sorted(matches, key=lambda k: float('inf') if k['time'] is None else k['time'])
def sort_by_trueskill(trueskills, env):
return sorted(trueskills.items(), key=lambda k: env.expose(k[1]), reverse=True) # Sort by trueskill
def sort_by_name(trueskills):
return sorted(trueskills.items(), key=lambda k: ('0000' + k[0][3:])[-4:]) # Sort by team number
def print_trueskills(trueskills, env):
for k,v in trueskills:
print('%s: %f' % (k, env.expose(v)))
if __name__ == '__main__':
import datetime
now = datetime.datetime.now()
tba.set_api_key('frc4774', 'trueskill', '1.0')
parser = argparse.ArgumentParser(description='Run TrueSkill algorithm on event results.')
parser.add_argument('--predict', help='Predict unplayed matches', dest='predict', action='store_true')
parser.add_argument('--year', help='All matches in all events in specified year', type=str, default=str(now.year))
args = parser.parse_args()
# Set the draw probability based on previous data - around 3%
env = TrueSkill(draw_probability=0.025) # Try tweaking tau and beta too
matches = get_all_matches(args.year)
results = parse_matches(matches, env)
results = sort_by_trueskill(results, env)
#results = sort_by_name(results)
print_trueskills(results, env)
|
Ninjakow/TrueSkill
|
ranking.py
|
Python
|
gpl-3.0
| 5,051
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parameterized unit tests for quantizing a Tensorflow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.compat import compat
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
batch_norm = layers.batch_norm
conv2d = layers.conv2d
fully_connected = layers.fully_connected
separable_conv2d = layers.separable_conv2d
class QuantizeTest(test_util.TensorFlowTestCase):
def _RunWithoutBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
# (activation, activation_op_name, with_bypass, delay)
(nn_ops.relu6, 'Relu6', False, None),
(nn_ops.relu, 'Relu', False, None),
(array_ops.identity, 'Identity', False, None),
(nn_ops.relu6, 'Relu6', False, 5000),
(nn_ops.relu, 'Relu', False, 5000),
(array_ops.identity, 'Identity', False, 5000),
(nn_ops.relu6, 'Relu6', True, None),
(nn_ops.relu, 'Relu', True, None),
(array_ops.identity, 'Identity', True, None),
(nn_ops.relu6, 'Relu6', True, 5000),
(nn_ops.relu, 'Relu', True, 5000),
(array_ops.identity, 'Identity', True, 5000),
]
for params in parameters_list:
# Test everything with resource variables and normal variables.
test_fn(params[0], params[1], params[2], params[3], False, None)
test_fn(params[0], params[1], params[2], params[3], True, None)
# Test with both empty scope and an example scope
test_fn(params[0], params[1], params[2], params[3], False, 'test')
test_fn(params[0], params[1], params[2], params[3], True, 'test')
def _AssertCorrectQuantizedGraphWithoutBatchNorm(
self, graph, scope, layer, activation_op_name, with_bypass, delay,
use_resource):
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_scope = self._GetConvScope(scope, with_bypass)
delim = '/' if conv_scope else ''
if scope:
scope = scope + '/'
weights_quant = graph.get_operation_by_name(
conv_scope + delim + 'weights_quant/' + quantization_node_name)
self.assertEqual(weights_quant.type, quantization_node_name)
# Assemble the expected inputs.
if use_resource:
expected_inputs = [
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
if layer == 'DepthwiseConv2dNative':
expected_inputs.append(conv_scope + delim + 'depthwise/ReadVariableOp')
else:
expected_inputs.append(conv_scope + delim + layer + '/ReadVariableOp')
else:
expected_inputs = [
conv_scope + delim + 'weights_quant/AssignMinLast',
conv_scope + delim + 'weights_quant/AssignMaxLast',
]
if layer == 'DepthwiseConv2dNative':
expected_inputs.append(conv_scope + delim + 'depthwise_weights/read')
else:
expected_inputs.append(conv_scope + delim + 'weights/read')
self._AssertInputOpsAre(weights_quant, expected_inputs)
if delay and delay > 0:
output_op_name = (
conv_scope + delim + 'weights_quant/delayed_quant/Switch_1')
else:
if layer == 'DepthwiseConv2dNative':
output_op_name = conv_scope + delim + 'depthwise'
else:
output_op_name = conv_scope + delim + layer
self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])
if with_bypass:
conv_quant = graph.get_operation_by_name(
conv_scope + delim + 'conv_quant/' + quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
conv_scope + delim + 'BiasAdd',
]
else:
expected_inputs = [
conv_scope + delim + 'conv_quant/AssignMinEma',
conv_scope + delim + 'conv_quant/AssignMaxEma',
conv_scope + delim + 'BiasAdd'
]
self._AssertInputOpsAre(conv_quant, expected_inputs)
output_op_name = (
conv_scope + delim +
'conv_quant/delayed_quant/Switch_1' if delay else scope + 'AddV2')
self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])
act_quant = graph.get_operation_by_name(scope + 'act_quant/' +
quantization_node_name)
self.assertEqual(act_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
scope + activation_op_name,
]
else:
expected_inputs = [
scope + 'act_quant/AssignMinEma', scope + 'act_quant/AssignMaxEma',
scope + activation_op_name
]
self._AssertInputOpsAre(act_quant, expected_inputs)
output_op_name = (
scope + 'act_quant/delayed_quant/Switch_1'
if delay else 'control_dependency')
self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
self._AssertIdempotent(graph)
def testQuantize_Conv2dWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_Conv2dWithoutBatchNorm)
def _TestQuantize_Conv2dWithoutBatchNorm(self, activation, activation_op_name,
with_bypass, delay, use_resource,
scope):
"""Tests quantization: inputs -> Conv2d no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
out_depth = 3 if with_bypass else 32
activation_fn = None if with_bypass else activation
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=conv_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
if conv_scope is None:
conv_scope = ''
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_FCWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_FCWithoutBatchNorm)
def _TestQuantize_FCWithoutBatchNorm(self, activation, activation_op_name,
with_bypass, delay, use_resource, scope):
"""Tests quantization: inputs -> FC no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
activation_fn = None if with_bypass else activation
fc_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=activation_fn,
scope=fc_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'MatMul', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_DepthwiseConv2dWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_DepthwiseConv2dWithoutBatchNorm)
def _TestQuantize_DepthwiseConv2dWithoutBatchNorm(
self, activation, activation_op_name, with_bypass, delay, use_resource,
scope):
"""Tests quantization: inputs -> DWConv2d no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else activation
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=conv_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
def testQuantize_AtrousConvWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_AtrousConvWithoutBatchNorm)
def _TestQuantize_AtrousConvWithoutBatchNorm(self, activation,
activation_op_name, with_bypass,
delay, use_resource, scope):
"""Tests quantization: inputs -> atrous conv no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
dilation_rate = 2
activation_fn = None if with_bypass else activation
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=conv_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
def _RunBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
# (activation, activation_op_name, with_bypass, delay, fused_batch_norm)
(nn_ops.relu6, 'Relu6', False, None, False),
(nn_ops.relu, 'Relu', False, None, False),
(array_ops.identity, 'Identity', False, None, False),
(nn_ops.relu6, 'Relu6', False, 5000, False),
(nn_ops.relu, 'Relu', False, 5000, False),
(array_ops.identity, 'Identity', False, 5000, False),
(nn_ops.relu6, 'Relu6', True, None, False),
(nn_ops.relu, 'Relu', True, None, False),
(array_ops.identity, 'Identity', True, None, False),
(nn_ops.relu6, 'Relu6', True, 5000, False),
(nn_ops.relu, 'Relu', True, 5000, False),
(array_ops.identity, 'Identity', True, 5000, False),
(nn_ops.relu6, 'Relu6', False, None, True),
(nn_ops.relu, 'Relu', False, None, True),
(array_ops.identity, 'Identity', False, None, True),
(nn_ops.relu6, 'Relu6', False, 5000, True),
(nn_ops.relu, 'Relu', False, 5000, True),
(array_ops.identity, 'Identity', False, 5000, True),
(nn_ops.relu6, 'Relu6', True, None, True),
(nn_ops.relu, 'Relu', True, None, True),
(array_ops.identity, 'Identity', True, None, True),
(nn_ops.relu6, 'Relu6', True, 5000, True),
(nn_ops.relu, 'Relu', True, 5000, True),
(array_ops.identity, 'Identity', True, 5000, True)
]
for params in parameters_list:
# Test everything with resource variables and normal variables.
test_fn(params[0], params[1], params[2], params[3], params[4], False,
None)
test_fn(params[0], params[1], params[2], params[3], params[4], True, None)
test_fn(params[0], params[1], params[2], params[3], params[4], False,
'test')
test_fn(params[0], params[1], params[2], params[3], params[4], True,
'test')
def _AssertCorrectQuantizedGraphWithBatchNorm(self, graph, scope, layer,
activation_op_name, with_bypass,
delay, use_resource):
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_scope = self._GetConvScope(scope, with_bypass)
delim = '/' if conv_scope else ''
if scope:
scope = scope + '/'
weights_quant = graph.get_operation_by_name(
conv_scope + delim + 'weights_quant/' + quantization_node_name)
self.assertEqual(weights_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
conv_scope + delim + 'weights_quant/' + 'AssignMinLast',
conv_scope + delim + 'weights_quant/' + 'AssignMaxLast'
]
expected_inputs.append(conv_scope + delim + 'mul_fold')
self._AssertInputOpsAre(weights_quant, expected_inputs)
if layer == 'DepthwiseConv2dNative':
output_op_name = conv_scope + delim + (
'weights_quant/delayed_quant/Switch_1' if delay else 'depthwise_Fold')
else:
output_op_name = conv_scope + delim + (
'weights_quant/delayed_quant/Switch_1' if delay else layer + '_Fold')
self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])
if with_bypass:
conv_quant = graph.get_operation_by_name(
conv_scope + delim + 'conv_quant/' + quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
conv_scope + delim + 'conv_quant/AssignMinEma',
conv_scope + delim + 'conv_quant/AssignMaxEma',
]
expected_inputs.append(conv_scope + delim + 'add_fold')
self._AssertInputOpsAre(conv_quant, expected_inputs)
output_op_name = (
conv_scope + delim +
'conv_quant/delayed_quant/Switch_1' if delay else scope + 'AddV2')
self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])
act_quant = graph.get_operation_by_name(scope + 'act_quant/' +
quantization_node_name)
self.assertEqual(act_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
scope + 'act_quant/AssignMinEma',
scope + 'act_quant/AssignMaxEma',
]
expected_inputs.append(scope + activation_op_name)
self._AssertInputOpsAre(act_quant, expected_inputs)
output_op_name = (
scope + 'act_quant/delayed_quant/Switch_1'
if delay else 'control_dependency')
self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
self._AssertIdempotent(graph)
def testQuantize_Conv2dWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(
self._TestQuantize_Conv2dWithBatchNorm)
def _TestQuantize_Conv2dWithBatchNorm(self, activation, activation_op_name,
with_bypass, delay, fused_batch_norm,
use_resource, scope):
"""Tests quantization: inputs -> Conv2d with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
out_depth = 3 if with_bypass else 32
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_FCWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(self._TestQuantize_FCWithBatchNorm)
def _TestQuantize_FCWithBatchNorm(self, activation, activation_op_name,
with_bypass, delay, fused_batch_norm,
use_resource, scope):
"""Tests quantization: inputs -> FC with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'MatMul', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_DepthwiseConv2dWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(
self._TestQuantize_DepthwiseConv2dWithBatchNorm)
def _TestQuantize_DepthwiseConv2dWithBatchNorm(
self, activation, activation_op_name, with_bypass, delay,
fused_batch_norm, use_resource, scope):
"""Tests quantization: inputs -> DWConv2d with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
def testQuantize_AtrousConvWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(
self._TestQuantize_AtrousConvWithBatchNorm)
def _TestQuantize_AtrousConvWithBatchNorm(
self, activation, activation_op_name, with_bypass, delay,
fused_batch_norm, use_resource, scope):
"""Tests quantization: inputs -> atrous conv with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
dilation_rate = 2
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
def _AssertIdempotent(self, graph):
# Ensure that calling the rewrite again doesn't change the graph.
graph_def_before = str(graph.as_graph_def())
with graph.as_default():
# Ensuring that calling the rewrite again doesn't add more nodes.
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True)
graph_def_after = str(graph.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
def testBatchNormForcedUpdates(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
parameter_list = [
# (activation, activation_op_name, fused_batch_norm)
(nn_ops.relu6, 'Relu6', False),
(nn_ops.relu, 'Relu', False),
(array_ops.identity, 'Identity', False),
(nn_ops.relu6, 'Relu6', True),
(nn_ops.relu, 'Relu', True),
(array_ops.identity, 'Identity', True),
]
for params in parameter_list:
self._TestBatchNormForcedUpdates(params[0], params[1], params[2], False)
self._TestBatchNormForcedUpdates(params[0], params[1], params[2], True)
def _TestBatchNormForcedUpdates(self, activation, activation_op_name,
fused_batch_norm, use_resource):
"""post_activation bypass quantization should happen with forced updates."""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
# Setting updates_collections to None forces updates adding an extra
# identity operation following batch norms.
bn_params = self._BatchNormParams(
fused=fused_batch_norm, force_updates=True)
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation,
normalizer_fn=batch_norm,
normalizer_params=bn_params,
scope='test/test')
bypass_tensor = math_ops.add(conv, input2, name='test/add')
# The output of the post_activation bypass will be another layer.
_ = conv2d(
bypass_tensor,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
normalizer_fn=batch_norm,
normalizer_params=bn_params,
activation_fn=activation,
scope='test/unused')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, is_training=True)
# Ensure that the bypass node is preceded by and followed by a
# FakeQuantWithMinMaxVar operation, since the output of the Add isn't an
# activation.
self.assertTrue('FakeQuantWithMinMaxVars' in
[c.type for c in bypass_tensor.consumers()])
self.assertTrue('FakeQuantWithMinMaxVars' in
[i.op.type for i in bypass_tensor.op.inputs])
with open('/tmp/bn_quant_test.pbtxt', 'w') as f:
f.write(str(graph.as_graph_def()))
def _GetConvScope(self, scope, with_bypass):
if scope is None:
scope = ''
delim = '/' if scope else ''
if with_bypass:
conv_scope = scope + delim + 'test2'
else:
conv_scope = scope
return conv_scope
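# Illustrative behavior (sketch, not exercised directly by the tests): with a
# bypass the conv is nested one level deeper, e.g.
#   _GetConvScope('test', with_bypass=True)  -> 'test/test2'
#   _GetConvScope(None, with_bypass=False)   -> ''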
def _BatchNormParams(self, fused=False, force_updates=False):
params = {
'center': True,
'scale': True,
'decay': 1.0 - 0.003,
'fused': fused
}
if force_updates:
params['updates_collections'] = None
return params
def _WeightInit(self, stddev):
"""Returns truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
An initializer that initializes variables with a truncated normal distribution.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
def _AssertInputOpsAre(self, op, in_op_names):
"""Asserts that all inputs to op come from in_op_names (disregarding order).
Args:
op: Operation to check inputs for.
in_op_names: List of strings, operations where all op's inputs should
come from.
"""
expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)
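# For example, _AssertInputOpsAre(op, ['a', 'b']) asserts that op consumes
# exactly the tensors 'a:0' and 'b:0' (the first output of each named op),
# regardless of order.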
def _AssertOutputGoesToOps(self, op, graph, out_op_names):
"""Asserts that outputs from op go to out_op_names (and perhaps others).
Args:
op: Operation to check outputs for.
graph: Graph where output operations are located.
out_op_names: List of strings, operations where op's outputs should go.
"""
for out_op_name in out_op_names:
out_op = graph.get_operation_by_name(out_op_name)
self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])
if __name__ == '__main__':
googletest.main()
|
chemelnucfin/tensorflow
|
tensorflow/contrib/quantize/python/quantize_parameterized_test.py
|
Python
|
apache-2.0
| 35,969
|
#Initially forked from Bojan's kernel here: https://www.kaggle.com/tunguz/bow-meta-text-and-dense-features-lb-0-2242/code
#That kernel was forked from Nick Brook's kernel here: https://www.kaggle.com/nicapotato/bow-meta-text-and-dense-features-lgbm?scriptVersionId=3493400
#Used oof method from Faron's kernel here: https://www.kaggle.com/mmueller/stacking-starter?scriptVersionId=390867
#Used some text cleaning method from Muhammad Alfiansyah's kernel here: https://www.kaggle.com/muhammadalfiansyah/push-the-lgbm-v19
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
print("Data:\n",os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
from sklearn.linear_model import Ridge
from sklearn.cross_validation import KFold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
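# Sketch of the contract: get_oof returns out-of-fold train predictions of
# shape (ntrain, 1) and test predictions of shape (ntest, 1); the test column
# is the mean over the NFOLDS per-fold models, so no train-row prediction ever
# comes from a model that was fit on that row.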
def cleanName(text):
    try:
        textProc = text.lower()
        textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
        # Python's re module has no POSIX classes, so '[^[:alpha:]]' would not
        # mean "non-alphabetic"; spell the intended class out instead.
        regex = re.compile('[^a-z]')
        textProc = regex.sub(" ", textProc)
        textProc = " ".join(textProc.split())
        return textProc
    except:
        return "name error"
def rmse(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(-999,inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
    df[col] = df[col].fillna('Unknown')  # fillna returns a copy; assign it back or the fill is lost
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
#df['title'] = df['title'].apply(lambda x: cleanName(x))
#df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
    df[cols] = df[cols].fillna('missing').astype(str)  # fill NA first, so missing values do not become the string 'nan'
    df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words don't get treated differently
    df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of words
    df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split()))) # Count number of unique words
    df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Unique words as a percentage of all words
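# Worked example (illustrative): for a description "hello hello world" the
# loop above yields description_num_words=3, description_num_unique_words=2
# and description_words_vs_unique ~ 66.67 (unique words as a percentage).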
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
## I increased max_features for the description. It did not change my score much, but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
# 'max_depth': 15,
'num_leaves': 250,
'feature_fraction': 0.65,
'bagging_fraction': 0.85,
# 'bagging_freq': 5,
'learning_rate': 0.02,
'verbose': 0
}
lgtrain = lgb.Dataset(X_train, y_train,
feature_name=tfvocab,
categorical_feature = categorical)
lgvalid = lgb.Dataset(X_valid, y_valid,
feature_name=tfvocab,
categorical_feature = categorical)
modelstart = time.time()
lgb_clf = lgb.train(
lgbm_params,
lgtrain,
num_boost_round=16000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train','valid'],
early_stopping_rounds=30,
verbose_eval=200
)
# Feature Importance Plot
f, ax = plt.subplots(figsize=[7,10])
lgb.plot_importance(lgb_clf, max_num_features=50, ax=ax)
plt.title("Light GBM Feature Importance")
plt.savefig('feature_import.png')
print("Model Evaluation Stage")
lgpred = lgb_clf.predict(testing)
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("lgsub.csv",index=True,header=True)
print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
print("Notebook Runtime: %0.2f Minutes"%((time.time() - notebookstart)/60))
|
ifuding/Kaggle
|
SVPC/Code/philly/HimanChau.py
|
Python
|
apache-2.0
| 9,735
|
# -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from plone.theme.interfaces import IDefaultPloneLayer
from zope.interface import Interface
class IAde25ContactLayer(IDefaultPloneLayer):
"""Marker interface that defines a Zope 3 browser layer."""
class IContactImagesTool(Interface):
""" Responsive image generator
General tool providing srcset compatible image transforms
"""
def create(context):
""" Create a set of image scales
The caller is responsible for passing a valid data dictionary
containing the necessary details
Returns dictionary of available scales
@param context: content object the scales are generated for
"""
|
ade25/ade25.contacts
|
ade25/contacts/interfaces.py
|
Python
|
mit
| 727
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2014 AvanzOSC (Daniel). All Rights Reserved
# Date: 10/04/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv


class wizard_remesas_run(osv.osv_memory):
"""
"""
_name = "wizard.remesas.run"
_description = "Procesa lineas para las remesas"
    _columns = {}
def remesas_run(self, cr, uid, data, context):
if context is None:
context = {}
res = {}
return res
wizard_remesas_run()
|
avanzosc/avanzosc6.1
|
l10n_es_devolucion_remesas/wizard/remesas_run.py
|
Python
|
agpl-3.0
| 1,359
|
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Tests for the HostRegistry class."""
import unittest
from htrun.host_tests_registry import HostRegistry
from htrun import BaseHostTest
class HostRegistryTestCase(unittest.TestCase):
class HostTestClassMock(BaseHostTest):
def setup(self):
pass
def result(self):
pass
def teardown(self):
pass
def setUp(self):
self.HOSTREGISTRY = HostRegistry()
def tearDown(self):
pass
def test_register_host_test(self):
self.HOSTREGISTRY.register_host_test(
"host_test_mock_auto", self.HostTestClassMock()
)
self.assertEqual(True, self.HOSTREGISTRY.is_host_test("host_test_mock_auto"))
def test_unregister_host_test(self):
self.HOSTREGISTRY.register_host_test(
"host_test_mock_2_auto", self.HostTestClassMock()
)
self.assertEqual(True, self.HOSTREGISTRY.is_host_test("host_test_mock_2_auto"))
self.assertNotEqual(
None, self.HOSTREGISTRY.get_host_test("host_test_mock_2_auto")
)
self.HOSTREGISTRY.unregister_host_test("host_test_mock_2_auto")
self.assertEqual(False, self.HOSTREGISTRY.is_host_test("host_test_mock_2_auto"))
def test_get_host_test(self):
self.HOSTREGISTRY.register_host_test(
"host_test_mock_3_auto", self.HostTestClassMock()
)
self.assertEqual(True, self.HOSTREGISTRY.is_host_test("host_test_mock_3_auto"))
self.assertNotEqual(
None, self.HOSTREGISTRY.get_host_test("host_test_mock_3_auto")
)
def test_is_host_test(self):
self.assertEqual(False, self.HOSTREGISTRY.is_host_test(""))
self.assertEqual(False, self.HOSTREGISTRY.is_host_test(None))
self.assertEqual(False, self.HOSTREGISTRY.is_host_test("xyz"))
def test_registered_host_tests_not_none(self):
for ht_name in self.HOSTREGISTRY.HOST_TESTS:
ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]
self.assertNotEqual(None, ht)
def test_host_test_has_lifecycle_methods(self):
for ht_name in self.HOSTREGISTRY.HOST_TESTS:
ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]
self.assertTrue(hasattr(ht, "setup"))
self.assertTrue(hasattr(ht, "result"))
self.assertTrue(hasattr(ht, "teardown"))
if __name__ == "__main__":
unittest.main()
|
ARMmbed/greentea
|
test/host_tests/host_registry.py
|
Python
|
apache-2.0
| 2,499
|
# -*- coding: utf-8 -*-
import types
import hasbug.store as store
import hasbug.validation as validation
import hasbug.user
import hasbug.shortener
OWNERSHIP_UPPER_LIMIT = 10
class Belongings(object):
def __init__(self, ownerships):
    self._ownerships = ownerships
@property
def shortener_hosts(self):
return [ o.which for o in self._ownerships if o.what == hasbug.Shortener.bag_name ]
@property
def has_shortener_hosts(self):
return 0 < len(self.shortener_hosts)
@property
def len(self):
return len(self.shortener_hosts)
@property
def reaches_upper_limit(self):
return OWNERSHIP_UPPER_LIMIT <= self.len
class Ownership(store.Stuff):
bag_name = "ownerships"
attributes = [store.StuffKey("owner_key"), store.StuffAttr("what"), store.StuffOrd("which")]
@classmethod
def make(cls, owner_key, what, which):
return cls({ "owner_key": owner_key, "what": what, "which": which })
@store.storing
@classmethod
def add_shortener(cls, repo, shortener):
belongings = repo.belongings_for(shortener.added_by)
if belongings and belongings.reaches_upper_limit:
    raise validation.raise_validation_error(
        shortener, "added_by", "The owner {owner} already has too many shorteners".format(owner=shortener.added_by_login))
sig = repo.pattern_signatures.ensure(shortener.pattern)
if not sig.worth_adding(shortener.pattern, shortener.host):
covered_by = sig.hosts_for(shortener.pattern)
raise validation.raise_validation_error(
shortener, "pattern", "The pattern is covered by {host}".format(host=covered_by[0]))
sig.add(shortener.pattern, shortener.host)
# FIXME: Here is a race condition. (We can live with it though).
repo.shorteners.add(shortener)
repo.pattern_signatures.add(sig, can_replace=True)
return repo.ownerships.add(Ownership.make(shortener.added_by, shortener.bag_name, shortener.key))
@store.storing
@classmethod
def remove_shortener(cls, repo, shortener):
sig = None
try:
sig = repo.pattern_signatures.find_by_pattern(shortener.pattern)
sig.remove(shortener.pattern)
except store.ItemNotFoundError:
pass
repo.ownerships.remove_found(shortener.added_by, shortener.key)
if sig:
repo.pattern_signatures.add(sig, can_replace=True)
repo.shorteners.remove(shortener)
@store.storing
@classmethod
def update_shortener(cls, repo, shortener):
toupdate = repo.shorteners.find(shortener.host)
# FIXME: Once we support named shorteners, this won't be able to stay this naive.
repo.remove_shortener(toupdate)
repo.add_shortener(shortener)
@store.storing
@classmethod
def belongings_for(cls, repo, owner):
key = owner if isinstance(owner, types.StringType) or isinstance(owner, types.UnicodeType) else owner.key
return Belongings(repo.ownerships.query(key))
|
omo/hasb.ug
|
hasbug/ownership.py
|
Python
|
bsd-3-clause
| 3,379
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from copy import deepcopy
import subprocess
import time
import platform
import random
from atomic_reactor.constants import (IMAGE_TYPE_DOCKER_ARCHIVE, IMAGE_TYPE_OCI, IMAGE_TYPE_OCI_TAR,
DOCKER_PUSH_MAX_RETRIES, DOCKER_PUSH_BACKOFF_FACTOR)
from atomic_reactor.plugin import PostBuildPlugin
from atomic_reactor.plugins.exit_remove_built_image import defer_removal
from atomic_reactor.plugins.pre_reactor_config import (get_registries, get_group_manifests,
get_koji_session,
get_registries_organization,
get_image_size_limit)
from atomic_reactor.plugins.pre_fetch_sources import PLUGIN_FETCH_SOURCES_KEY
from atomic_reactor.util import (get_manifest_digests, get_config_from_registry, Dockercfg,
get_all_manifests)
from osbs.utils import ImageName
import osbs.utils
from osbs.constants import RAND_DIGITS
__all__ = ('TagAndPushPlugin', )
class ExceedsImageSizeError(RuntimeError):
"""Error of exceeding image size"""
class TagAndPushPlugin(PostBuildPlugin):
"""
Use tags from workflow.tag_conf and push the images to workflow.push_conf
"""
key = "tag_and_push"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, registries=None, koji_target=None):
"""
constructor
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param registries: dict, keys are docker registries, values are dicts containing
per-registry parameters.
Params:
* "insecure" optional boolean - controls whether pushes are allowed over
plain HTTP.
* "secret" optional string - path to the secret, which stores
email, login and password for remote registry
:param koji_target: str, used only for source containers
"""
# call parent constructor
super(TagAndPushPlugin, self).__init__(tasker, workflow)
self.registries = get_registries(self.workflow, deepcopy(registries or {}))
self.group = get_group_manifests(self.workflow, False)
self.koji_target = koji_target
def need_skopeo_push(self):
if len(self.workflow.exported_image_sequence) > 0:
last_image = self.workflow.exported_image_sequence[-1]
if last_image['type'] == IMAGE_TYPE_OCI or last_image['type'] == IMAGE_TYPE_OCI_TAR:
return True
return False
def push_with_skopeo(self, registry_image, insecure, docker_push_secret,
source_oci_image_path=None):
cmd = ['skopeo', 'copy']
if docker_push_secret is not None:
dockercfg = Dockercfg(docker_push_secret)
cmd.append('--authfile=' + dockercfg.json_secret_path)
if insecure:
cmd.append('--dest-tls-verify=false')
if not source_oci_image_path:
# If the last image has type OCI_TAR, then hunt back and find the
# untarred version, since skopeo only supports OCI images as an
# untarred directory
image = [x for x in self.workflow.exported_image_sequence if
x['type'] != IMAGE_TYPE_OCI_TAR][-1]
if image['type'] == IMAGE_TYPE_OCI:
source_img = 'oci:{path}:{ref_name}'.format(**image)
elif image['type'] == IMAGE_TYPE_DOCKER_ARCHIVE:
source_img = 'docker-archive://{path}'.format(**image)
else:
raise RuntimeError("Attempt to push unsupported image type %s with skopeo" %
image['type'])
else:
source_img = 'oci:{}'.format(source_oci_image_path)
dest_img = 'docker://' + registry_image.to_str()
cmd += [source_img, dest_img]
self.log.info("Calling: %s", ' '.join(cmd))
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.log.error("push failed with output:\n%s", e.output)
raise
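# A fully composed command might look like this (paths and registry are
# hypothetical, for illustration only):
#   skopeo copy --authfile=/run/secrets/push/.dockercfg.json \
#       --dest-tls-verify=false \
#       oci:/tmp/exported-image:latest \
#       docker://registry.example.com/namespace/app:1.0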
def source_get_unique_image(self):
source_result = self.workflow.prebuild_results[PLUGIN_FETCH_SOURCES_KEY]
koji_build_id = source_result['sources_for_koji_build_id']
kojisession = get_koji_session(self.workflow)
timestamp = osbs.utils.utcnow().strftime('%Y%m%d%H%M%S')
random.seed()
current_platform = platform.processor() or 'x86_64'
tag_segments = [
self.koji_target or 'none',
str(random.randrange(10**(RAND_DIGITS - 1), 10**RAND_DIGITS)),
timestamp,
current_platform
]
tag = '-'.join(tag_segments)
get_build_meta = kojisession.getBuild(koji_build_id)
pull_specs = get_build_meta['extra']['image']['index']['pull']
source_image_spec = ImageName.parse(pull_specs[0])
source_image_spec.tag = tag
organization = get_registries_organization(self.workflow)
if organization:
source_image_spec.enclose(organization)
source_image_spec.registry = None
return source_image_spec
def run(self):
pushed_images = []
source_oci_image_path = self.workflow.build_result.oci_image_path
if source_oci_image_path:
source_unique_image = self.source_get_unique_image()
if not self.workflow.tag_conf.unique_images:
if source_oci_image_path:
self.workflow.tag_conf.add_unique_image(source_unique_image)
else:
self.workflow.tag_conf.add_unique_image(self.workflow.image)
config_manifest_digest = None
config_manifest_type = None
config_registry_image = None
image_size_limit = get_image_size_limit(self.workflow)
for registry, registry_conf in self.registries.items():
insecure = registry_conf.get('insecure', False)
push_conf_registry = \
self.workflow.push_conf.add_docker_registry(registry, insecure=insecure)
docker_push_secret = registry_conf.get('secret', None)
self.log.info("Registry %s secret %s", registry, docker_push_secret)
for image in self.workflow.tag_conf.images:
if image.registry:
raise RuntimeError("Image name must not contain registry: %r" % image.registry)
if not source_oci_image_path:
image_size = sum(item['size'] for item in self.workflow.layer_sizes)
config_image_size = image_size_limit['binary_image']
# Only handle the case when size is set > 0 in config
if config_image_size and image_size > config_image_size:
raise ExceedsImageSizeError(
'The size {} of image {} exceeds the limitation {} '
'configured in reactor config.'
.format(image_size, image, image_size_limit)
)
registry_image = image.copy()
registry_image.registry = registry
max_retries = DOCKER_PUSH_MAX_RETRIES
for retry in range(max_retries + 1):
if self.need_skopeo_push() or source_oci_image_path:
self.push_with_skopeo(registry_image, insecure, docker_push_secret,
source_oci_image_path)
else:
self.tasker.tag_and_push_image(self.workflow.builder.image_id,
registry_image, insecure=insecure,
force=True, dockercfg=docker_push_secret)
if source_oci_image_path:
manifests_dict = get_all_manifests(registry_image, registry, insecure,
docker_push_secret, versions=('v2',))
try:
koji_source_manifest_response = manifests_dict['v2']
except KeyError as exc:
raise RuntimeError(
f'Unable to fetch v2 schema 2 digest for {registry_image.to_str()}'
) from exc
self.workflow.koji_source_manifest = koji_source_manifest_response.json()
digests = get_manifest_digests(registry_image, registry,
insecure, docker_push_secret)
if (not (digests.v2 or digests.oci) and (retry < max_retries)):
sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2 ** retry)
self.log.info("Retrying push because V2 schema 2 or "
"OCI manifest not found in %is", sleep_time)
time.sleep(sleep_time)
else:
if not self.need_skopeo_push():
defer_removal(self.workflow, registry_image)
break
pushed_images.append(registry_image)
tag = registry_image.to_str(registry=False)
push_conf_registry.digests[tag] = digests
if not config_manifest_digest and (digests.v2 or digests.oci):
if digests.v2:
config_manifest_digest = digests.v2
config_manifest_type = 'v2'
else:
config_manifest_digest = digests.oci
config_manifest_type = 'oci'
config_registry_image = registry_image
if config_manifest_digest:
push_conf_registry.config = get_config_from_registry(
config_registry_image, registry, config_manifest_digest, insecure,
docker_push_secret, config_manifest_type)
else:
self.log.info("V2 schema 2 or OCI manifest is not available to get config from")
self.log.info("All images were tagged and pushed")
return pushed_images
|
DBuildService/atomic-reactor
|
atomic_reactor/plugins/post_tag_and_push.py
|
Python
|
bsd-3-clause
| 10,695
|
from app import db
import datetime
from geoalchemy2 import Geometry
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy import Column
import sqlalchemy
from slugify import slugify
from flask.ext.login import UserMixin
class User(UserMixin, db.Model):
__tablename__ = "users"
id = Column(UUID(as_uuid=True),
server_default=sqlalchemy.text("uuid_generate_v4()"), primary_key=True)
session_token = Column(UUID(as_uuid=True),
server_default=sqlalchemy.text("uuid_generate_v4()"), unique=True)
permissions_group = db.Column(db.String, default='user') # user, staff, admin
ddw_access_token = db.Column(db.String)
ddw_token_expires_in = db.Column(db.Integer)
ddw_avatar_url = db.Column(db.String)
nickname = db.Column(db.String)
social_id = db.Column(db.String, unique=True)
ddw_user_created = db.Column(db.Date)
ddw_user_updated = db.Column(db.Date)
data = db.Column(postgresql.JSONB, nullable=True)
def get_id(self):
return unicode(self.session_token)
|
hardworkingcoder/dw_experiments
|
models.py
|
Python
|
mit
| 1,081
|
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2018, Raffaele Salmaso <raffaele@salmaso.org>
# Copyright (c) 2012 Omoto Kenji
# Copyright (c) 2011 Sam Stephenson
# Copyright (c) 2011 Josh Peek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import json
import re
import os
from subprocess import Popen, PIPE, STDOUT
import tempfile
from .exceptions import RuntimeError, ProgramError, RuntimeUnavailable
from .utils import json2_source, which
def encode_unicode_codepoints(str):
r"""
>>> encode_unicode_codepoints("a") == 'a'
True
>>> ascii = ''.join(chr(i) for i in range(0x80))
>>> encode_unicode_codepoints(ascii) == ascii
True
>>> encode_unicode_codepoints('\u4e16\u754c') == '\\u4e16\\u754c'
True
"""
codepoint_format = '\\u{0:04x}'.format
def codepoint(m):
return codepoint_format(ord(m.group(0)))
return re.sub('[^\x00-\x7f]', codepoint, str)
class Runtime(object):
def __init__(self, name, command, runner_source, encoding='utf8'):
self._name = name
if isinstance(command, str):
command = [command]
self._command = command
self._runner_source = runner_source
self._encoding = encoding
def __str__(self):
return "{class_name}({runtime_name})".format(
class_name=type(self).__name__,
runtime_name=self._name,
)
@property
def name(self):
return self._name
def exec_(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self).exec_(source)
def eval(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self).eval(source)
def compile(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self, source)
def is_available(self):
return self._binary() is not None
def runner_source(self):
return self._runner_source
def _binary(self):
"""protected"""
if not hasattr(self, "_binary_cache"):
self._binary_cache = which(self._command)
return self._binary_cache
def _execfile(self, filename):
"""protected"""
cmd = self._binary() + [filename]
p = None
try:
p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
stdoutdata, stderrdata = p.communicate()
ret = p.wait()
finally:
del p
if ret == 0:
return stdoutdata
else:
raise RuntimeError(stdoutdata)
class Context(object):
def __init__(self, runtime, source=''):
self._runtime = runtime
self._source = source
def eval(self, source):
if not source.strip():
data = "''"
else:
data = "'('+" + json.dumps(source, ensure_ascii=True) + "+')'"
code = 'return eval({data})'.format(data=data)
return self.exec_(code)
def exec_(self, source):
if self._source:
source = self._source + '\n' + source
(fd, filename) = tempfile.mkstemp(prefix='babeljs', suffix='.js')
os.close(fd)
try:
with io.open(filename, "w+", encoding=self._runtime._encoding) as fp:
fp.write(self._compile(source))
output = self._runtime._execfile(filename)
finally:
os.remove(filename)
output = output.decode(self._runtime._encoding)
output = output.replace("\r\n", "\n").replace("\r", "\n")
output = self._extract_result(output.split("\n")[-2])
return output
def call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
def _compile(self, source):
"""protected"""
runner_source = self._runtime.runner_source()
replacements = {
'#{source}': lambda: source,
'#{encoded_source}': lambda: json.dumps(
"(function(){ " +
encode_unicode_codepoints(source) +
" })()"
),
'#{json2_source}': json2_source,
}
pattern = "|".join(re.escape(k) for k in replacements)
runner_source = re.sub(pattern, lambda m: replacements[m.group(0)](), runner_source)
return runner_source
def _extract_result(self, output_last_line):
"""protected"""
if not output_last_line:
status = value = None
else:
ret = json.loads(output_last_line)
if len(ret) == 1:
ret = [ret[0], None]
status, value = ret
if status == "ok":
return value
elif value and value.startswith('SyntaxError:'):
raise RuntimeError(value)
else:
raise ProgramError(value)
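# Protocol sketch: the runner's last output line is a JSON pair, so
# _extract_result('["ok", 42]') returns 42, while a failure line such as
# '["err", "SyntaxError: ..."]' raises RuntimeError (any other error message
# raises ProgramError).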
class PyV8Runtime(object):
def __init__(self):
try:
import PyV8
except ImportError:
self._is_available = False
else:
self._is_available = True
@property
def name(self):
return "PyV8"
def exec_(self, source):
return self.Context().exec_(source)
def eval(self, source):
return self.Context().eval(source)
def compile(self, source):
return self.Context(source)
def is_available(self):
return self._is_available
class Context:
def __init__(self, source=""):
self._source = source
def exec_(self, source):
source = '''\
(function() {{
{0};
{1};
}})()'''.format(
encode_unicode_codepoints(self._source),
encode_unicode_codepoints(source)
)
source = str(source)
import PyV8
import contextlib
# backward compatibility: contextlib.nested exists only on Python 2
with contextlib.nested(PyV8.JSContext(), PyV8.JSEngine()) as (ctxt, engine):
js_errors = (PyV8.JSError, IndexError, ReferenceError, SyntaxError, TypeError)
try:
script = engine.compile(source)
except js_errors as e:
raise RuntimeError(e)
try:
value = script.run()
except js_errors as e:
raise ProgramError(e)
return self.convert(value)
def eval(self, source):
return self.exec_('return ' + encode_unicode_codepoints(source))
def call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
@classmethod
def convert(cls, obj):
from PyV8 import _PyV8
if isinstance(obj, bytes):
return obj.decode('utf8')
if isinstance(obj, _PyV8.JSArray):
return [cls.convert(v) for v in obj]
elif isinstance(obj, _PyV8.JSFunction):
return None
elif isinstance(obj, _PyV8.JSObject):
ret = {}
for k in obj.keys():
v = cls.convert(obj[k])
if v is not None:
ret[cls.convert(k)] = v
return ret
else:
return obj
|
rsalmaso/django-babeljs
|
babeljs/execjs/runtime.py
|
Python
|
mit
| 8,806
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# $Id$
# Copyright (c) 2011, 2012 Technische Universität Dortmund
#
# This file is part of doyouspeakOCCI.
#
# doyouspeakOCCI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# doyouspeakOCCI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with doyouspeakOCCI. If not, see <http://www.gnu.org/licenses/>.
from google.appengine.ext import db
class Suite(db.Model):
"""
Stores information on each compliance test request.
TODO: not yet documented.
"""
date = db.DateTimeProperty(auto_now_add=True)
user = db.UserProperty()
service_uri = db.LinkProperty()
is_compliant = db.BooleanProperty()
def to_dict(self, with_tests=False, flatten_date=False):
result = {
'uuid': self.key().name(),
'date': self.date.isoformat() if flatten_date else self.date,
'service_uri': self.service_uri,
'is_compliant': self.is_compliant
}
if with_tests:
tests = []
for test in self.tests:
tests.append({'name': test.name, 'description': test.description, 'result': test.result})
result['tests'] = tests
return result
class Test(db.Model):
"""
Stores statistical data on each compliance test run.
TODO: not yet documented.
"""
suite = db.ReferenceProperty(Suite, collection_name='tests')
name = db.StringProperty()
description = db.TextProperty()
result = db.BooleanProperty()
def to_dict(self, with_details=True):
result = {
'name': self.name,
'description': self.description,
'result': self.result,
}
if with_details:
details = []
for detail in self.details:
details.append({'message': detail.message, 'response': detail.response})
result['details'] = details
return result
class Detail(db.Model):
"""
"""
test = db.ReferenceProperty(Test, collection_name='details')
message = db.TextProperty()
response = db.TextProperty()
# eof
|
irf/doyouspeakocci
|
dyso/model.py
|
Python
|
gpl-3.0
| 2,552
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 David Mandelberg
# Copyright (C) 2017-2018 Sambhav Kothari
# Copyright (C) 2017-2019 Laurent Monin
# Copyright (C) 2018-2020 Philipp Wolfer
# Copyright (C) 2019 Michael Wiencek
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from picard import config
from picard.const import RELEASE_FORMATS
from picard.util import (
format_time,
linear_combination_of_weights,
parse_amazon_url,
translate_from_sortname,
)
_artist_rel_types = {
"arranger": "arranger",
"audio": "engineer",
"chorus master": "conductor",
"composer": "composer",
"concertmaster": "performer:concertmaster",
"conductor": "conductor",
"engineer": "engineer",
"instrumentator": "arranger",
"librettist": "lyricist",
"live sound": "engineer",
"lyricist": "lyricist",
# "mastering": "engineer",
"mix-DJ": "djmixer",
"mix": "mixer",
"orchestrator": "arranger",
"performing orchestra": "performer:orchestra",
"producer": "producer",
# "recording": "engineer",
"remixer": "remixer",
"sound": "engineer",
"writer": "writer",
}
_TRACK_TO_METADATA = {
'number': '~musicbrainz_tracknumber',
'position': 'tracknumber',
'title': 'title',
}
_MEDIUM_TO_METADATA = {
'format': 'media',
'position': 'discnumber',
'title': 'discsubtitle',
'track-count': 'totaltracks',
}
_RECORDING_TO_METADATA = {
'disambiguation': '~recordingcomment',
'title': 'title',
}
_RELEASE_TO_METADATA = {
'asin': 'asin',
'barcode': 'barcode',
'country': 'releasecountry',
'date': 'date',
'disambiguation': '~releasecomment',
'title': 'album',
}
_ARTIST_TO_METADATA = {
'gender': 'gender',
'name': 'name',
'type': 'type',
}
_RELEASE_GROUP_TO_METADATA = {
'disambiguation': '~releasegroupcomment',
'first-release-date': 'originaldate',
'title': '~releasegroup',
}
def _decamelcase(text):
return re.sub(r'([A-Z])', r' \1', text).strip()
_REPLACE_MAP = {}
_PREFIX_ATTRS = ['guest', 'additional', 'minor', 'solo']
_BLANK_SPECIAL_RELTYPES = {'vocal': 'vocals'}
def _transform_attribute(attr, attr_credits):
if attr in attr_credits:
return attr_credits[attr]
else:
return _decamelcase(_REPLACE_MAP.get(attr, attr))
def _parse_attributes(attrs, reltype, attr_credits):
prefixes = []
nouns = []
for attr in attrs:
attr = _transform_attribute(attr, attr_credits)
if attr in _PREFIX_ATTRS:
prefixes.append(attr)
else:
nouns.append(attr)
prefix = ' '.join(prefixes)
if len(nouns) > 1:
result = '%s and %s' % (', '.join(nouns[:-1]), nouns[-1:][0])
elif len(nouns) == 1:
result = nouns[0]
else:
result = _BLANK_SPECIAL_RELTYPES.get(reltype, '')
return ' '.join([prefix, result]).strip().lower()
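# Examples (illustrative): _parse_attributes(['guest', 'violin'], 'instrument', {})
# yields 'guest violin', and _parse_attributes([], 'vocal', {}) falls back to
# the special-cased 'vocals'.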
def _relations_to_metadata(relations, m):
use_credited_as = not config.setting['standardize_artists']
use_instrument_credits = not config.setting['standardize_instruments']
for relation in relations:
if relation['target-type'] == 'artist':
artist = relation['artist']
value, valuesort = _translate_artist_node(artist)
has_translation = (value != artist['name'])
if not has_translation and use_credited_as and 'target-credit' in relation:
credited_as = relation['target-credit']
if credited_as:
value, valuesort = credited_as, credited_as
reltype = relation['type']
attribs = []
if 'attributes' in relation:
attribs = [a for a in relation['attributes']]
if reltype in ('vocal', 'instrument', 'performer'):
if use_instrument_credits:
attr_credits = relation.get('attribute-credits', {})
else:
attr_credits = {}
name = 'performer:' + _parse_attributes(attribs, reltype, attr_credits)
elif reltype == 'mix-DJ' and len(attribs) > 0:
if not hasattr(m, "_djmix_ars"):
m._djmix_ars = {}
for attr in attribs:
m._djmix_ars.setdefault(attr.split()[1], []).append(value)
continue
else:
try:
name = _artist_rel_types[reltype]
except KeyError:
continue
if value not in m[name]:
m.add(name, value)
if name == 'composer' and valuesort not in m['composersort']:
m.add('composersort', valuesort)
elif relation['target-type'] == 'work':
if relation['type'] == 'performance':
performance_to_metadata(relation, m)
work_to_metadata(relation['work'], m)
elif relation['target-type'] == 'url':
if relation['type'] == 'amazon asin' and 'asin' not in m:
amz = parse_amazon_url(relation['url']['resource'])
if amz is not None:
m['asin'] = amz['asin']
elif relation['type'] == 'license':
url = relation['url']['resource']
m.add('license', url)
def _translate_artist_node(node):
transl, translsort = None, None
if config.setting['translate_artist_names']:
locale = config.setting["artist_locale"]
lang = locale.split("_")[0]
if "aliases" in node:
result = (-1, (None, None))
for alias in node['aliases']:
if not alias["primary"]:
continue
if "locale" not in alias:
continue
parts = []
if alias['locale'] == locale:
score = 0.8
elif alias['locale'] == lang:
score = 0.6
elif alias['locale'].split("_")[0] == lang:
score = 0.4
else:
continue
parts.append((score, 5))
if alias["type"] == "Artist name":
score = 0.8
elif alias["type"] == "Legal Name":
score = 0.5
else:
                    # as of 2014/09/19, only Artist or Legal names should
                    # have the Primary flag
score = 0.0
parts.append((score, 5))
comb = linear_combination_of_weights(parts)
if comb > result[0]:
result = (comb, (alias['name'], alias["sort-name"]))
transl, translsort = result[1]
if not transl:
translsort = node['sort-name']
transl = translate_from_sortname(node['name'] or "", translsort)
else:
transl, translsort = node['name'], node['sort-name']
return (transl, translsort)
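# A note on the scoring above: each candidate alias contributes two weighted
# parts -- locale match (exact locale 0.8, bare language 0.6, regional
# variant of the same language 0.4) and alias type ('Artist name' 0.8,
# 'Legal Name' 0.5) -- and linear_combination_of_weights() picks the
# best-scoring primary alias for the configured artist_locale.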
def artist_credit_from_node(node):
artist = ""
artistsort = ""
artists = []
artistssort = []
use_credited_as = not config.setting["standardize_artists"]
for artist_info in node:
a = artist_info['artist']
translated, translated_sort = _translate_artist_node(a)
has_translation = (translated != a['name'])
if has_translation:
name = translated
elif use_credited_as and 'name' in artist_info:
name = artist_info['name']
else:
name = a['name']
artist += name
artistsort += translated_sort or ""
artists.append(name)
artistssort.append(translated_sort)
if 'joinphrase' in artist_info:
artist += artist_info['joinphrase'] or ""
artistsort += artist_info['joinphrase'] or ""
return (artist, artistsort, artists, artistssort)
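# Sketch of the expected shape, assuming a two-artist credit where the first
# entry carries a 'joinphrase' of ' feat. ': artist_credit_from_node() would
# return ('A feat. B', 'A feat. B', ['A', 'B'], ['A', 'B']) -- the join
# phrase is folded into the flat strings but not into the per-artist lists.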
def artist_credit_to_metadata(node, m, release=False):
ids = [n['artist']['id'] for n in node]
artist, artistsort, artists, artistssort = artist_credit_from_node(node)
if release:
m["musicbrainz_albumartistid"] = ids
m["albumartist"] = artist
m["albumartistsort"] = artistsort
m["~albumartists"] = artists
m["~albumartists_sort"] = artistssort
else:
m["musicbrainz_artistid"] = ids
m["artist"] = artist
m["artistsort"] = artistsort
m["artists"] = artists
m["~artists_sort"] = artistssort
def countries_from_node(node):
countries = []
if "release-events" in node:
for release_event in node['release-events']:
try:
country_code = release_event['area']['iso-3166-1-codes'][0]
# TypeError in case object is None
except (KeyError, IndexError, TypeError):
pass
else:
if country_code:
countries.append(country_code)
return countries
def release_dates_and_countries_from_node(node):
dates = []
countries = []
if "release-events" in node:
for release_event in node['release-events']:
dates.append(release_event['date'] or '')
country_code = ''
try:
country_code = release_event['area']['iso-3166-1-codes'][0]
# TypeError in case object is None
except (KeyError, IndexError, TypeError):
pass
countries.append(country_code)
return dates, countries
def label_info_from_node(node):
labels = []
catalog_numbers = []
for label_info in node:
if 'label' in label_info and label_info['label'] and 'name' in label_info['label']:
label = label_info['label']['name']
if label and label not in labels:
labels.append(label)
if 'catalog-number' in label_info:
cat_num = label_info['catalog-number']
if cat_num and cat_num not in catalog_numbers:
catalog_numbers.append(cat_num)
return (labels, catalog_numbers)
def media_formats_from_node(node):
formats_count = {}
formats_order = []
for medium in node:
text = medium.get('format', "(unknown)") or "(unknown)"
if text in formats_count:
formats_count[text] += 1
else:
formats_count[text] = 1
formats_order.append(text)
formats = []
for medium_format in formats_order:
count = formats_count[medium_format]
medium_format = RELEASE_FORMATS.get(medium_format, medium_format)
if count > 1:
medium_format = str(count) + "×" + medium_format
formats.append(medium_format)
return " + ".join(formats)
def track_to_metadata(node, track):
m = track.metadata
recording_to_metadata(node['recording'], m, track)
m.add_unique('musicbrainz_trackid', node['id'])
# overwrite with data we have on the track
for key, value in node.items():
if not value:
continue
if key in _TRACK_TO_METADATA:
m[_TRACK_TO_METADATA[key]] = value
elif key == 'length' and value:
m.length = value
elif key == 'artist-credit':
artist_credit_to_metadata(value, m)
if m.length:
m['~length'] = format_time(m.length)
def recording_to_metadata(node, m, track=None):
m.length = 0
m.add_unique('musicbrainz_recordingid', node['id'])
for key, value in node.items():
if not value:
continue
if key in _RECORDING_TO_METADATA:
m[_RECORDING_TO_METADATA[key]] = value
elif key == 'user-rating':
m['~rating'] = value['value']
elif key == 'length':
m.length = value
elif key == 'artist-credit':
artist_credit_to_metadata(value, m)
# set tags from artists
if track:
for credit in value:
artist = credit['artist']
artist_obj = track.append_track_artist(artist['id'])
add_genres_from_node(artist, artist_obj)
elif key == 'relations':
_relations_to_metadata(value, m)
elif key in ('genres', 'tags') and track:
add_genres(value, track)
elif key in ('user-genres', 'user-tags') and track:
add_user_genres(value, track)
elif key == 'isrcs':
add_isrcs_to_metadata(value, m)
elif key == 'video' and value:
m['~video'] = '1'
if m['title']:
m['~recordingtitle'] = m['title']
if m.length:
m['~length'] = format_time(m.length)
def performance_to_metadata(relation, m):
if 'attributes' in relation:
for attribute in relation['attributes']:
m.add_unique("~performance_attributes", attribute)
def work_to_metadata(work, m):
m.add_unique("musicbrainz_workid", work['id'])
if 'languages' in work:
for language in work['languages']:
m.add_unique("language", language)
elif 'language' in work:
m.add_unique("language", work['language'])
if 'title' in work:
m.add_unique("work", work['title'])
if 'relations' in work:
_relations_to_metadata(work['relations'], m)
def medium_to_metadata(node, m):
for key, value in node.items():
if not value:
continue
if key in _MEDIUM_TO_METADATA:
m[_MEDIUM_TO_METADATA[key]] = value
def artist_to_metadata(node, m):
"""Make meatadata dict from a JSON 'artist' node."""
m.add_unique("musicbrainz_artistid", node['id'])
for key, value in node.items():
if not value:
continue
if key in _ARTIST_TO_METADATA:
m[_ARTIST_TO_METADATA[key]] = value
elif key == "area":
m["area"] = value['name']
elif key == "life-span":
if "begin" in value:
m["begindate"] = value['begin']
if "ended" in value:
ended = value['ended']
if ended and "end" in value:
m["enddate"] = value['end']
elif key == "begin-area":
m["beginarea"] = value['name']
elif key == "end-area":
m["endarea"] = value['name']
def release_to_metadata(node, m, album=None):
"""Make metadata dict from a JSON 'release' node."""
m.add_unique('musicbrainz_albumid', node['id'])
for key, value in node.items():
if not value:
continue
if key in _RELEASE_TO_METADATA:
m[_RELEASE_TO_METADATA[key]] = value
elif key == 'status':
m['releasestatus'] = value.lower()
elif key == 'artist-credit':
artist_credit_to_metadata(value, m, release=True)
# set tags from artists
if album is not None:
for credit in value:
artist = credit['artist']
artist_obj = album.append_album_artist(artist['id'])
add_genres_from_node(artist, artist_obj)
elif key == 'relations':
_relations_to_metadata(value, m)
elif key == 'label-info':
m['label'], m['catalognumber'] = label_info_from_node(value)
elif key == 'text-representation':
if 'language' in value:
m['~releaselanguage'] = value['language']
if 'script' in value:
m['script'] = value['script']
m['~releasecountries'] = release_countries = countries_from_node(node)
# The MB web service returns the first release country in the country tag.
# If the user has configured preferred release countries, use the first one
# if it is one in the complete list of release countries.
for country in config.setting["preferred_release_countries"]:
if country in release_countries:
m['releasecountry'] = country
break
add_genres_from_node(node, album)
def release_group_to_metadata(node, m, release_group=None):
"""Make metadata dict from a JSON 'release-group' node taken from inside a 'release' node."""
m.add_unique('musicbrainz_releasegroupid', node['id'])
for key, value in node.items():
if not value:
continue
if key in _RELEASE_GROUP_TO_METADATA:
m[_RELEASE_GROUP_TO_METADATA[key]] = value
elif key == 'primary-type':
m['~primaryreleasetype'] = value.lower()
elif key == 'secondary-types':
add_secondary_release_types(value, m)
add_genres_from_node(node, release_group)
if m['originaldate']:
m['originalyear'] = m['originaldate'][:4]
m['releasetype'] = m.getall('~primaryreleasetype') + m.getall('~secondaryreleasetype')
def add_secondary_release_types(node, m):
for secondary_type in node:
m.add_unique('~secondaryreleasetype', secondary_type.lower())
def add_genres_from_node(node, obj):
if obj is None:
return
if 'genres' in node:
add_genres(node['genres'], obj)
if 'tags' in node:
add_genres(node['tags'], obj)
if 'user-genres' in node:
add_user_genres(node['user-genres'], obj)
if 'user-tags' in node:
add_user_genres(node['user-tags'], obj)
def add_genres(node, obj):
for tag in node:
key = tag['name']
count = tag['count']
if key:
obj.add_genre(key, count)
def add_user_genres(node, obj):
for tag in node:
key = tag['name']
if key:
obj.add_genre(key, 1)
def add_isrcs_to_metadata(node, metadata):
for isrc in node:
metadata.add('isrc', isrc)
def get_score(node):
"""Returns the score attribute for a node.
The score is expected to be an integer between 0 and 100, it is returned as
a value between 0.0 and 1.0. If there is no score attribute or it has an
invalid value 1.0 will be returned.
"""
try:
return int(node.get('score', 100)) / 100
except (TypeError, ValueError):
return 1.0
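# For example, get_score({'score': '85'}) == 0.85, get_score({}) == 1.0, and
# a malformed score such as {'score': 'n/a'} also falls back to 1.0.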
|
Sophist-UK/Sophist_picard
|
picard/mbjson.py
|
Python
|
gpl-2.0
| 18,697
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0041_auto_20160214_1225'),
]
operations = [
migrations.AlterField(
model_name='event',
name='status',
field=models.CharField(default='d', max_length=1, choices=[('d', 'utkast'), ('b', 'väntar på godkännande'), ('r', 'Avslaget'), ('a', 'Godkänt')]),
),
]
|
I-sektionen/i-portalen
|
wsgi/iportalen_django/events/migrations/0042_auto_20160215_2024.py
|
Python
|
mit
| 512
|
# -*- coding: utf-8 -*-
"""The json serializer object implementation."""
import binascii
import collections
import json
from dfvfs.path import path_spec as dfvfs_path_spec
from dfvfs.path import factory as dfvfs_path_spec_factory
from plaso.containers import interface as containers_interface
from plaso.containers import manager as containers_manager
from plaso.lib import py2to3
from plaso.serializer import interface
class JSONAttributeContainerSerializer(interface.AttributeContainerSerializer):
"""Class that implements the json attribute container serializer."""
@classmethod
def _ConvertAttributeContainerToDict(cls, attribute_container):
"""Converts an attribute container object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'AttributeContainer'
'__container_type__': ...
...
}
Here '__type__' indicates the object base type. In this case
'AttributeContainer'.
'__container_type__' indicates the container type and rest of the elements
of the dictionary make up the attributes of the container.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
dict[str, object]: JSON serialized objects.
Raises:
TypeError: if not an instance of AttributeContainer.
ValueError: if the attribute container type is not supported.
"""
if not isinstance(
attribute_container, containers_interface.AttributeContainer):
raise TypeError(u'{0:s} is not an attribute container type.'.format(
type(attribute_container)))
container_type = getattr(attribute_container, u'CONTAINER_TYPE', None)
if not container_type:
raise ValueError(u'Unsupported attribute container type: {0:s}.'.format(
type(attribute_container)))
json_dict = {
u'__type__': u'AttributeContainer',
u'__container_type__': container_type,
}
for attribute_name, attribute_value in attribute_container.GetAttributes():
if attribute_value is None:
continue
json_dict[attribute_name] = cls._ConvertAttributeValueToDict(
attribute_value)
return json_dict
@classmethod
def _ConvertAttributeValueToDict(cls, attribute_value):
"""Converts an attribute value into a JSON dictionary.
Args:
attribute_value: an attribute value.
Returns:
The JSON serialized object which can be:
* a dictionary;
* a list.
"""
if isinstance(attribute_value, py2to3.BYTES_TYPE):
attribute_value = {
u'__type__': u'bytes',
u'stream': u'{0:s}'.format(binascii.b2a_qp(attribute_value))
}
elif isinstance(attribute_value, (list, tuple)):
json_list = []
for list_element in attribute_value:
json_dict = cls._ConvertAttributeValueToDict(list_element)
json_list.append(json_dict)
if isinstance(attribute_value, list):
attribute_value = json_list
else:
attribute_value = {
u'__type__': u'tuple',
u'values': json_list
}
elif isinstance(attribute_value, collections.Counter):
attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)
elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):
attribute_value = cls._ConvertPathSpecToDict(attribute_value)
elif isinstance(attribute_value, containers_interface.AttributeContainer):
attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)
return attribute_value
@classmethod
def _ConvertCollectionsCounterToDict(cls, collections_counter):
"""Converts a collections.Counter object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'collections.Counter'
...
}
Here '__type__' indicates the object base type. In this case
'collections.Counter'. The rest of the elements of the dictionary make up
the collections.Counter object attributes.
Args:
collections_counter (collections.Counter): counter.
Returns:
dict[str, object]: JSON serialized objects.
Raises:
TypeError: if not an instance of collections.Counter.
"""
if not isinstance(collections_counter, collections.Counter):
raise TypeError
json_dict = {u'__type__': u'collections.Counter'}
for attribute_name, attribute_value in iter(collections_counter.items()):
if attribute_value is None:
continue
if isinstance(attribute_value, py2to3.BYTES_TYPE):
attribute_value = {
u'__type__': u'bytes',
u'stream': u'{0:s}'.format(binascii.b2a_qp(attribute_value))
}
json_dict[attribute_name] = attribute_value
return json_dict
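  # Illustration: collections.Counter({u'event': 2}) serializes to
  # {u'__type__': u'collections.Counter', u'event': 2}, and
  # _ConvertDictToCollectionsCounter() below restores it.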
@classmethod
def _ConvertDictToObject(cls, json_dict):
"""Converts a JSON dict into an object.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'AttributeContainer'
'__container_type__': ...
...
}
Here '__type__' indicates the object base type. In this case
'AttributeContainer'.
'__container_type__' indicates the attribute container type.
The rest of the elements of the dictionary make up the attributes.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
A deserialized object which can be:
* an attribute container (instance of AttributeContainer);
* a dictionary;
* a list;
* a tuple.
Raises:
ValueError: if the class type or container type is not supported.
"""
# Use __type__ to indicate the object class type.
class_type = json_dict.get(u'__type__', None)
if not class_type:
# Dealing with a regular dict.
return json_dict
if class_type == u'bytes':
return binascii.a2b_qp(json_dict[u'stream'])
elif class_type == u'tuple':
return tuple(cls._ConvertListToObject(json_dict[u'values']))
elif class_type == u'collections.Counter':
return cls._ConvertDictToCollectionsCounter(json_dict)
elif class_type == u'AttributeContainer':
# Use __container_type__ to indicate the attribute container type.
container_type = json_dict.get(u'__container_type__', None)
# Since we would like the JSON as flat as possible we handle decoding
# a path specification.
elif class_type == u'PathSpec':
return cls._ConvertDictToPathSpec(json_dict)
else:
raise ValueError(u'Unsupported class type: {0:s}'.format(class_type))
container_class = (
containers_manager.AttributeContainersManager.GetAttributeContainer(
container_type))
if not container_class:
raise ValueError(u'Unsupported container type: {0:s}'.format(
container_type))
container_object = container_class()
for attribute_name, attribute_value in iter(json_dict.items()):
if attribute_name.startswith(u'__'):
continue
# Event tags should be serialized separately.
# TODO: remove when analysis report no longer defines event tags.
if (container_type == u'analysis_report' and
attribute_name == u'_event_tags'):
continue
# Be strict about which attributes to set in non event objects.
if (container_type != u'event' and
attribute_name not in container_object.__dict__):
continue
if isinstance(attribute_value, dict):
attribute_value = cls._ConvertDictToObject(attribute_value)
elif isinstance(attribute_value, list):
attribute_value = cls._ConvertListToObject(attribute_value)
setattr(container_object, attribute_name, attribute_value)
return container_object
@classmethod
def _ConvertDictToCollectionsCounter(cls, json_dict):
"""Converts a JSON dict into a collections.Counter.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'collections.Counter'
...
}
Here '__type__' indicates the object base type. In this case this should
be 'collections.Counter'. The rest of the elements of the dictionary make up
the preprocessing object properties.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
collections.Counter: counter.
"""
collections_counter = collections.Counter()
for key, value in iter(json_dict.items()):
if key == u'__type__':
continue
collections_counter[key] = value
return collections_counter
@classmethod
def _ConvertListToObject(cls, json_list):
"""Converts a JSON list into an object.
Args:
json_list: a list of the JSON serialized objects.
Returns:
A deserialized list.
"""
list_value = []
for json_list_element in json_list:
if isinstance(json_list_element, dict):
list_value.append(cls._ConvertDictToObject(json_list_element))
elif isinstance(json_list_element, list):
list_value.append(cls._ConvertListToObject(json_list_element))
else:
list_value.append(json_list_element)
return list_value
@classmethod
def _ConvertDictToPathSpec(cls, json_dict):
"""Converts a JSON dict into a path specification object.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'PathSpec'
'type_indicator': 'OS'
'parent': { ... }
...
}
Here '__type__' indicates the object base type. In this case this should
be 'PathSpec'. The rest of the elements of the dictionary make up the
path specification object properties.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
path.PathSpec: path specification.
"""
type_indicator = json_dict.get(u'type_indicator', None)
if type_indicator:
del json_dict[u'type_indicator']
if u'parent' in json_dict:
json_dict[u'parent'] = cls._ConvertDictToPathSpec(json_dict[u'parent'])
# Remove the class type from the JSON dict since we cannot pass it.
del json_dict[u'__type__']
return dfvfs_path_spec_factory.Factory.NewPathSpec(
type_indicator, **json_dict)
@classmethod
def _ConvertPathSpecToDict(cls, path_spec_object):
"""Converts a path specification object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'PathSpec'
'type_indicator': 'OS'
'parent': { ... }
...
}
Here '__type__' indicates the object base type. In this case 'PathSpec'.
The rest of the elements of the dictionary make up the path specification
object properties. The supported property names are defined in
path_spec_factory.Factory.PROPERTY_NAMES. Note that this method is called
recursively for every path specification object and creates a dict of
dicts in the process.
Args:
path_spec_object (dfvfs.PathSpec): path specification.
Returns:
dict[str, object]: JSON serialized objects.
Raises:
TypeError: if not an instance of dfvfs.PathSpec.
"""
if not isinstance(path_spec_object, dfvfs_path_spec.PathSpec):
raise TypeError
json_dict = {u'__type__': u'PathSpec'}
for property_name in dfvfs_path_spec_factory.Factory.PROPERTY_NAMES:
property_value = getattr(path_spec_object, property_name, None)
if property_value is not None:
json_dict[property_name] = property_value
if path_spec_object.HasParent():
json_dict[u'parent'] = cls._ConvertPathSpecToDict(path_spec_object.parent)
json_dict[u'type_indicator'] = path_spec_object.type_indicator
location = getattr(path_spec_object, u'location', None)
if location:
json_dict[u'location'] = location
return json_dict
@classmethod
def ReadSerialized(cls, json_string):
"""Reads an attribute container from serialized form.
Args:
json_string: a JSON string containing the serialized form.
Returns:
AttributeContainer: attribute container or None.
"""
if not json_string:
return
json_dict = json.loads(json_string)
return cls.ReadSerializedDict(json_dict)
@classmethod
def ReadSerializedDict(cls, json_dict):
"""Reads an attribute container from serialized dictionary form.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
AttributeContainer: attribute container or None.
"""
if not json_dict:
return
return cls._ConvertDictToObject(json_dict)
@classmethod
def WriteSerialized(cls, attribute_container):
"""Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
A JSON string containing the serialized form.
"""
json_dict = cls.WriteSerializedDict(attribute_container)
return json.dumps(json_dict)
@classmethod
def WriteSerializedDict(cls, attribute_container):
"""Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
dict[str, object]: JSON serialized objects.
"""
return cls._ConvertAttributeContainerToDict(attribute_container)
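# Hedged round-trip sketch using only the methods defined above:
#
#   json_string = JSONAttributeContainerSerializer.WriteSerialized(container)
#   container = JSONAttributeContainerSerializer.ReadSerialized(json_string)
#
# Both directions pass through the dict form, so WriteSerializedDict() and
# ReadSerializedDict() can be used when embedding containers in a larger
# JSON document.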
|
dc3-plaso/plaso
|
plaso/serializer/json_serializer.py
|
Python
|
apache-2.0
| 13,349
|
from __future__ import division, print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.misc import logsumexp
from autograd.convenience_wrappers import value_and_grad as vgrad
from functools import partial
from os.path import join, dirname
import string
import sys
def EM(init_params, data, callback=None):
def EM_update(params):
natural_params = map(np.log, params)
loglike, E_stats = vgrad(log_partition_function)(natural_params, data) # E step
if callback: callback(loglike, params)
return map(normalize, E_stats) # M step
def fixed_point(f, x0):
x1 = f(x0)
while different(x0, x1):
x0, x1 = x1, f(x1)
return x1
def different(params1, params2):
allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
return not all(map(allclose, params1, params2))
return fixed_point(EM_update, init_params)
def normalize(a):
def replace_zeros(a):
return np.where(a > 0., a, 1.)
return a / replace_zeros(a.sum(-1, keepdims=True))
def log_partition_function(natural_params, data):
if isinstance(data, list):
return sum(map(partial(log_partition_function, natural_params), data))
log_pi, log_A, log_B = natural_params
log_alpha = log_pi
for y in data:
log_alpha = logsumexp(log_alpha[:,None] + log_A, axis=0) + log_B[:,y]
return logsumexp(log_alpha)
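# A note on the recursion above: log_alpha is the HMM forward pass in log
# space. After consuming an observation y,
#   log_alpha[j] = logsumexp_i(log_alpha[i] + log_A[i, j]) + log_B[j, y]
# so logsumexp(log_alpha) is log p(data); autograd's gradient of this with
# respect to the natural (log) parameters yields the expected sufficient
# statistics consumed by the E step in EM() above.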
def initialize_hmm_parameters(num_states, num_outputs):
init_pi = normalize(npr.rand(num_states))
init_A = normalize(npr.rand(num_states, num_states))
init_B = normalize(npr.rand(num_states, num_outputs))
return init_pi, init_A, init_B
def build_dataset(filename, max_lines=-1):
"""Loads a text file, and turns each line into an encoded sequence."""
encodings = dict(map(reversed, enumerate(string.printable)))
digitize = lambda char: encodings[char] if char in encodings else len(encodings)
encode_line = lambda line: np.array(list(map(digitize, line)))
nonblank_line = lambda line: len(line) > 2
with open(filename) as f:
lines = f.readlines()
encoded_lines = map(encode_line, filter(nonblank_line, lines)[:max_lines])
num_outputs = len(encodings) + 1
return encoded_lines, num_outputs
if __name__ == '__main__':
np.random.seed(0)
np.seterr(divide='ignore')
# callback to print log likelihoods during training
print_loglike = lambda loglike, params: print(loglike)
# load training data
lstm_filename = join(dirname(__file__), 'lstm.py')
train_inputs, num_outputs = build_dataset(lstm_filename, max_lines=60)
# train with EM
num_states = 20
init_params = initialize_hmm_parameters(num_states, num_outputs)
pi, A, B = EM(init_params, train_inputs, print_loglike)
|
barak/autograd
|
examples/hmm_em.py
|
Python
|
mit
| 2,862
|
#!/usr/bin/python
import struct
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 12777))
sock.listen(1)
conn, addr = sock.accept()
data = ''
while 1:
    while len(data) < 16:
        chunk = conn.recv(1024)
        if not chunk: break
        data += chunk
    if len(data) < 16: break
i, size = struct.unpack('QQ', data[0:16])
print("Received id %s with %s bytes" % (i, size))
    while len(data) < size + 16:
        chunk = conn.recv(1024)
        if not chunk: break
        data += chunk
    if len(data) < size + 16: break
# data = data[16+size:]
for j in range(0, size):
print struct.unpack('B', data[16+j])
data = data[16+size:]
sock.close()
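# Hedged companion sketch (not part of the original script): a matching
# sender writes an 8-byte id, an 8-byte size, then `size` payload bytes:
#
#   out = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   out.connect(('127.0.0.1', 12777))
#   payload = '\x01\x02\x03'
#   out.sendall(struct.pack('QQ', 1, len(payload)) + payload)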
|
jonstewart/liblightgrep
|
pytest/receive_data.py
|
Python
|
gpl-3.0
| 634
|
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get Application properties that have been generated by `create-configs`."""
import json
import logging
LOG = logging.getLogger(__name__)
def get_properties(properties_file='raw.properties.json', env=None, region=None):
"""Get contents of _properties_file_ for the _env_.
Args:
properties_file (str): File name of `create-configs` JSON output.
        env (str): Environment to read properties for; optional.
region (str): Region to get specific configs for.
Returns:
dict: JSON loaded Application properties for _env_.
None: Given _env_ was not found in `create-configs` JSON output.
"""
with open(properties_file, 'rt') as file_handle:
properties = json.load(file_handle)
env_properties = properties.get(env, properties)
contents = env_properties.get(region, env_properties)
LOG.debug('Found properties for %s:\n%s', env, contents)
return contents
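# Minimal usage sketch, assuming a raw.properties.json generated by
# `create-configs` with top-level environment keys (env/region names here
# are illustrative):
#
#   app_props = get_properties(env='dev', region='us-east-1')
#
# A missing _env_ or _region_ key falls back to the enclosing dict because
# of the chained .get() defaults above.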
|
gogoair/foremast
|
src/foremast/utils/properties.py
|
Python
|
apache-2.0
| 1,548
|
"""Provides the options command."""
from functools import partial
from gsb.intercept import Menu
from forms import set_value
from parsers import parser
from options import options
from util import done
def show_section(section, caller):
"""Show the player an instance of OptionsMenu."""
caller.connection.notify(OptionsMenu, section)
def show_option(option, caller):
"""Show and edit an option."""
player = caller.connection.player
def invalid_input(caller):
"""Show invalid input warning."""
player.notify('Invalid input: %r.', caller.text)
show_section(option.section, caller)
def after(caller):
"""Set the value."""
done(player)
show_section(option.section, caller)
value = getattr(player.options, option.name)
if value is True:
value = 'Enabled'
elif value is False:
value = 'Disabled'
elif value is None:
value = 'Clear'
else:
value = repr(value)
player.notify(
'%s\n%s\nCurrent value: %s\n',
option.friendly_name,
option.description,
value
)
set_value(
player.options,
option.name,
caller,
after=after,
invalid_input=invalid_input
)
class OptionsMenu(Menu):
"""Show all the sub-sections and the options of a section."""
def __init__(self, section):
self.section = section
super(OptionsMenu, self).__init__(
title='%s\n%s' % (
section.name,
section.description
),
restore_parser=parser
)
def explain(self, connection):
"""Build the menu."""
player = connection.player
self.items.clear()
self.labels.clear()
if self.section.sections or self.section.parent is not None:
self.add_label('Sections', None)
for subsection in self.section.sections:
if subsection.allowed is None or subsection.allowed(player):
self.item(subsection.name)(partial(show_section, subsection))
if self.section.parent is not None:
self.item('Back to %s' % self.section.parent.name)(
partial(show_section, self.section.parent)
)
if self.items:
after = self.items[-1]
else:
after = None
if self.section.options:
self.add_label('Options', after)
for option in self.section.options:
self.item(option.friendly_name)(partial(show_option, option))
return super(OptionsMenu, self).explain(connection)
@parser.command
def do_options(caller):
"""options
Set options for your player."""
show_section(options, caller)
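# In-game usage sketch: typing "options" at the prompt invokes do_options(),
# which opens the root OptionsMenu; picking a section re-renders the menu via
# show_section(), while picking an option hands editing to forms.set_value()
# as wired up in show_option() above.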
|
chrisnorman7/game
|
commands/options.py
|
Python
|
mpl-2.0
| 2,845
|
import codecs
import re
import types
import sys
from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from constants import encodings, ReparseException
import utils
#Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([str(item) for item in spaceCharacters])
asciiLettersBytes = frozenset([str(item) for item in asciiLetters])
asciiUppercaseBytes = frozenset([str(item) for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile(ur"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1,0] #chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos < self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
            rv.append(bufferedData[bufferOffset:
                                   bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return "".join(rv)
class HTMLInputStream:
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
        #Craziness: len()==1 only on wide (UCS4) Python builds; narrow
        #(UCS2) builds represent this codepoint as a surrogate pair
if len(u"\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(u"[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(u"([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (codecName(encoding), "certain")
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
# Encoding Information
#Number of bytes to use when looking for a meta element with
#encoding information
self.numBytesMeta = 512
#Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
#Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
#Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
#Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
# Otherwise treat source as a string and convert to a file object
if isinstance(source, unicode):
source = source.encode('utf-8')
self.charEncoding = ("utf-8", "certain")
import cStringIO
stream = cStringIO.StringIO(str(source))
if (not(hasattr(stream, "tell") and hasattr(stream, "seek")) or
stream is sys.stdin):
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
#First look for a BOM
#This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
#If there is no BOM need to look for meta elements with encoding
#information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
        #Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence="tentative"
encoding = self.defaultEncoding
#Substitute for equivalent encodings:
encodingSub = {"iso-8859-1":"windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException, "Encoding changed from %s to %s"%(self.charEncoding[0], newEncoding)
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count(u'\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind(u'\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line+1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
#Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub(u"\ufffd", data)
data = data.replace(u"\r\n", u"\n")
data = data.replace(u"\r", u"\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in xrange(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
#Someone picked the wrong compile option
#You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                # Skip the low half of a surrogate pair handled on the
                # previous iteration.
                skip = False
                continue
codepoint = ord(match.group())
pos = match.start()
#Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos+2]):
#We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos+2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite = False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = u"^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = u"".join(rv)
return r
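    # Illustration of the charsUntil() contract: with the stream positioned
    # at u"abc<def", charsUntil("<") returns u"abc" and leaves "<" as the
    # next character; opposite=True instead consumes the run of characters
    # that *are* in the set.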
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class EncodingBytes(str):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
return str.__new__(self, value.lower())
def __init__(self, value):
self._position=-1
def __iter__(self):
return self
def next(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p]
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p+len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes)-1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
("<!--",self.handleComment),
("<meta",self.handleMeta),
("</",self.handlePossibleEndTag),
("<!",self.handleOther),
("<?",self.handleOther),
("<",self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing=False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo("-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            # <meta> not followed by a space, so just keep going
return True
#We have a valid meta element we want to search for attributes
while True:
#Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == "charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == "content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
self.data.next()
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
#If the next byte is not an ascii letter either ignore this
#fragment (possible start tag case) or treat it according to
#handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == "<":
#return to the first step in the overall "two step" algorithm
#reprocessing the < byte
data.previous()
else:
#Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset("/"))
# Step 2
if c in (">", None):
return None
# Step 3
attrName = []
attrValue = []
#Step 4 attribute name
while True:
if c == "=" and attrName:
break
elif c in spaceCharactersBytes:
#Step 6!
c = data.skip()
c = data.next()
break
elif c in ("/", ">"):
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
            elif c is None:
return None
else:
attrName.append(c)
#Step 5
c = data.next()
#Step 7
if c != "=":
data.previous()
return "".join(attrName), ""
#Step 8
data.next()
#Step 9
c = data.skip()
#Step 10
if c in ("'", '"'):
#10.1
quoteChar = c
while True:
#10.2
c = data.next()
#10.3
if c == quoteChar:
data.next()
return "".join(attrName), "".join(attrValue)
#10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
#10.5
else:
attrValue.append(c)
elif c == ">":
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = data.next()
if c in spacesAngleBrackets:
return "".join(attrName), "".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
self.data = data
def parse(self):
try:
#Check if the attr name is charset
#otherwise return
self.data.jumpTo("charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == "=":
#If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
#Look for an encoding between matching quote marks
if self.data.currentByte in ('"', "'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
#Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
#Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if (encoding is not None and type(encoding) in types.StringTypes):
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
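# For example, codecName("UTF_8") and codecName("utf-8") resolve to the same
# entry because ASCII punctuation is stripped and the label lowercased before
# the lookup in `encodings`; unknown labels return None.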
|
dewitt/appengine-unshorten
|
third_party/html5lib/inputstream.py
|
Python
|
apache-2.0
| 27,634
|
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
from bliss.common.task_utils import cleanup, error_cleanup, task
from bliss.common.measurement import SamplingCounter
from bliss.common.greenlet_utils import protect_from_kill
from gevent.lock import Semaphore
import time
import gevent
import socket
class LSCounter(SamplingCounter):
def __init__(self, name, controller, channel):
SamplingCounter.__init__(self, controller.name+'.'+name, controller)
self.__controller = controller
self.__channel = channel
@property
def channel(self):
return self.__channel
def read(self):
return float(self.__controller._putget("krdg? %s" % self.channel))
class ls335(object):
def __init__(self, name, config):
self.name = name
self.gpib_controller_host = config.get("gpib_controller_host")
self.gpib_address = config.get("gpib_address")
self.__lock = Semaphore()
self.__control = None
def __connect(self):
self.__control = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.__control.connect((self.gpib_controller_host, 1234))
self.__control.sendall('++mode 1\r\n++addr %d\r\n++auto 0\r\nmode 0\r\n' % self.gpib_address)
return self._putget("*idn?").startswith("LS")
@protect_from_kill
def _putget(self, cmd):
with self.__lock:
if self.__control is None:
self.__connect()
self.__control.sendall("%s\r\n++read eoi\r\n" % cmd)
return self.__control.recv(1024)
@property
def A(self):
return LSCounter("A", self, "a")
@property
def B(self):
return LSCounter("B", self, "b")
|
tiagocoutinho/bliss
|
bliss/controllers/ls335.py
|
Python
|
lgpl-3.0
| 1,796
|
""" Classes to handle HTMLText and Catalogues in PubTal.
Copyright (c) 2015 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
try:
import logging
except:
import InfoLogging as logging
from pubtal import SitePublisher
try:
import markdown2
except:
pass
import os, time, anydbm, codecs
from simpletal import simpleTAL, simpleTALES
# getPluginInfo provides the list of built-in supported content.
def getPluginInfo ():
builtInContent = [{'functionality': 'content', 'content-type': 'Markdown' ,'file-type': 'md','class': MarkdownPagePublisher}]
return builtInContent
class MarkdownPagePublisher (SitePublisher.ContentPublisher):
def __init__ (self, pagePublisher):
SitePublisher.ContentPublisher.__init__ (self, pagePublisher)
self.log = logging.getLogger ("PubTal.MarkdownPagePublisher")
def publish (self, page):
templateName = page.getOption ('template')
# Get this template's configuration
template = self.templateConfig.getTemplate (templateName)
context = simpleTALES.Context(allowPythonPath=1)
# Get the page context for this content
map = self.getPageContext (page, template)
# Determine the destination for this page
relativeDestPath = map ['destinationPath']
context.addGlobal ('page', map)
macros = page.getMacros()
self.pagePublisher.expandTemplate (template, context, relativeDestPath, macros)
def getPageContext (self, page, template):
pageMap = SitePublisher.ContentPublisher.getPageContext (self, page, template)
headers, rawContent = self.readHeadersAndContent(page)
# Convert the body from markdown to HTML
content = markdown2.markdown (rawContent)
actualHeaders = pageMap ['headers']
actualHeaders.update (headers)
pageMap ['headers'] = actualHeaders
pageMap ['content'] = content
pageMap ['rawContent'] = rawContent
return pageMap
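# Hedged plugin note: PubTal routes files with the 'md' extension to this
# publisher via getPluginInfo() above; getPageContext() exposes the rendered
# HTML as page/content and the untouched source as page/rawContent to the
# SimpleTAL template.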
|
owlfish/pubtal
|
lib/pubtal/plugins/markdown.py
|
Python
|
bsd-3-clause
| 3,304
|
from .file_logger import FileLogger
|
philipperemy/tensorflow-phased-lstm
|
helpers/__init__.py
|
Python
|
mit
| 35
|
# xiaolongdolly 2017.8.20
cubes = [cube**3 for cube in range(1, 11)]
print(cubes)
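# Expected output: [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]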
|
xiaolongdolly/Python_Course
|
chapter_4/cubes/cubes_2.py
|
Python
|
gpl-3.0
| 83
|
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
# Logic to spawn a sub-process and interact with its stdio.
import os
import re
import pty
import signal
import select
import time
class Timeout(Exception):
"""An exception sub-class that indicates that a timeout occurred."""
pass
class Spawn(object):
"""Represents the stdio of a freshly created sub-process. Commands may be
sent to the process, and responses waited for.
Members:
output: accumulated output from expect()
"""
def __init__(self, args, cwd=None):
"""Spawn (fork/exec) the sub-process.
Args:
args: array of processs arguments. argv[0] is the command to
execute.
cwd: the directory to run the process in, or None for no change.
Returns:
Nothing.
"""
self.waited = False
self.buf = ''
self.output = ''
self.logfile_read = None
self.before = ''
self.after = ''
self.timeout = None
# http://stackoverflow.com/questions/7857352/python-regex-to-match-vt100-escape-sequences
# Note that re.I doesn't seem to work with this regex (or perhaps the
# version of Python in Ubuntu 14.04), hence the inclusion of a-z inside
# [] instead.
self.re_vt100 = re.compile('(\x1b\[|\x9b)[^@-_a-z]*[@-_a-z]|\x1b[@-_a-z]')
(self.pid, self.fd) = pty.fork()
if self.pid == 0:
try:
# For some reason, SIGHUP is set to SIG_IGN at this point when
# run under "go" (www.go.cd). Perhaps this happens under any
# background (non-interactive) system?
signal.signal(signal.SIGHUP, signal.SIG_DFL)
if cwd:
os.chdir(cwd)
os.execvp(args[0], args)
except:
                print 'CHILD EXCEPTION:'
import traceback
traceback.print_exc()
finally:
os._exit(255)
try:
self.poll = select.poll()
self.poll.register(self.fd, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP | select.POLLNVAL)
except:
self.close()
raise
def kill(self, sig):
"""Send unix signal "sig" to the child process.
Args:
sig: The signal number to send.
Returns:
Nothing.
"""
os.kill(self.pid, sig)
def isalive(self):
"""Determine whether the child process is still running.
Args:
None.
Returns:
Boolean indicating whether process is alive.
"""
if self.waited:
return False
w = os.waitpid(self.pid, os.WNOHANG)
if w[0] == 0:
return True
self.waited = True
return False
def send(self, data):
"""Send data to the sub-process's stdin.
Args:
data: The data to send to the process.
Returns:
Nothing.
"""
os.write(self.fd, data)
def expect(self, patterns):
"""Wait for the sub-process to emit specific data.
This function waits for the process to emit one pattern from the
supplied list of patterns, or for a timeout to occur.
Args:
patterns: A list of strings or regex objects that we expect to
see in the sub-process' stdout.
Returns:
The index within the patterns array of the pattern the process
emitted.
Notable exceptions:
Timeout, if the process did not emit any of the patterns within
the expected time.
"""
for pi in xrange(len(patterns)):
if type(patterns[pi]) == type(''):
patterns[pi] = re.compile(patterns[pi])
tstart_s = time.time()
try:
while True:
earliest_m = None
earliest_pi = None
for pi in xrange(len(patterns)):
pattern = patterns[pi]
m = pattern.search(self.buf)
if not m:
continue
if earliest_m and m.start() >= earliest_m.start():
continue
earliest_m = m
earliest_pi = pi
if earliest_m:
pos = earliest_m.start()
posafter = earliest_m.end()
self.before = self.buf[:pos]
self.after = self.buf[pos:posafter]
self.output += self.buf[:posafter]
self.buf = self.buf[posafter:]
return earliest_pi
tnow_s = time.time()
if self.timeout:
tdelta_ms = (tnow_s - tstart_s) * 1000
poll_maxwait = self.timeout - tdelta_ms
if tdelta_ms > self.timeout:
raise Timeout()
else:
poll_maxwait = None
events = self.poll.poll(poll_maxwait)
if not events:
raise Timeout()
c = os.read(self.fd, 1024)
if not c:
raise EOFError()
if self.logfile_read:
self.logfile_read.write(c)
self.buf += c
# count=0 is supposed to be the default, which indicates
# unlimited substitutions, but in practice the version of
# Python in Ubuntu 14.04 appears to default to count=2!
self.buf = self.re_vt100.sub('', self.buf, count=1000000)
finally:
if self.logfile_read:
self.logfile_read.flush()
def close(self):
"""Close the stdio connection to the sub-process.
This also waits a reasonable time for the sub-process to stop running.
Args:
None.
Returns:
Nothing.
"""
os.close(self.fd)
for i in xrange(100):
if not self.isalive():
break
time.sleep(0.1)
def get_expect_output(self):
"""Return the output read by expect()
Returns:
The output processed by expect(), as a string.
"""
return self.output
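# Usage sketch (illustrative only; assumes an 'echo' binary on PATH):
#
#   s = Spawn(['echo', 'hello world'])
#   s.timeout = 5000                # milliseconds
#   s.expect(['hello'])             # returns 0, the index of the matched pattern
#   s.close()
#   print s.get_expect_output()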
|
guileschool/beagleboard
|
u-boot/test/py/u_boot_spawn.py
|
Python
|
mit
| 6,482
|
# -*- coding: utf-8 -*-
"""
tests/__init__.py
"""
import unittest
import trytond.tests.test_tryton
from tests.test_wishlist import TestWishlist
def suite():
"""
Define suite
"""
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests([
unittest.TestLoader().loadTestsFromTestCase(TestWishlist),
])
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
fulfilio/nereid-wishlist
|
tests/__init__.py
|
Python
|
bsd-3-clause
| 455
|
#!/usr/bin/python3
import sys
import subprocess
repoquery = ['repoquery', '--plugins', '--resolve', '--qf',
             '%{name}.%{arch} %{repoid} %{location}', '-R']
package_info = dict()
def check_dep(packages):
    """Recursively resolve dependencies, recording each new package's repo and location."""
#print(packages)
if len(packages) == 0:
return
cmd = repoquery + packages
output = subprocess.check_output(cmd).decode("utf-8")
wait_for_checking = []
for line in output.split('\n'):
if len(line) == 0:
continue
(package_name, repoid, location) = line.split(' ')
if (repoid != 'InstallMedia' and
package_name not in package_info):
package_info[package_name] = (repoid, location)
wait_for_checking.append(package_name)
check_dep(wait_for_checking)
check_dep(sys.argv[1:])
for package in package_info:
print(package_info[package][1])
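# Usage sketch (illustrative only):
#
#   ./repoquery-recursive.py bash coreutils
#
# recursively resolves the dependencies of the named packages and prints the
# download location of every package not provided by the InstallMedia repo.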
|
Zor-X-L/offline-utils
|
rhel6-utils/repoquery-recursive.py
|
Python
|
mit
| 815
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
freeseer - vga/presentation capture software
Copyright (C) 2011 Free and Open Source Software Learning Centre
http://fosslc.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer/
@author: Thanh Ha
'''
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
from freeseer.frontend.qtcommon.dpi_adapt_qtgui import QWidgetWithDpi
from freeseer.frontend.qtcommon import resource # noqa
class ConfigToolWidget(QWidgetWithDpi):
'''
classdocs
'''
def __init__(self, parent=None):
'''
Constructor
'''
super(ConfigToolWidget, self).__init__(parent)
self.setMinimumSize(800, 460)
self.mainLayout = QtGui.QHBoxLayout()
self.setLayout(self.mainLayout)
#
# Left panel
#
self.leftPanelLayout = QtGui.QVBoxLayout()
self.mainLayout.addLayout(self.leftPanelLayout)
self.optionsTreeWidget = QtGui.QTreeWidget()
self.optionsTreeWidget.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Minimum)
self.optionsTreeWidget.setHeaderHidden(True)
self.optionsTreeWidget.headerItem().setText(0, "1")
# General
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(0).setText(0, "General")
# AV
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(1).setText(0, "AV Config")
# Plugins
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(2).setText(0, "Plugins")
# About
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(3).setText(0, "About")
closeIcon = QtGui.QIcon.fromTheme("application-exit")
self.closePushButton = QtGui.QPushButton("Close")
self.closePushButton.setIcon(closeIcon)
self.leftPanelLayout.addWidget(self.optionsTreeWidget)
self.leftPanelLayout.addWidget(self.closePushButton)
self.optionsTreeWidget.expandAll()
#
# Right panel
#
self.rightPanelWidget = QtGui.QWidget()
self.rightPanelWidget.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
self.mainLayout.addWidget(self.rightPanelWidget)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
main = ConfigToolWidget()
main.show()
sys.exit(app.exec_())
|
Freeseer/freeseer
|
src/freeseer/frontend/configtool/ConfigToolWidget.py
|
Python
|
gpl-3.0
| 3,246
|
import SSD1331
import time
import datetime
device = SSD1331.SSD1331()
def test(device):
    """Fill the screen with a colour gradient, then benchmark the two color565 variants."""
r, g, b = (48, 213, 200)
rd, gd, bd = (1,1,1)
arr = []
for x in range(9000):
color = device.color565_fast(r, g, b)
r +=1 * rd
g +=2 * gd
b +=3 * bd
if r > 255:
r = 0
rd = 0 - rd
if g > 255:
g = 0
gd = 0 - gd
if b > 255:
b = 0
bd = 0 - bd
arr.extend([(color >> 8) & 0xFF, color & 0xFF])
device.write_many_pixels(arr)
#time.sleep(5)
t = datetime.datetime.now()
arr = []
for x in range(900000):
color = device.color565_fast(r, g, b)
print datetime.datetime.now() - t
t = datetime.datetime.now()
arr = []
for x in range(900000):
color = device.color565(r, g, b)
print datetime.datetime.now() - t
test(device)
device.remove()
|
Zurek87/SSD1331_rpi
|
test.py
|
Python
|
mit
| 997
|
# exceptions.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import traceback, sys, re
from mako import util
class MakoException(Exception):
pass
class RuntimeException(MakoException):
pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno =lineno
self.pos = pos
self.filename = filename
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno =lineno
self.pos = pos
self.filename = filename
self.source = source
class UnsupportedError(MakoException):
"""raised when a retired feature is used."""
class TemplateLookupException(MakoException):
pass
class TopLevelLookupException(TemplateLookupException):
pass
class RichTraceback(object):
"""pulls the current exception from the sys traceback and extracts Mako-specific
template information.
Usage:
RichTraceback()
Properties:
error - the exception instance.
message - the exception error message as unicode
    source - source code of the file where the error occurred. if the error occurred within a compiled template,
    this is the template source.
    lineno - line number where the error occurred. if the error occurred within a compiled template, the line number
    is adjusted to that of the template source
records - a list of 8-tuples containing the original python traceback elements, plus the
filename, line number, source line, and full template source for the traceline mapped back to its originating source
template, if any for that traceline (else the fields are None).
reverse_records - the list of records in reverse
traceback - a list of 4-tuples, in the same format as a regular python traceback, with template-corresponding
traceback records replacing the originals
reverse_traceback - the traceback list in reverse
"""
def __init__(self, error=None, traceback=None):
self.source, self.lineno = "", 0
if error is None or traceback is None:
t, value, tback = sys.exc_info()
if error is None:
error = value or t
if traceback is None:
traceback = tback
self.error = error
self.records = self._init(traceback)
if isinstance(self.error, (CompileException, SyntaxException)):
import mako.template
self.source = self.error.source
self.lineno = self.error.lineno
self._has_source = True
self._init_message()
def _init_message(self):
"""Find a unicode representation of self.error"""
try:
self.message = unicode(self.error)
except UnicodeError:
try:
self.message = str(self.error)
except UnicodeEncodeError:
# Fallback to args as neither unicode nor
# str(Exception(u'\xe6')) work in Python < 2.6
self.message = self.error.args[0]
if not isinstance(self.message, unicode):
self.message = unicode(self.message, 'ascii', 'replace')
def _get_reformatted_records(self, records):
for rec in records:
if rec[6] is not None:
yield (rec[4], rec[5], rec[2], rec[6])
else:
yield tuple(rec[0:4])
@property
def traceback(self):
"""return a list of 4-tuple traceback records (i.e. normal python format)
with template-corresponding lines remapped to the originating template
"""
return list(self._get_reformatted_records(self.records))
@property
def reverse_records(self):
return reversed(self.records)
@property
def reverse_traceback(self):
"""return the same data as traceback, except in reverse order
"""
return list(self._get_reformatted_records(self.reverse_records))
def _init(self, trcback):
"""format a traceback from sys.exc_info() into 7-item tuples, containing
the regular four traceback tuple items, plus the original template
filename, the line number adjusted relative to the template source, and
code line from that line number of the template."""
import mako.template
mods = {}
rawrecords = traceback.extract_tb(trcback)
new_trcback = []
for filename, lineno, function, line in rawrecords:
try:
(line_map, template_lines) = mods[filename]
except KeyError:
try:
info = mako.template._get_module_info(filename)
module_source = info.code
template_source = info.source
template_filename = info.template_filename or filename
except KeyError:
# A normal .py file (not a Template)
if not util.py3k:
try:
fp = open(filename, 'rb')
encoding = util.parse_encoding(fp)
fp.close()
except IOError:
encoding = None
if encoding:
line = line.decode(encoding)
else:
line = line.decode('ascii', 'replace')
new_trcback.append((filename, lineno, function, line,
None, None, None, None))
continue
template_ln = module_ln = 1
line_map = {}
for line in module_source.split("\n"):
match = re.match(r'\s*# SOURCE LINE (\d+)', line)
if match:
template_ln = int(match.group(1))
else:
template_ln += 1
module_ln += 1
line_map[module_ln] = template_ln
template_lines = [line for line in template_source.split("\n")]
mods[filename] = (line_map, template_lines)
template_ln = line_map[lineno]
if template_ln <= len(template_lines):
template_line = template_lines[template_ln - 1]
else:
template_line = None
new_trcback.append((filename, lineno, function,
line, template_filename, template_ln,
template_line, template_source))
if not self.source:
for l in range(len(new_trcback)-1, 0, -1):
if new_trcback[l][5]:
self.source = new_trcback[l][7]
self.lineno = new_trcback[l][5]
break
else:
try:
# A normal .py file (not a Template)
fp = open(new_trcback[-1][0], 'rb')
encoding = util.parse_encoding(fp)
fp.seek(0)
self.source = fp.read()
fp.close()
if encoding:
self.source = self.source.decode(encoding)
except IOError:
self.source = ''
self.lineno = new_trcback[-1][1]
return new_trcback
def text_error_template(lookup=None):
"""provides a template that renders a stack trace in a similar format to the Python interpreter,
substituting source template filenames, line numbers and code for that of the originating
source template, as applicable."""
import mako.template
return mako.template.Template(r"""
<%page args="error=None, traceback=None"/>
<%!
from mako.exceptions import RichTraceback
%>\
<%
tback = RichTraceback(error=error, traceback=traceback)
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
File "${filename}", line ${lineno}, in ${function or '?'}
${line | unicode.strip}
% endfor
${str(tback.error.__class__.__name__)}: ${tback.message}
""")
def html_error_template():
"""provides a template that renders a stack trace in an HTML format, providing an excerpt of
code as well as substituting source template filenames, line numbers and code
for that of the originating source template, as applicable.
the template's default encoding_errors value is 'htmlentityreplace'. the template has
two options:
with the full option disabled, only a section of an HTML document is returned.
with the css option disabled, the default stylesheet won't be included."""
import mako.template
return mako.template.Template(r"""
<%!
from mako.exceptions import RichTraceback
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
<html>
<head>
<title>Mako Runtime Error</title>
% endif
% if css:
<style>
body { font-family:verdana; margin:10px 30px 10px 30px;}
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
</style>
% endif
% if full:
</head>
<body>
% endif
<h2>Error !</h2>
<%
tback = RichTraceback(error=error, traceback=traceback)
src = tback.source
line = tback.lineno
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${str(tback.error.__class__.__name__)}: ${tback.message}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
% if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div>
% else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div>
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="sourceline">${line | h}</div>
% endfor
</div>
% if full:
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')
|
sadanandb/pmt
|
src/mako/exceptions.py
|
Python
|
epl-1.0
| 11,269
|
# Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import re
import os
import jinja2
import boto3
import logging
import argparse
import time
import datetime
import jsonschema
c7n_data = {}
def create_html_file(config):
""" You can customize the automated documentation by altering
the code directly in this script or the associated jinja2 template
"""
logging.debug("Starting create_html_file")
logging.debug(
"\tjinja2_template_file = {}"
.format(config['jinja2_template_filename']))
logging.debug(
"\ttrendered_filename = {}"
.format(config['rendered_filename']))
ts = time.time()
timestamp = datetime.datetime.utcfromtimestamp(ts).strftime(
'%Y-%m-%d %H:%M:%S')
script_path = os.path.dirname(os.path.abspath(__file__))
rendered_file_path = os.path.join(
script_path, config['rendered_filename'])
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(script_path))
    environment_column = bool(config.get('environment_tags'))
    render_vars = {
        "timestamp": timestamp,
        "c7n_data": c7n_data,
        "environment_column": environment_column,
        "environment_tags": config.get('environment_tags')
    }
with open(rendered_file_path, "w") as result_file:
result_file.write(
environment.get_template(config['jinja2_template_filename'])
.render(render_vars))
logging.debug("File created: %s", rendered_file_path)
return rendered_file_path
def get_file_url(path, config):
""" Update this function to help build the link to your file
"""
file_url_regex = re.compile(config['file_url_regex'])
new_path = re.sub(file_url_regex, config['file_url_base'], path)
return new_path
def gather_file_data(config):
""" Gather policy information from files
"""
file_regex = re.compile(config['file_regex'])
category_regex = re.compile(config['category_regex'])
policies = {}
for root, dirs, files in os.walk(config['c7n_policy_directory']):
for file in files:
if file_regex.match(file):
file_path = root + '/' + file
logging.debug('Processing file %s', file_path)
with open(file_path, 'r') as stream:
try:
if category_regex.search(file_path):
category = 'Security & Governance'
else:
category = 'Cost Controls'
                        policies = yaml.load(stream, Loader=yaml.SafeLoader)
for policy in policies['policies']:
logging.debug(
'Processing policy %s', policy['name'])
policy['file_url'] = get_file_url(
file_path, config)
resource_type = policy['resource']
if category not in c7n_data:
c7n_data[category] = {}
if resource_type not in c7n_data[category]:
c7n_data[category][resource_type] = []
c7n_data[category][resource_type].append(policy)
except yaml.YAMLError as exc:
logging.error(exc)
def upload_to_s3(file_path, config):
""" Upload html file to S3
"""
logging.info("Uploading file to S3 bucket: %s", config['s3_bucket_name'])
s3 = boto3.resource('s3')
s3_filename = config['s3_bucket_path'] + config['rendered_filename']
s3.Bucket(config['s3_bucket_name']).upload_file(
file_path, s3_filename, ExtraArgs={
'ContentType': 'text/html', 'ACL': 'public-read'})
def validate_inputs(config):
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': [
'c7n_policy_directory',
'file_regex',
'jinja2_template_filename',
'rendered_filename',
'category_regex'
],
'properties': {
'jinja2_template_filename': {'type': 'string'},
'rendered_filename': {'type': 'string'},
'c7n_policy_directory': {'type': 'string'},
'file_regex': {'type': 'string'},
'category_regex': {'type': 'string'},
'file_url_base': {'type': 'string'},
'file_url_regex': {'type': 'string'},
's3_bucket_name': {'type': 'string'},
's3_bucket_path': {'type': 'string'},
'environment_tags': {'type': 'object'}
}
}
jsonschema.validate(config, CONFIG_SCHEMA)
logging.info("Successfully validated configuration file")
def main():
parser = argparse.ArgumentParser(
description='Automatic policy documentation for Cloud Custodian.')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument(
'-c', '--config_filename',
required=True,
dest='config_filename',
help='YAML config filename')
args = parser.parse_args()
with open(args.config_filename) as fh:
config_tmp = yaml.load(fh.read(), Loader=yaml.SafeLoader)
config = config_tmp['config']
validate_inputs(config)
logging_format = '%(asctime)s %(levelname)-4s %(message)s'
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format=logging_format)
else:
logging.basicConfig(level=logging.INFO, format=logging_format)
gather_file_data(config)
rendered_file = create_html_file(config)
if 's3_bucket_name' in config:
upload_to_s3(rendered_file, config)
if __name__ == '__main__':
main()
|
FireballDWF/cloud-custodian
|
tools/sandbox/c7n_autodoc/c7n-autodoc.py
|
Python
|
apache-2.0
| 6,289
|
import math
def linear(pos):
return pos
def quadIn(pos):
return pow(pos, 2)
def quadOut(pos):
return -(pow((pos - 1), 2) -1)
def quadInOut(pos):
pos /= 0.5
if pos < 1:
        return 0.5 * pow(pos, 2)
    pos -= 2
    return -0.5 * (pos * pos - 2)
def sineIn(pos):
return -math.cos(pos * (math.pi / 2)) + 1
def sineOut(pos):
return math.sin(pos * (math.pi / 2))
def sineInOut(pos):
return (-0.5 * (math.cos(math.pi * pos) - 1))
def circIn(pos):
return -(math.sqrt(1 - (pos * pos)) - 1)
def circOut(pos):
return math.sqrt(1 - pow(pos - 1, 2))
def circInOut(pos):
pos /= 0.5
if pos < 1:
return -0.5 * (math.sqrt(1 - pos * pos) - 1)
pos -= 2
return 0.5 * (math.sqrt(1 - pos*pos) + 1)
def backIn(pos):
s = 1.70158
return pos * pos * ((s + 1) * pos - s)
def backOut(pos):
s = 1.70158
pos = pos - 1
return pos * pos * ((s + 1) * pos + s) + 1
def backInOut(pos):
s = 1.70158 * 1.525
pos /= 0.5
if pos < 1:
return 0.5 * (pos * pos * ((s + 1) * pos - s))
pos -= 2
return 0.5 * (pos * pos * ((s + 1) * pos +s) + 2)
def swingFrom(pos):
s = 1.70158
return pos * pos * ((s + 1) * pos - s)
def swingTo(pos):
s = 1.70158
pos -= 1
return pos * pos * ((s + 1) * pos + s) + 1
def swingFromTo(pos):
s = 1.70158 * 1.525
pos /= 0.5
if pos < 1:
return 0.5 * (pos * pos * ((s + 1) * pos - s))
pos -= 2
return 0.5 * (pos * pos * ((s + 1) * pos + s) + 2)
def elastic(pos):
return -1 * pow(4, -8 * pos) * math.sin((pos * 6 - 1) * (2 * math.pi) / 2) + 1
def bounce(pos):
if pos < (1 / 2.75):
return 7.5625 * pos * pos
elif pos < (2 / 2.75):
pos -= (1.5/2.75)
return 7.5625 * pos * pos + 0.75
elif (pos < (2.5 / 2.75)):
pos -= 2.25 / 2.75
return 7.5625 * pos * pos + 0.9375
else:
pos -= 2.625 / 2.75
return 7.5625 * (pos) * pos + 0.984375
EASINGS = [
("linear", linear),
("quadIn", quadIn),
("quadOut", quadOut),
("quadInOut", quadInOut),
("sineIn", sineIn),
("sineOut", sineOut),
("sineInOut", sineInOut),
("circIn", circIn),
("circOut", circOut),
("circInOut", circInOut),
("backIn", backIn),
("backOut", backOut),
("backInOut", backInOut),
("swingFrom", swingFrom),
("swingTo", swingTo),
("swingFromTo", swingFromTo),
("elastic", elastic),
("bounce", bounce)
]
EASINGS.sort(key=lambda e: e[0])
MAP = {}
for name, func in EASINGS:
MAP[name] = func
def getNames():
return [e[0] for e in EASINGS]
def getEasing(name):
return MAP[name]
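# Usage sketch (illustrative only):
#
#   ease = getEasing("quadInOut")
#   ease(0.0)   # -> 0.0
#   ease(0.5)   # -> 0.5
#   ease(1.0)   # -> 1.0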
|
bitsawer/renpy-shader
|
ShaderDemo/game/shader/easing.py
|
Python
|
mit
| 2,658
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mongoengine as me
from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE
from st2common.constants.types import ResourceType
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
__all__ = [
'KeyValuePairDB'
]
class KeyValuePairDB(stormbase.StormBaseDB, stormbase.UIDFieldMixin):
"""
Attribute:
name: Name of the key.
value: Arbitrary value to be stored.
"""
RESOURCE_TYPE = ResourceType.KEY_VALUE_PAIR
UID_FIELDS = ['scope', 'name']
scope = me.StringField(default=FULL_SYSTEM_SCOPE, unique_with='name')
name = me.StringField(required=True)
value = me.StringField()
secret = me.BooleanField(default=False)
expire_timestamp = me.DateTimeField()
meta = {
'indexes': [
{
'fields': ['expire_timestamp'],
'expireAfterSeconds': 0
}
] + stormbase.UIDFieldMixin.get_indexes()
}
def __init__(self, *args, **values):
super(KeyValuePairDB, self).__init__(*args, **values)
self.uid = self.get_uid()
# specialized access objects
keyvaluepair_access = MongoDBAccess(KeyValuePairDB)
MODELS = [KeyValuePairDB]
|
lakshmi-kannan/st2
|
st2common/st2common/models/db/keyvalue.py
|
Python
|
apache-2.0
| 1,989
|
import time
import RPi.GPIO as GPIO
from Adafruit_BME280 import *
led_output = 27
sensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8)
GPIO.setmode(GPIO.BCM)
GPIO.setup(led_output, GPIO.OUT)
led = False
while True:
degrees = sensor.read_temperature()
print('Temp = ' + str(round(degrees)) + ' C')
    led = degrees >= 20  # LED on at or above 20 degrees C
GPIO.output(led_output, led)
time.sleep(0.5)
|
gdmgent-1718-wot/Hardware
|
rpi-gpio/BMP280Sensor/BMP280.py
|
Python
|
apache-2.0
| 480
|
#! /usr/bin/env python3
import argparse
import asyncio
import ast
import sys
from colorama import init as colorama_init, Fore, Style
class NotSupportedNodeError(ValueError): pass
def attr2str(node):
if isinstance(node, ast.Attribute):
if isinstance(node.value, ast.Attribute):
value = attr2str(node.value)
elif isinstance(node.value, ast.Name):
value = node.value.id
else:
raise NotSupportedNodeError()
return value + '.' + attr2str(node.attr)
elif isinstance(node, str):
return node
elif isinstance(node, ast.Name):
return node.id
else:
raise NotSupportedNodeError()
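# For example (illustrative): attr2str applied to the AST of the expression
# "a.b.c" returns the string "a.b.c"; node shapes it cannot flatten raise
# NotSupportedNodeError.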
class ImportRetriever(ast.NodeVisitor):
def __init__(self):
super().__init__()
self._user_globals = dict()
self._user_locals = dict()
def visit_Import(self, node):
temp_mod = ast.Module()
temp_mod.body = [node]
exec(compile(temp_mod, '<unknown>', 'exec'), self._user_globals, self._user_locals)
self.generic_visit(node)
class CoroutineDefFinder(ast.NodeVisitor):
def __init__(self):
super().__init__()
self._parent_class = []
self._parent_func = []
self._scopes = []
self._scoped_coros = set()
self._scoped_types = dict()
self._level = 0
def visit_ClassDef(self, node):
self._parent_class.append((node.name, self._level))
self._scopes.append(node.name)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self._parent_func.append((node.name, self._level))
for deco_node in node.decorator_list:
try:
deco_attr = attr2str(deco_node)
if deco_attr == 'asyncio.coroutine':
scoped_coro_sig = '.'.join(self._scopes) + '.' + node.name
self._scoped_coros.add(scoped_coro_sig)
print(Fore.YELLOW + scoped_coro_sig + Fore.RESET)
except NotSupportedNodeError:
pass
self._scopes.append(node.name)
for arg in node.args.args:
if arg.annotation:
scoped_var = '.'.join(self._scopes) + '.' + arg.arg
self._scoped_types[scoped_var] = attr2str(arg.annotation)
# TODO: cover keyword arguments
self.generic_visit(node)
def visit_Assign(self, node):
if isinstance(node.value, ast.Attribute) or isinstance(node.value, ast.Name):
target = attr2str(node.targets[0])
value = attr2str(node.value)
if target.startswith('self.'):
scoped_target = '.'.join(self._scopes[:-1]) + '.' + target[5:]
else:
scoped_target = '.'.join(self._scopes) + '.' + target
scoped_value = '.'.join(self._scopes) + '.' + value
t = self._scoped_types.get(scoped_value, None)
if t is not None:
self._scoped_types[scoped_target] = t
#print(scoped_target, ' = ', scoped_value, '|', t)
self.generic_visit(node)
def visit(self, node):
self._level += 1
super().visit(node)
if self._parent_class:
if self._parent_class[-1][1] == self._level:
self._scopes.pop()
self._parent_class.pop()
if self._parent_func:
if self._parent_func[-1][1] == self._level:
self._scopes.pop()
self._parent_func.pop()
self._level -= 1
class CoroutineChecker(ast.NodeVisitor):
def __init__(self, ns, scoped_coro_sigs, scoped_types):
super().__init__()
self._ns = ns
self._yield_from = False
self._parent_class = []
self._parent_func = []
self._scopes = []
self._scoped_coro_sigs = scoped_coro_sigs
self._scoped_types = scoped_types
self._level = 0
def check_if_coroutine(self, callee):
try:
e = eval(callee, {}, self._ns)
return asyncio.iscoroutinefunction(e) or asyncio.iscoroutine(e)
except NameError:
pass
if callee.startswith('self.'):
target = '.'.join(self._scopes[:-1]) + '.' + callee[5:]
else:
target = '.'.join(self._scopes) + '.' + callee
while True:
try:
t = self._scoped_types.get(target, None)
if t:
e = eval(t, {}, self._ns)
return asyncio.iscoroutinefunction(e) or asyncio.iscoroutine(e)
else:
if '.' not in target: break
target = '.'.join(target.split('.')[:-1])
except NameError:
break
try:
if callee.startswith('self.'):
target = '.'.join(self._scopes[:-1]) + '.' + callee[5:]
else:
target = '.'.join(self._scopes) + '.' + callee
print(target)
ret = target in self._scoped_coro_sigs
return ret
except KeyError:
pass
#try:
# ret = self._lexical_scope_coro[callee]
# return ret
#except KeyError:
# #print('lsc fail:', callee, self._lexical_scope_coro)
# pass
return False
def visit_ClassDef(self, node):
self._parent_class.append((node.name, self._level))
self._scopes.append(node.name)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self._parent_func.append((node.name, self._level))
self._scopes.append(node.name)
self.generic_visit(node)
def visit_YieldFrom(self, node):
print(Fore.YELLOW + Style.BRIGHT + 'yield from' + Style.RESET_ALL + Fore.RESET, end=' ')
self._yield_from = True
self.generic_visit(node)
def visit_Call(self, node):
try:
callee = attr2str(node.func)
print(Fore.YELLOW + callee + Fore.RESET)
is_coroutine = self.check_if_coroutine(callee)
print(' ', end='')
if is_coroutine:
if self._yield_from:
print(Fore.GREEN, end='')
else:
print(Fore.RED, end='')
print(callee, 'is coroutine', end='')
else:
if self._yield_from:
print(Fore.RED, end='')
else:
print(Fore.GREEN, end='')
print(callee, 'is not coroutine', end='')
print(Fore.RESET)
except NotSupportedNodeError:
pass
finally:
self._yield_from = False
self.generic_visit(node)
def visit(self, node):
self._level += 1
super().visit(node)
if self._parent_class:
if self._parent_class[-1][1] == self._level:
self._scopes.pop()
self._parent_class.pop()
if self._parent_func:
if self._parent_func[-1][1] == self._level:
self._scopes.pop()
self._parent_func.pop()
self._level -= 1
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('filename', type=str)
args = argparser.parse_args()
colorama_init()
with open(args.filename, 'r') as fin:
node = ast.parse(fin.read())
ir = ImportRetriever()
ir.visit(node)
cd = CoroutineDefFinder()
cd.visit(node)
print(cd._scoped_coros)
print(cd._scoped_types)
cc = CoroutineChecker(ir._user_locals, cd._scoped_coros, cd._scoped_types)
cc.visit(node)
if __name__ == '__main__':
main()
|
lablup/coroutine-check
|
check.py
|
Python
|
mit
| 7,691
|
from rdkit import Chem
from numpy.testing import assert_almost_equal
from mordred.SLogP import SMR, SLogP
def test_WildmanCrippen1():
mol = Chem.MolFromSmiles("Oc1ccccc1OC")
yield assert_almost_equal, SLogP()(mol), 1.4, 2
yield assert_almost_equal, SMR()(mol), 34.66, 2
def test_WildmanCrippen2():
mol = Chem.MolFromSmiles("c1ccccc1c2ccccn2")
yield assert_almost_equal, SLogP()(mol), 2.75, 2
|
mordred-descriptor/mordred
|
mordred/tests/test_SLogP.py
|
Python
|
bsd-3-clause
| 417
|
from twisted.web import resource
import headers
class DynamicOptions(resource.Resource):
def getChild(self, name, request):
return self
"""
Get HTTP Options
"""
def render_OPTIONS(self, request):
headers.setContentHeaders(request)
headers.setAccessControlHeaders(request)
request.setResponseCode(200)
return ""
|
michaelmp/web-res-scaf
|
web/options.py
|
Python
|
agpl-3.0
| 344
|
#!/usr/bin/env python
import base64
import itertools
import os
import argparse
from bugzilla.models import Bug, Attachment, Flag, User, Comment
from bugzilla.agents import BugzillaAgent
from bugzilla.utils import urljoin, qs, get_credentials, FILE_TYPES
REVIEW = 4
class AttachmentAgent(BugzillaAgent):
"""Stores credentials, navigates the site."""
def attach(self, bug_id, filename, description, patch=False,
reviewer=None, comment='', content_type='text/plain'):
"""Create an attachment, add a comment, obsolete other attachments."""
print 'Adding "%s" to %s' % (filename, bug_id)
self._attach(bug_id, filename, description, patch,
reviewer, content_type)
bug = self.get_bug(bug_id)
if comment:
print 'Adding the comment'
self._comment(bug_id, comment)
print 'Finding attachments to make obsolete...'
self.obsolete(bug)
def _attach(self, bug_id, filename, description, is_patch=False,
reviewer=None, content_type='text/plain'):
"""Create a new attachment."""
fields = {
'data': base64.b64encode(open(filename).read()),
'encoding': 'base64',
'file_name': filename,
'content_type': content_type,
'description': description,
'is_patch': is_patch,
}
if reviewer is not None:
fields['flags'] = [Flag(type_id=REVIEW, status='?',
requestee=User(name=reviewer))]
url = urljoin(self.API_ROOT, 'bug/%s/attachment?%s' % (bug_id, self.qs()))
return Attachment(**fields).post_to(url)
def _comment(self, bug_id, comment):
"""Create a new comment."""
url = urljoin(self.API_ROOT, 'bug/%s/comment?%s' % (bug_id, self.qs()))
return Comment(text=comment).post_to(url)
def obsolete(self, bug):
"""Ask what attachments should be obsoleted."""
attachments = [a for a in bug.attachments
if not bool(int(a.is_obsolete))]
if not attachments:
return
print "What attachments do you want to obsolete?"
msg = '[{index}] {a.id}: "{a.description}" ({a.file_name})'
for index, a in enumerate(attachments):
print msg.format(index=index, a=a)
numbers = raw_input('Enter the numbers (space-separated) of '
'attachments to make obsolete:\n').split()
if not numbers:
return
map_ = dict((str(index), a) for index, a in enumerate(attachments))
for num, _ in itertools.groupby(sorted(numbers)):
try:
self._obsolete(map_[num])
except KeyError:
pass
def _obsolete(self, attachment):
"""Mark an attachment obsolete."""
print "Obsoleting", attachment
attachment.is_obsolete = True
attachment._location += '?%s' % self.qs()
attachment.put()
def main():
# Script options
parser = argparse.ArgumentParser(description='Submit Bugzilla attachments')
parser.add_argument('bug_id',
type=int,
metavar='BUG',
help='Bug number')
parser.add_argument('filename',
metavar='FILE',
help='File to upload')
parser.add_argument('--description',
help='Attachment description',
required=True)
parser.add_argument('--patch',
action='store_true',
help='Is this a patch?')
parser.add_argument('--reviewer',
help='Bugzilla name of someone to r?')
parser.add_argument('--comment',
help='Comment for the attachment')
parser.add_argument('--content_type',
choices=FILE_TYPES,
help="File's content_type")
args = parser.parse_args()
if args.content_type:
args.content_type = FILE_TYPES[args.content_type]
# Get the API root, default to bugzilla.mozilla.org
API_ROOT = os.environ.get('BZ_API_ROOT',
'https://api-dev.bugzilla.mozilla.org/latest/')
# Authenticate
username, password = get_credentials()
# Load the agent
bz = AttachmentAgent(API_ROOT, username, password)
# Attach the file
bz.attach(**dict(args._get_kwargs()))
if __name__ == '__main__':
main()
|
LegNeato/bztools
|
scripts/attach.py
|
Python
|
bsd-3-clause
| 4,585
|
# Modules required for the functions here
import sys
import inspect
import numpy
import math
import copy
import random
import collections
import functools
# Find the require libraries for Windows. This change was prompted by how
# shared libaries are linked in Python 3.8
import ctypes
import ctypes.util
if sys.platform == 'win32':
try:
libs = [
'libstdc++-6.dll',
'libgcc_s_seh-1.dll',
'libgomp-1.dll',
'libquadmath-0.dll',
'libwinpthread-1.dll',
'libblas.dll',
'liblapack.dll',
'libjsoncpp.dll',
'liboptizelle.dll']
for lib in libs:
ctypes.cdll.LoadLibrary(ctypes.util.find_library(lib))
    except TypeError:
        raise OSError('Unable to find one of the libraries required by Optizelle')
# Import the Optizelle pieces, which actually depend on this module
from Optizelle.Enumerated import *
from Optizelle.Functions import *
import Optizelle.Unconstrained.State
import Optizelle.Unconstrained.Functions
import Optizelle.Unconstrained.Algorithms
import Optizelle.Unconstrained.Restart
import Optizelle.EqualityConstrained.State
import Optizelle.EqualityConstrained.Functions
import Optizelle.EqualityConstrained.Algorithms
import Optizelle.EqualityConstrained.Restart
import Optizelle.InequalityConstrained.State
import Optizelle.InequalityConstrained.Functions
import Optizelle.InequalityConstrained.Algorithms
import Optizelle.InequalityConstrained.Restart
import Optizelle.Constrained.State
import Optizelle.Constrained.Functions
import Optizelle.Constrained.Algorithms
import Optizelle.Constrained.Restart
import Optizelle.json.Serialization
import Optizelle.json.Unconstrained
import Optizelle.json.EqualityConstrained
import Optizelle.json.InequalityConstrained
import Optizelle.json.Constrained
import Optizelle.Messaging
import Optizelle.Exception
__all__ = [
"Unconstrained",
"InequalityConstrained",
"EqualityConstrained",
"Constrained",
"Utility"
"TruncatedStop",
"AlgorithmClass",
"OptimizationStop",
"Operators",
"LineSearchDirection",
"LineSearchKind",
"OptimizationLocation",
"ProblemClass",
"FunctionDiagnostics",
"DiagnosticScheme",
"ScalarValuedFunction",
"VectorValuedFunction",
"Operator",
"StateManipulator",
"Exception",
"Messaging",
"Rm"
]
__doc__ = "Optizelle optimization library"
class Rm(object):
"""Vector space for the nonnegative orthant. For basic vectors in R^m, use this."""
@staticmethod
def init(x):
"""Memory allocation and size setting"""
return copy.deepcopy(x)
@staticmethod
def copy(x,y):
"""y <- x (Shallow. No memory allocation.)"""
numpy.copyto(y,x)
@staticmethod
def scal(alpha,x):
"""x <- alpha * x"""
x.__imul__(alpha)
@staticmethod
def zero(x):
"""x <- 0"""
x.fill(0.)
@staticmethod
def axpy(alpha,x,y):
"""y <- alpha * x + y"""
y.__iadd__(alpha*x)
@staticmethod
def innr(x,y):
"""<- <x,y>"""
return numpy.inner(x,y)
@staticmethod
def rand(x):
"""x <- random"""
numpy.copyto(x,numpy.vectorize(lambda x:random.normalvariate(0.,1.))(x))
@staticmethod
def prod(x,y,z):
"""Jordan product, z <- x o y"""
numpy.copyto(z,x*y)
@staticmethod
def id(x):
"""Identity element, x <- e such that x o e = x"""
x.fill(1.)
@staticmethod
def linv(x,y,z):
"""Jordan product inverse, z <- inv(L(x)) y where L(x) y = x o y"""
numpy.copyto(z,numpy.divide(y,x))
@staticmethod
def barr(x):
"""Barrier function, <- barr(x) where x o grad barr(x) = e"""
if (x>0).all():
return functools.reduce(lambda x,y:x+math.log(y),x,0.)
else:
return float("nan")
@staticmethod
def srch(x,y):
"""Line search, <- argmax {alpha \in Real >= 0 : alpha x + y >= 0} where y > 0"""
alpha = float("inf")
for i in range(0,len(x)):
if x[i] < 0:
alpha0 = -y[i]/x[i]
if alpha0 < alpha:
alpha=alpha0
return alpha
@staticmethod
def symm(x):
"""Symmetrization, x <- symm(x) such that L(symm(x)) is a symmetric operator"""
pass
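# Usage sketch for Rm (illustrative only):
#
#   x = numpy.array([1., 2., 3.])
#   y = Rm.init(x)        # deep copy of x
#   Rm.axpy(2., x, y)     # y <- 2*x + y = [3., 6., 9.]
#   Rm.innr(x, y)         # <x, y> = 42.0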
class RestartPackage(tuple):
"""Holds restart information"""
def __new__ (cls):
return super(RestartPackage,cls).__new__(cls,tuple([[],[]]))
def __init__(self):
super(RestartPackage,self).__init__(tuple([[],[]]))
|
OptimoJoe/Optizelle
|
src/python/Optizelle/__init__.py
|
Python
|
bsd-2-clause
| 4,597
|
# -*- coding: utf-8 -*-
"""Test utilities."""
#
# (C) Pywikibot team, 2013-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import print_function, unicode_literals
__version__ = '$Id$'
#
import inspect
import json
import os
import re
import subprocess
import sys
import time
import traceback
import warnings
from collections import Mapping
from warnings import warn
if sys.version_info[0] > 2:
import six
unicode = str
import pywikibot
from pywikibot import config
from pywikibot.comms import threadedhttp
from pywikibot.site import Namespace
from pywikibot.data.api import CachedRequest
from pywikibot.data.api import Request as _original_Request
from pywikibot.tools import PYTHON_VERSION
from tests import _pwb_py
from tests import unittest # noqa
class DrySiteNote(RuntimeWarning):
"""Information regarding dry site."""
pass
def expected_failure_if(expect):
"""
Unit test decorator to expect failure under conditions.
@param expect: Flag to check if failure is expected
@type expect: bool
"""
if expect:
return unittest.expectedFailure
else:
return lambda orig: orig
def allowed_failure(func):
"""
Unit test decorator to allow failure.
Test runners each have different interpretations of what should be
the result of an @expectedFailure test if it succeeds. Some consider
it to be a pass; others a failure.
This decorator runs the test and, if it is a failure, reports the result
and considers it a skipped test.
"""
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError:
tb = traceback.extract_tb(sys.exc_info()[2])
for depth, line in enumerate(tb):
if re.match('^assert[A-Z]', line[2]):
break
tb = traceback.format_list(tb[:depth])
pywikibot.error('\n' + ''.join(tb)[:-1]) # remove \n at the end
raise unittest.SkipTest('Test is allowed to fail.')
except Exception:
pywikibot.exception(tb=True)
raise unittest.SkipTest('Test is allowed to fail.')
wrapper.__name__ = func.__name__
return wrapper
def allowed_failure_if(expect):
"""
Unit test decorator to allow failure under conditions.
@param expect: Flag to check if failure is allowed
@type expect: bool
"""
if expect:
return allowed_failure
else:
return lambda orig: orig
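# Usage sketch (hypothetical test case; illustrative only):
#
#   class ExampleTestCase(unittest.TestCase):
#
#       @expected_failure_if(sys.platform == 'win32')
#       def test_posix_only_behaviour(self):
#           ...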
def add_metaclass(cls):
"""Call six's add_metaclass with the site's __metaclass__ in Python 3."""
if sys.version_info[0] > 2:
return six.add_metaclass(cls.__metaclass__)(cls)
else:
assert cls.__metaclass__
return cls
def fixed_generator(iterable):
"""Return a dummy generator ignoring all parameters."""
def gen(*args, **kwargs):
for item in iterable:
yield item
return gen
def entered_loop(iterable):
"""Return True if iterable contains items."""
for iterable_item in iterable:
return True
return False
class WarningSourceSkipContextManager(warnings.catch_warnings):
"""
Warning context manager that adjusts source of warning.
The source of the warning will be moved further down the
stack to skip a list of objects that have been monkey
patched into the call stack.
"""
def __init__(self, skip_list):
"""
Constructor.
@param skip_list: List of objects to be skipped
@type skip_list: list of object or (obj, str, int, int)
"""
super(WarningSourceSkipContextManager, self).__init__(record=True)
self.skip_list = skip_list
@property
def skip_list(self):
"""
Return list of filename and line ranges to skip.
@rtype: list of (obj, str, int, int)
"""
return self._skip_list
@skip_list.setter
def skip_list(self, value):
"""
Set list of objects to be skipped.
@param value: List of objects to be skipped
@type value: list of object or (obj, str, int, int)
"""
self._skip_list = []
for item in value:
if isinstance(item, tuple):
self._skip_list.append(item)
else:
filename = inspect.getsourcefile(item)
code, first_line = inspect.getsourcelines(item)
last_line = first_line + len(code)
self._skip_list.append(
(item, filename, first_line, last_line))
def __enter__(self):
"""Enter the context manager."""
def detailed_show_warning(*args, **kwargs):
"""warnings.showwarning replacement handler."""
entry = warnings.WarningMessage(*args, **kwargs)
skip_lines = 0
entry_line_found = False
for (_, filename, fileno, _, line, _) in inspect.stack():
if any(start <= fileno <= end
for (_, skip_filename, start, end) in self.skip_list
if skip_filename == filename):
if entry_line_found:
continue
else:
skip_lines += 1
if (filename, fileno) == (entry.filename, entry.lineno):
if not skip_lines:
break
entry_line_found = True
if entry_line_found:
if not skip_lines:
(entry.filename, entry.lineno) = (filename, fileno)
break
else:
skip_lines -= 1
log.append(entry)
log = super(WarningSourceSkipContextManager, self).__enter__()
self._module.showwarning = detailed_show_warning
return log
class DryParamInfo(dict):
"""Dummy class to use instead of L{pywikibot.data.api.ParamInfo}."""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(DryParamInfo, self).__init__(*args, **kwargs)
self.modules = set()
self.action_modules = set()
self.query_modules = set()
self.query_modules_with_limits = set()
self.prefixes = set()
def fetch(self, modules, _init=False):
"""Prevented method."""
raise Exception(u'DryParamInfo.fetch(%r, %r) prevented'
% (modules, _init))
def parameter(self, module, param_name):
"""Load dry data."""
return self[module][param_name]
class DummySiteinfo():
"""Dummy class to use instead of L{pywikibot.site.Siteinfo}."""
def __init__(self, cache):
"""Constructor."""
self._cache = dict((key, (item, False)) for key, item in cache.items())
def __getitem__(self, key):
"""Get item."""
return self.get(key, False)
def __setitem__(self, key, value):
"""Set item."""
self._cache[key] = (value, False)
def get(self, key, get_default=True, cache=True, expiry=False):
"""Return dry data."""
# Default values are always expired, so only expiry=False doesn't force
# a reload
force = expiry is not False
if not force and key in self._cache:
loaded = self._cache[key]
if not loaded[1] and not get_default:
raise KeyError(key)
else:
return loaded[0]
elif get_default:
default = pywikibot.site.Siteinfo._get_default(key)
if cache:
self._cache[key] = (default, False)
return default
else:
raise KeyError(key)
def __contains__(self, key):
"""Return False."""
return False
def is_recognised(self, key):
"""Return None."""
return None
def get_requested_time(self, key):
"""Return False."""
return False
class DryRequest(CachedRequest):
"""Dummy class to use instead of L{pywikibot.data.api.Request}."""
def __init__(self, *args, **kwargs):
"""Constructor."""
_original_Request.__init__(self, *args, **kwargs)
@classmethod
def create_simple(cls, **kwargs):
"""Skip CachedRequest implementation."""
return _original_Request.create_simple(**kwargs)
def _expired(self, dt):
"""Never invalidate cached data."""
return False
def _write_cache(self, data):
"""Never write data."""
return
def submit(self):
"""Prevented method."""
raise Exception(u'DryRequest rejecting request: %r'
% self._params)
class DrySite(pywikibot.site.APISite):
"""Dummy class to use instead of L{pywikibot.site.APISite}."""
_loginstatus = pywikibot.site.LoginStatus.NOT_ATTEMPTED
def __init__(self, code, fam, user, sysop):
"""Constructor."""
super(DrySite, self).__init__(code, fam, user, sysop)
self._userinfo = pywikibot.tools.EMPTY_DEFAULT
self._paraminfo = DryParamInfo()
self._siteinfo = DummySiteinfo({})
self._siteinfo._cache['lang'] = (code, True)
self._siteinfo._cache['case'] = (
'case-sensitive' if self.family.name == 'wiktionary' else
'first-letter', True)
extensions = []
if self.family.name == 'wikisource':
extensions.append({'name': 'ProofreadPage'})
self._siteinfo._cache['extensions'] = (extensions, True)
def _build_namespaces(self):
return Namespace.builtin_namespaces(case=self.siteinfo['case'])
def __repr__(self):
"""Override default so warnings and errors indicate test is dry."""
return "%s(%r, %r)" % (self.__class__.__name__,
self.code,
self.family.name)
@property
def userinfo(self):
"""Return dry data."""
return self._userinfo
def version(self):
"""Dummy version, with warning to show the callers context."""
warn('%r returning version 1.24; override if unsuitable.'
% self, DrySiteNote, stacklevel=2)
return '1.24'
def image_repository(self):
"""Return Site object for image repository e.g. commons."""
code, fam = self.shared_image_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username(),
interface=self.__class__)
def data_repository(self):
"""Return Site object for data repository e.g. Wikidata."""
code, fam = self.shared_data_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username(),
interface=DryDataSite)
class DryDataSite(DrySite, pywikibot.site.DataSite):
"""Dummy class to use instead of L{pywikibot.site.DataSite}."""
def _build_namespaces(self):
namespaces = super(DryDataSite, self)._build_namespaces()
namespaces[0].defaultcontentmodel = 'wikibase-item'
namespaces[120] = Namespace(id=120,
case='first-letter',
canonical_name='Property',
defaultcontentmodel='wikibase-property')
return namespaces
class DryPage(pywikibot.Page):
"""Dummy class that acts like a Page but avoids network activity."""
_pageid = 1
_disambig = False
_isredir = False
def isDisambig(self):
"""Return disambig status stored in _disambig."""
return self._disambig
class FakeLoginManager(pywikibot.data.api.LoginManager):
"""Loads a fake password."""
@property
def password(self):
"""Get the fake password."""
return 'foo'
@password.setter
def password(self, value):
"""Ignore password changes."""
pass
class DummyHttp(object):
"""A class simulating the http module."""
def __init__(self, wrapper):
"""Constructor with the given PatchedHttp instance."""
self.__wrapper = wrapper
def request(self, *args, **kwargs):
"""The patched request method."""
result = self.__wrapper.before_request(*args, **kwargs)
if result is False:
result = self.__wrapper._old_http.request(*args, **kwargs)
elif isinstance(result, Mapping):
result = json.dumps(result)
elif not isinstance(result, unicode):
raise ValueError('The result is not a valid type '
'"{0}"'.format(type(result)))
response = self.__wrapper.after_request(result, *args, **kwargs)
if response is None:
response = result
return response
def fetch(self, *args, **kwargs):
"""The patched fetch method."""
result = self.__wrapper.before_fetch(*args, **kwargs)
if result is False:
result = self.__wrapper._old_http.fetch(*args, **kwargs)
elif not isinstance(result, threadedhttp.HttpRequest):
raise ValueError('The result is not a valid type '
'"{0}"'.format(type(result)))
response = self.__wrapper.after_fetch(result, *args, **kwargs)
if response is None:
response = result
return response
class PatchedHttp(object):
"""
A ContextWrapper to handle any data going through the http module.
This patches the C{http} import in the given module to a class simulating
C{request} and C{fetch}. It has a C{data} attribute which is either a
static value which the requests will return or it's a callable returning the
data. If it's a callable it'll be called with the same parameters as the
original function in the L{http} module. For fine grained control it's
possible to override/monkey patch the C{before_request} and C{before_fetch}
    methods. By default they just return C{data} directly or call it if it's
callable.
Even though L{http.request} is calling L{http.fetch}, it won't call the
patched method.
The data returned for C{request} may either be C{False}, a C{unicode} or a
C{Mapping} which is converted into a json string. The data returned for
C{fetch} can only be C{False} or a L{threadedhttp.HttpRequest}. For both
variants any other types are not allowed and if it is False it'll use the
original method and do an actual request.
Afterwards it is always calling C{after_request} or C{after_fetch} with the
response and given arguments. That can return a different response too, but
can also return None so that the original response is forwarded.
"""
def __init__(self, module, data=None):
"""
Constructor.
@param module: The given module to patch. It must have the http module
imported as http.
@type module: Module
@param data: The data returned for any request or fetch.
@type data: callable or False (or other depending on request/fetch)
"""
super(PatchedHttp, self).__init__()
self._module = module
self.data = data
def _handle_data(self, *args, **kwargs):
"""Return the data after it may have been called."""
if self.data is None:
raise ValueError('No handler is defined.')
elif callable(self.data):
return self.data(*args, **kwargs)
else:
return self.data
def before_request(self, *args, **kwargs):
"""Return the value which should is returned by request."""
return self._handle_data(*args, **kwargs)
def before_fetch(self, *args, **kwargs):
"""Return the value which should is returned by fetch."""
return self._handle_data(*args, **kwargs)
def after_request(self, response, *args, **kwargs):
"""Handle the response after request."""
pass
def after_fetch(self, response, *args, **kwargs):
"""Handle the response after fetch."""
pass
def __enter__(self):
"""Patch the http module property."""
self._old_http = self._module.http
self._module.http = DummyHttp(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Reset the http module property."""
self._module.http = self._old_http
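# Usage sketch (hypothetical module 'mymodule' that has done
# 'from pywikibot.comms import http'; illustrative only):
#
#   with PatchedHttp(mymodule, data='{"result": "ok"}'):
#       mymodule.http.request(...)   # returns the canned string, no network I/O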
def execute(command, data_in=None, timeout=0, error=None):
"""
Execute a command and capture outputs.
@param command: executable to run and arguments to use
@type command: list of unicode
"""
# Any environment variables added on Windows must be of type
# str() on Python 2.
env = os.environ.copy()
# Python issue 6906
if PYTHON_VERSION < (2, 6, 6):
for var in ('TK_LIBRARY', 'TCL_LIBRARY', 'TIX_LIBRARY'):
if var in env:
env[var] = env[var].encode('mbcs')
# Prevent output by test package; e.g. 'max_retries reduced from x to y'
env[str('PYWIKIBOT_TEST_QUIET')] = str('1')
# sys.path may have been modified by the test runner to load dependencies.
pythonpath = os.pathsep.join(sys.path)
if sys.platform == 'win32' and sys.version_info[0] < 3:
pythonpath = str(pythonpath)
env[str('PYTHONPATH')] = pythonpath
env[str('PYTHONIOENCODING')] = str(config.console_encoding)
# LC_ALL is used by i18n.input as an alternative for userinterface_lang
if pywikibot.config.userinterface_lang:
env[str('LC_ALL')] = str(pywikibot.config.userinterface_lang)
# Set EDITOR to an executable that ignores all arguments and does nothing.
env[str('EDITOR')] = str('call' if sys.platform == 'win32' else 'true')
options = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE
}
if data_in is not None:
options['stdin'] = subprocess.PIPE
try:
p = subprocess.Popen(command, env=env, **options)
except TypeError as e:
# Generate a more informative error
if sys.platform == 'win32' and sys.version_info[0] < 3:
unicode_env = [(k, v) for k, v in os.environ.items()
if not isinstance(k, str) or
not isinstance(v, str)]
if unicode_env:
raise TypeError(
'%s: unicode in os.environ: %r' % (e, unicode_env))
child_unicode_env = [(k, v) for k, v in env.items()
if not isinstance(k, str) or
not isinstance(v, str)]
if child_unicode_env:
raise TypeError(
'%s: unicode in child env: %r' % (e, child_unicode_env))
raise
if data_in is not None:
p.stdin.write(data_in.encode(config.console_encoding))
p.stdin.flush() # _communicate() otherwise has a broken pipe
stderr_lines = b''
waited = 0
while (error or (waited < timeout)) and p.poll() is None:
# In order to kill 'shell' and others early, read only a single
# line per second, and kill the process as soon as the expected
# output has been seen.
# Additional lines will be collected later with p.communicate()
if error:
line = p.stderr.readline()
stderr_lines += line
if error in line.decode(config.console_encoding):
break
time.sleep(1)
waited += 1
if (timeout or error) and p.poll() is None:
p.kill()
if p.poll() is not None:
stderr_lines += p.stderr.read()
data_out = p.communicate()
return {'exit_code': p.returncode,
'stdout': data_out[0].decode(config.console_encoding),
'stderr': (stderr_lines + data_out[1]).decode(config.console_encoding)}
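# Illustrative sketch (not part of the original module): invoking execute()
# and reading its result dict. The command shown is hypothetical.
def _example_execute():
    result = execute([sys.executable, '--version'], timeout=30)
    assert result['exit_code'] == 0
    print(result['stdout'], result['stderr'])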
def execute_pwb(args, data_in=None, timeout=0, error=None, overrides=None):
"""
Execute the pwb.py script and capture outputs.
@param args: list of arguments for pwb.py
@type args: list of unicode
@param overrides: mapping of pywikibot symbols to test replacements
@type overrides: dict
"""
command = [sys.executable]
if overrides:
command.append('-c')
overrides = '; '.join(
'%s = %s' % (key, value) for key, value in overrides.items())
command.append(
'import pwb; import pywikibot; %s; pwb.main()'
% overrides)
else:
command.append(_pwb_py)
return execute(command=command + args,
data_in=data_in, timeout=timeout, error=error)
|
valhallasw/pywikibot-core
|
tests/utils.py
|
Python
|
mit
| 20,611
|
"""
Here is probably the place to write the docs, since the test-cases
show how the types behave.
Later...
"""
from ctypes import *
from ctypes.test import need_symbol
import os, sys, unittest
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
if sys.platform == "win32" or (sys.platform == "cli" and os.name == "nt"):
windll = WinDLL(_ctypes_test.__file__)
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("left", c_int), ("top", c_int),
("right", c_int), ("bottom", c_int)]
class FunctionTestCase(unittest.TestCase):
def test_mro(self):
        # in Python 2.3, this raises TypeError: MRO conflict among base classes,
# in Python 2.2 it works.
#
# But in early versions of _ctypes.c, the result of tp_new
# wasn't checked, and it even crashed Python.
# Found by Greg Chapman.
try:
class X(object, Array):
_length_ = 5
_type_ = "i"
except TypeError:
pass
from _ctypes import _Pointer
try:
class X(object, _Pointer):
pass
except TypeError:
pass
from _ctypes import _SimpleCData
try:
class X(object, _SimpleCData):
_type_ = "i"
except TypeError:
pass
try:
class X(object, Structure):
_fields_ = []
except TypeError:
pass
@need_symbol('c_wchar')
def test_wchar_parm(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(1, u"x", 3, 4, 5.0, 6.0)
self.assertEqual(result, 139)
self.assertEqual(type(result), int)
@need_symbol('c_wchar')
def test_wchar_result(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_wchar
result = f(0, 0, 0, 0, 0, 0)
self.assertEqual(result, u'\x00')
def test_voidresult(self):
f = dll._testfunc_v
f.restype = None
f.argtypes = [c_int, c_int, POINTER(c_int)]
result = c_int()
self.assertEqual(None, f(1, 2, byref(result)))
self.assertEqual(result.value, 3)
def test_intresult(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_int
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), int)
# If we declare the function to return a short,
# is the high part split off?
f.restype = c_short
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(1, 2, 3, 0x10004, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
# You cannot assign character format codes as restype any longer
self.assertRaises(TypeError, setattr, f, "restype", "i")
def test_floatresult(self):
f = dll._testfunc_f_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_float
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_doubleresult(self):
f = dll._testfunc_d_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_double
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
@unittest.skipIf(sys.platform=='cli' and os.name=='posix', 'long double on Linux - https://github.com/IronLanguages/ironpython2/issues/408')
def test_longdoubleresult(self):
f = dll._testfunc_D_bhilfD
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_longdouble]
f.restype = c_longdouble
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
@need_symbol('c_longlong')
def test_longlongresult(self):
f = dll._testfunc_q_bhilfd
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
f = dll._testfunc_q_bhilfdq
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double, c_longlong]
result = f(1, 2, 3, 4, 5.0, 6.0, 21)
self.assertEqual(result, 42)
@unittest.skipIf(sys.platform=='cli', 'TODO: debug this test on IronPython - https://github.com/IronLanguages/ironpython2/issues/394')
def test_stringresult(self):
f = dll._testfunc_p_p
f.argtypes = None
f.restype = c_char_p
result = f("123")
self.assertEqual(result, "123")
result = f(None)
self.assertEqual(result, None)
@unittest.skipIf(sys.platform=='cli', 'TODO: debug this test on IronPython - https://github.com/IronLanguages/ironpython2/issues/389')
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(pointer(v))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
        # This one works...
result = f(pointer(v))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(p)
self.assertEqual(result.contents.value, 99)
arg = byref(v)
result = f(arg)
self.assertNotEqual(result.contents, v.value)
self.assertRaises(ArgumentError, f, byref(c_short(22)))
# It is dangerous, however, because you don't control the lifetime
# of the pointer:
result = f(byref(c_int(99)))
self.assertNotEqual(result.contents, 99)
def test_errors(self):
f = dll._testfunc_p_p
f.restype = c_int
class X(Structure):
_fields_ = [("y", c_int)]
self.assertRaises(TypeError, f, X()) #cannot convert parameter
################################################################
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(2**18, cb)
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
f.argtypes = None
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
AnotherCallback = WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, -10, cb)
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
@need_symbol('c_longlong')
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertIsInstance(value, (int, long))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, f(1000000000000, cb))
    def test_dll_errors(self):
self.assertRaises(AttributeError, getattr, dll, "_xxx_yyy")
self.assertRaises(ValueError, c_int.in_dll, dll, "_xxx_yyy")
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(inp)
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
def test_struct_return_2H_stdcall(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
windll.s_ret_2h_func.restype = S2H
windll.s_ret_2h_func.argtypes = [S2H]
s2h = windll.s_ret_2h_func(S2H(99, 88))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(inp)
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
def test_struct_return_8H_stdcall(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
windll.s_ret_8i_func.restype = S8I
windll.s_ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = windll.s_ret_8i_func(inp)
self.assertEqual(
(s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_sf1651235(self):
# see http://www.python.org/sf/1651235
proto = CFUNCTYPE(c_int, RECT, POINT)
def callback(*args):
return 0
callback = proto(callback)
self.assertRaises(ArgumentError, lambda: callback((1, 2, 3, 4), POINT()))
if __name__ == '__main__':
unittest.main()
|
slozier/ironpython2
|
Src/StdLib/Lib/ctypes/test/test_functions.py
|
Python
|
apache-2.0
| 13,037
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
import unittest
from contextlib import contextmanager
from textwrap import dedent
from unittest import skipIf
from pants.scm.git import Git
from pants.scm.scm import Scm
from pants.testutil.git_util import MIN_REQUIRED_GIT_VERSION, git_version
from pants.util.contextutil import environment_as, pushd, temporary_dir
from pants.util.dirutil import chmod_plus_x, safe_mkdir, safe_mkdtemp, safe_open, safe_rmtree, touch
@skipIf(
git_version() < MIN_REQUIRED_GIT_VERSION,
f"The GitTest requires git >= {MIN_REQUIRED_GIT_VERSION}.",
)
class GitTest(unittest.TestCase):
@staticmethod
def init_repo(remote_name, remote):
# TODO (peiyu) clean this up, use `git_util.initialize_repo`.
subprocess.check_call(["git", "init"])
subprocess.check_call(["git", "config", "user.email", "you@example.com"])
subprocess.check_call(["git", "config", "user.name", "Your Name"])
subprocess.check_call(["git", "config", "commit.gpgSign", "false"])
subprocess.check_call(["git", "remote", "add", remote_name, remote])
def setUp(self):
self.origin = safe_mkdtemp()
with pushd(self.origin):
subprocess.check_call(["git", "init", "--bare"])
self.gitdir = safe_mkdtemp()
self.worktree = safe_mkdtemp()
self.readme_file = os.path.join(self.worktree, "README")
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
self.init_repo("depot", self.origin)
touch(self.readme_file)
subprocess.check_call(["git", "add", "README"])
safe_mkdir(os.path.join(self.worktree, "dir"))
with open(os.path.join(self.worktree, "dir", "f"), "w") as f:
f.write("file in subdir")
# Make some symlinks
os.symlink("f", os.path.join(self.worktree, "dir", "relative-symlink"))
os.symlink("no-such-file", os.path.join(self.worktree, "dir", "relative-nonexistent"))
os.symlink("dir/f", os.path.join(self.worktree, "dir", "not-absolute\u2764"))
os.symlink("../README", os.path.join(self.worktree, "dir", "relative-dotdot"))
os.symlink("dir", os.path.join(self.worktree, "link-to-dir"))
os.symlink("README/f", os.path.join(self.worktree, "not-a-dir"))
os.symlink("loop1", os.path.join(self.worktree, "loop2"))
os.symlink("loop2", os.path.join(self.worktree, "loop1"))
subprocess.check_call(
["git", "add", "README", "dir", "loop1", "loop2", "link-to-dir", "not-a-dir"]
)
subprocess.check_call(["git", "commit", "-am", "initial commit with decode -> \x81b"])
self.initial_rev = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
subprocess.check_call(["git", "tag", "first"])
subprocess.check_call(["git", "push", "--tags", "depot", "master"])
subprocess.check_call(["git", "branch", "--set-upstream-to", "depot/master"])
with safe_open(self.readme_file, "wb") as readme:
readme.write("Hello World.\u2764".encode())
subprocess.check_call(["git", "commit", "-am", "Update README."])
self.current_rev = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
self.clone2 = safe_mkdtemp()
with pushd(self.clone2):
self.init_repo("origin", self.origin)
subprocess.check_call(["git", "pull", "--tags", "origin", "master:master"])
with safe_open(os.path.realpath("README"), "a") as readme:
readme.write("--")
subprocess.check_call(["git", "commit", "-am", "Update README 2."])
subprocess.check_call(["git", "push", "--tags", "origin", "master"])
self.git = Git(gitdir=self.gitdir, worktree=self.worktree)
@contextmanager
def mkremote(self, remote_name):
with temporary_dir() as remote_uri:
subprocess.check_call(["git", "remote", "add", remote_name, remote_uri])
try:
yield remote_uri
finally:
subprocess.check_call(["git", "remote", "remove", remote_name])
def tearDown(self):
safe_rmtree(self.origin)
safe_rmtree(self.gitdir)
safe_rmtree(self.worktree)
safe_rmtree(self.clone2)
def test_listdir(self):
reader = self.git.repo_reader(self.initial_rev)
for dirname in ".", "./.":
results = reader.listdir(dirname)
self.assertEqual(
[b"README", b"dir", b"link-to-dir", b"loop1", b"loop2", b"not-a-dir"],
sorted(results),
)
for dirname in "dir", "./dir":
results = reader.listdir(dirname)
self.assertEqual(
[
b"f",
"not-absolute\u2764".encode(),
b"relative-dotdot",
b"relative-nonexistent",
b"relative-symlink",
],
sorted(results),
)
results = reader.listdir("link-to-dir")
self.assertEqual(
[
b"f",
"not-absolute\u2764".encode(),
b"relative-dotdot",
b"relative-nonexistent",
b"relative-symlink",
],
sorted(results),
)
with self.assertRaises(reader.MissingFileException):
with reader.listdir("bogus"):
pass
def test_lstat(self):
reader = self.git.repo_reader(self.initial_rev)
def lstat(*components):
return type(reader.lstat(os.path.join(*components)))
self.assertEqual(reader.Symlink, lstat("dir", "relative-symlink"))
self.assertEqual(reader.Symlink, lstat("not-a-dir"))
self.assertEqual(reader.File, lstat("README"))
self.assertEqual(reader.Dir, lstat("dir"))
self.assertEqual(type(None), lstat("nope-not-here"))
def test_readlink(self):
reader = self.git.repo_reader(self.initial_rev)
def readlink(*components):
return reader.readlink(os.path.join(*components))
self.assertEqual("dir/f", readlink("dir", "relative-symlink"))
self.assertEqual(None, readlink("not-a-dir"))
self.assertEqual(None, readlink("README"))
self.assertEqual(None, readlink("dir"))
self.assertEqual(None, readlink("nope-not-here"))
def test_open(self):
reader = self.git.repo_reader(self.initial_rev)
with reader.open("README") as f:
self.assertEqual(b"", f.read())
with reader.open("dir/f") as f:
self.assertEqual(b"file in subdir", f.read())
with self.assertRaises(reader.MissingFileException):
with reader.open("no-such-file") as f:
self.assertEqual(b"", f.read())
with self.assertRaises(reader.MissingFileException):
with reader.open("dir/no-such-file") as f:
pass
with self.assertRaises(reader.IsDirException):
with reader.open("dir") as f:
self.assertEqual(b"", f.read())
current_reader = self.git.repo_reader(self.current_rev)
with current_reader.open("README") as f:
self.assertEqual("Hello World.\u2764".encode(), f.read())
with current_reader.open("link-to-dir/f") as f:
self.assertEqual(b"file in subdir", f.read())
with current_reader.open("dir/relative-symlink") as f:
self.assertEqual(b"file in subdir", f.read())
with self.assertRaises(current_reader.SymlinkLoopException):
with current_reader.open("loop1") as f:
pass
with self.assertRaises(current_reader.MissingFileException):
with current_reader.open("dir/relative-nonexistent") as f:
pass
with self.assertRaises(current_reader.NotADirException):
with current_reader.open("not-a-dir") as f:
pass
with self.assertRaises(current_reader.MissingFileException):
with current_reader.open("dir/not-absolute\u2764") as f:
pass
with self.assertRaises(current_reader.MissingFileException):
with current_reader.open("dir/relative-nonexistent") as f:
pass
with current_reader.open("dir/relative-dotdot") as f:
self.assertEqual("Hello World.\u2764".encode(), f.read())
def test_integration(self):
self.assertEqual(set(), self.git.changed_files())
self.assertEqual({"README"}, self.git.changed_files(from_commit="HEAD^"))
tip_sha = self.git.commit_id
self.assertTrue(tip_sha)
self.assertTrue(tip_sha in self.git.changelog())
merge_base = self.git.merge_base()
self.assertTrue(merge_base)
self.assertTrue(merge_base in self.git.changelog())
with self.assertRaises(Scm.LocalException):
self.git.server_url
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
with self.mkremote("origin") as origin_uri:
# We shouldn't be fooled by remotes with origin in their name.
with self.mkremote("temp_origin"):
origin_url = self.git.server_url
self.assertEqual(origin_url, origin_uri)
self.assertTrue(
self.git.tag_name.startswith("first-"), msg="un-annotated tags should be found"
)
self.assertEqual("master", self.git.branch_name)
def edit_readme():
with open(self.readme_file, "a") as fp:
fp.write("More data.")
edit_readme()
with open(os.path.join(self.worktree, "INSTALL"), "w") as untracked:
untracked.write("make install")
self.assertEqual({"README"}, self.git.changed_files())
self.assertEqual({"README", "INSTALL"}, self.git.changed_files(include_untracked=True))
# Confirm that files outside of a given relative_to path are ignored
self.assertEqual(set(), self.git.changed_files(relative_to="non-existent"))
self.git.commit("API Changes.")
try:
# These changes should be rejected because our branch point from origin is 1 commit behind
# the changes pushed there in clone 2.
self.git.push()
except Scm.RemoteException:
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
subprocess.check_call(["git", "reset", "--hard", "depot/master"])
self.git.refresh()
edit_readme()
self.git.commit("""API '"' " Changes.""")
self.git.push()
# HEAD is merged into master
self.assertEqual(self.git.commit_date(self.git.merge_base()), self.git.commit_date("HEAD"))
self.assertEqual(self.git.commit_date("HEAD"), self.git.commit_date("HEAD"))
self.git.tag("second", message="""Tagged ' " Changes""")
with temporary_dir() as clone:
with pushd(clone):
self.init_repo("origin", self.origin)
subprocess.check_call(["git", "pull", "--tags", "origin", "master:master"])
with open(os.path.realpath("README"), "r") as readme:
self.assertEqual("--More data.", readme.read())
git = Git()
# Check that we can pick up committed and uncommitted changes.
with safe_open(os.path.realpath("CHANGES"), "w") as changes:
changes.write("none")
subprocess.check_call(["git", "add", "CHANGES"])
self.assertEqual({"README", "CHANGES"}, git.changed_files(from_commit="first"))
self.assertEqual("master", git.branch_name)
self.assertEqual("second", git.tag_name, msg="annotated tags should be found")
def test_detect_worktree(self):
with temporary_dir() as _clone:
with pushd(_clone):
clone = os.path.realpath(_clone)
self.init_repo("origin", self.origin)
subprocess.check_call(["git", "pull", "--tags", "origin", "master:master"])
def worktree_relative_to(cwd, expected):
# Given a cwd relative to the worktree, tests that the worktree is detected as 'expected'.
orig_cwd = os.getcwd()
try:
abs_cwd = os.path.join(clone, cwd)
if not os.path.isdir(abs_cwd):
os.mkdir(abs_cwd)
os.chdir(abs_cwd)
actual = Git.detect_worktree()
self.assertEqual(expected, actual)
finally:
os.chdir(orig_cwd)
worktree_relative_to("..", None)
worktree_relative_to(".", clone)
worktree_relative_to("is", clone)
worktree_relative_to("is/a", clone)
worktree_relative_to("is/a/dir", clone)
def test_detect_worktree_no_cwd(self):
with temporary_dir() as _clone:
with pushd(_clone):
clone = os.path.realpath(_clone)
self.init_repo("origin", self.origin)
subprocess.check_call(["git", "pull", "--tags", "origin", "master:master"])
def worktree_relative_to(some_dir, expected):
# Given a directory relative to the worktree, tests that the worktree is detected as 'expected'.
subdir = os.path.join(clone, some_dir)
if not os.path.isdir(subdir):
os.mkdir(subdir)
actual = Git.detect_worktree(subdir=subdir)
self.assertEqual(expected, actual)
worktree_relative_to("..", None)
worktree_relative_to(".", clone)
worktree_relative_to("is", clone)
worktree_relative_to("is/a", clone)
worktree_relative_to("is/a/dir", clone)
    def test_changes_in(self):
        """Test finding changes in a diffspec.
        To some extent this is just testing functionality of git not pants, since all pants says is
        that it will pass the diffspec to git diff-tree, but this should serve to at least document
        the functionality we believe works.
        """
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
def commit_contents_to_files(content, *files):
for path in files:
with safe_open(os.path.join(self.worktree, path), "w") as fp:
fp.write(content)
subprocess.check_call(["git", "add", "."])
subprocess.check_call(["git", "commit", "-m", f"change {files}"])
return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
# We can get changes in HEAD or by SHA
c1 = commit_contents_to_files("1", "foo")
self.assertEqual({"foo"}, self.git.changes_in("HEAD"))
self.assertEqual({"foo"}, self.git.changes_in(c1))
# Changes in new HEAD, from old-to-new HEAD, in old HEAD, or from old-old-head to new.
commit_contents_to_files("2", "bar")
self.assertEqual({"bar"}, self.git.changes_in("HEAD"))
self.assertEqual({"bar"}, self.git.changes_in("HEAD^..HEAD"))
self.assertEqual({"foo"}, self.git.changes_in("HEAD^"))
self.assertEqual({"foo"}, self.git.changes_in("HEAD~1"))
self.assertEqual({"foo", "bar"}, self.git.changes_in("HEAD^^..HEAD"))
# New commit doesn't change results-by-sha
self.assertEqual({"foo"}, self.git.changes_in(c1))
# Files changed in multiple diffs within a range
c3 = commit_contents_to_files("3", "foo")
self.assertEqual({"foo", "bar"}, self.git.changes_in(f"{c1}..{c3}"))
# Changes in a tag
subprocess.check_call(["git", "tag", "v1"])
self.assertEqual({"foo"}, self.git.changes_in("v1"))
# Introduce a new filename
c4 = commit_contents_to_files("4", "baz")
self.assertEqual({"baz"}, self.git.changes_in("HEAD"))
# Tag-to-sha
self.assertEqual({"baz"}, self.git.changes_in(f"v1..{c4}"))
# We can get multiple changes from one ref
commit_contents_to_files("5", "foo", "bar")
self.assertEqual({"foo", "bar"}, self.git.changes_in("HEAD"))
self.assertEqual({"foo", "bar", "baz"}, self.git.changes_in("HEAD~4..HEAD"))
self.assertEqual({"foo", "bar", "baz"}, self.git.changes_in(f"{c1}..HEAD"))
self.assertEqual({"foo", "bar", "baz"}, self.git.changes_in(f"{c1}..{c4}"))
def test_changelog_utf8(self):
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
def commit_contents_to_files(message, encoding, content, *files):
for path in files:
with safe_open(os.path.join(self.worktree, path), "w") as fp:
fp.write(content)
subprocess.check_call(["git", "add", "."])
subprocess.check_call(
["git", "config", "--local", "--add", "i18n.commitencoding", encoding]
)
subprocess.check_call(["git", "config", "--local", "commit.gpgSign", "false"])
try:
subprocess.check_call(["git", "commit", "-m", message.encode(encoding)])
finally:
subprocess.check_call(
["git", "config", "--local", "--unset-all", "i18n.commitencoding"]
)
return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
            # Mix in a non-UTF-8 author to all commits to check that the corner case described here
            # does not adversely impact the ability to render the changelog (even if rendering for
            # certain characters is incorrect): http://comments.gmane.org/gmane.comp.version-control.git/262685
# NB: This method of override requires we include `user.name` and `user.email` even though we
# only use `user.name` to exercise non-UTF-8. Without `user.email`, it will be unset and
# commits can then fail on machines without a proper hostname setup for git to fall back to
# when concocting a last-ditch `user.email`.
non_utf8_config = dedent(
"""
[user]
name = Noralf Trønnes
email = noralf@example.com
"""
).encode("iso-8859-1")
with open(os.path.join(self.gitdir, "config"), "wb") as fp:
fp.write(non_utf8_config)
# Note the copyright symbol is used as the non-ascii character in the next 3 commits
commit_contents_to_files("START1 © END", "iso-8859-1", "1", "foo")
commit_contents_to_files("START2 © END", "latin1", "1", "bar")
commit_contents_to_files("START3 © END", "utf-8", "1", "baz")
commit_contents_to_files("START4 ~ END", "us-ascii", "1", "bip")
# Prove our non-utf-8 encodings were stored in the commit metadata.
log = subprocess.check_output(["git", "log", "--format=%e"])
self.assertEqual(
[b"us-ascii", b"latin1", b"iso-8859-1"],
[_f for _f in log.strip().splitlines() if _f],
)
            # And show that the git log successfully transcodes all the commits nonetheless to utf-8
            changelog = self.git.changelog()
            # The ascii commit should combine with the iso-8859-1 author and fail to transcode the
            # o-with-stroke character, and so it should be replaced with the utf-8 replacement
            # character \uFFFD or �.
self.assertIn("Noralf Tr�nnes", changelog)
self.assertIn("Noralf Tr\uFFFDnnes", changelog)
            # For the other 3 commits, each of iso-8859-1, latin1 and utf-8 has an encoding for the
            # o-with-stroke character - \u00F8 or ø - so we should find it:
self.assertIn("Noralf Trønnes", changelog)
self.assertIn("Noralf Tr\u00F8nnes", changelog)
self.assertIn("START1 © END", changelog)
self.assertIn("START2 © END", changelog)
self.assertIn("START3 © END", changelog)
self.assertIn("START4 ~ END", changelog)
def test_refresh_with_conflict(self):
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
self.assertEqual(set(), self.git.changed_files())
self.assertEqual({"README"}, self.git.changed_files(from_commit="HEAD^"))
self.assertEqual({"README"}, self.git.changes_in("HEAD"))
# Create a change on this branch that is incompatible with the change to master
with open(self.readme_file, "w") as readme:
readme.write("Conflict")
subprocess.check_call(["git", "commit", "-am", "Conflict"])
self.assertEqual(
set(), self.git.changed_files(include_untracked=True, from_commit="HEAD")
)
with self.assertRaises(Scm.LocalException):
self.git.refresh(leave_clean=False)
# The repo is dirty
self.assertEqual(
{"README"}, self.git.changed_files(include_untracked=True, from_commit="HEAD")
)
with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
subprocess.check_call(["git", "reset", "--hard", "HEAD"])
# Now try with leave_clean
with self.assertRaises(Scm.LocalException):
self.git.refresh(leave_clean=True)
# The repo is clean
self.assertEqual(
set(), self.git.changed_files(include_untracked=True, from_commit="HEAD")
)
def test_commit_with_new_untracked_file_adds_file(self):
new_file = os.path.join(self.worktree, "untracked_file")
touch(new_file)
self.assertEqual({"untracked_file"}, self.git.changed_files(include_untracked=True))
self.git.add(new_file)
self.assertEqual({"untracked_file"}, self.git.changed_files())
self.git.commit("API Changes.")
self.assertEqual(set(), self.git.changed_files(include_untracked=True))
class DetectWorktreeFakeGitTest(unittest.TestCase):
@contextmanager
def empty_path(self):
with temporary_dir() as path:
with environment_as(PATH=path):
yield path
@contextmanager
def unexecutable_git(self):
with self.empty_path() as path:
git = os.path.join(path, "git")
touch(git)
yield git
@contextmanager
def executable_git(self):
with self.unexecutable_git() as git:
chmod_plus_x(git)
yield git
def test_detect_worktree_no_git(self):
with self.empty_path():
self.assertIsNone(Git.detect_worktree())
    def test_detect_worktree_unexecutable_git(self):
with self.unexecutable_git() as git:
self.assertIsNone(Git.detect_worktree())
self.assertIsNone(Git.detect_worktree(binary=git))
def test_detect_worktree_invalid_executable_git(self):
with self.executable_git() as git:
self.assertIsNone(Git.detect_worktree())
self.assertIsNone(Git.detect_worktree(binary=git))
def test_detect_worktree_failing_git(self):
with self.executable_git() as git:
with open(git, "w") as fp:
fp.write("#!/bin/sh\n")
fp.write("exit 1")
self.assertIsNone(Git.detect_worktree())
self.assertIsNone(Git.detect_worktree(git))
def test_detect_worktree_working_git(self):
expected_worktree_dir = "/a/fake/worktree/dir"
with self.executable_git() as git:
with open(git, "w") as fp:
fp.write("#!/bin/sh\n")
fp.write("echo " + expected_worktree_dir)
self.assertEqual(expected_worktree_dir, Git.detect_worktree())
self.assertEqual(expected_worktree_dir, Git.detect_worktree(binary=git))
|
tdyas/pants
|
tests/python/pants_test/scm/test_git.py
|
Python
|
apache-2.0
| 24,683
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Astakos Client Exceptions
"""
class AstakosClientException(Exception):
"""Base AstakosClientException Class"""
def __init__(self, message='', details='', status=500):
self.message = message
self.details = details
if not hasattr(self, 'status'):
self.status = status
super(AstakosClientException,
self).__init__(self.message, self.details, self.status)
class BadValue(AstakosClientException):
"""Re-define ValueError Exception under AstakosClientException"""
def __init__(self, details):
message = "ValueError"
super(BadValue, self).__init__(message, details)
class InvalidResponse(AstakosClientException):
"""Return simplejson parse Exception as AstakosClient one"""
def __init__(self, message, details):
super(InvalidResponse, self).__init__(message, details)
class BadRequest(AstakosClientException):
"""BadRequest Exception"""
status = 400
class Unauthorized(AstakosClientException):
"""Unauthorized Exception"""
status = 401
class Forbidden(AstakosClientException):
"""Forbidden Exception"""
status = 403
class NotFound(AstakosClientException):
"""NotFound Exception"""
status = 404
class QuotaLimit(AstakosClientException):
"""QuotaLimit Exception"""
status = 413
class NoUserName(AstakosClientException):
"""No display name for the given uuid"""
def __init__(self, uuid):
message = "No display name for the given uuid: %s" % uuid
super(NoUserName, self).__init__(message)
class NoUUID(AstakosClientException):
"""No uuid for the given display name"""
def __init__(self, display_name):
message = "No uuid for the given display name: %s" % display_name
super(NoUUID, self).__init__(message)
class NoEndpoints(AstakosClientException):
"""No endpoints found matching the criteria given"""
def __init__(self, ep_name, ep_type, ep_region, ep_version_id):
message = "No endpoints found matching" + \
(", name = %s" % ep_name) if ep_name is not None else "" + \
(", type = %s" % ep_type) if ep_type is not None else "" + \
(", region = %s" % ep_region) \
if ep_region is not None else "" + \
(", version_id = %s" % ep_version_id) \
if ep_version_id is not None else "."
super(NoEndpoints, self).__init__(message)
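# Illustrative usage sketch (not part of the original module): callers can
# branch on the shared `status` attribute of this exception hierarchy. The
# client object and its `get_user_info` call are hypothetical.
def _example_error_handling(client, token):
    try:
        client.get_user_info(token)
    except Unauthorized:
        pass                          # status 401: re-authenticate
    except AstakosClientException as err:
        print(err.status, err.message, err.details)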
|
Erethon/synnefo
|
astakosclient/astakosclient/errors.py
|
Python
|
gpl-3.0
| 3,137
|
# coding: utf-8
# ## K-means clustering
# #### This notebook presents the machine learning analysis of the gapminder dataset accessible from http://www.gapminder.org/data/
# In[1]:
get_ipython().magic(u'matplotlib inline')
# import the necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split  # formerly sklearn.cross_validation
from sklearn import preprocessing
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
from pandas import Series, DataFrame
import statsmodels.formula.api as smf
import statsmodels.stats.multicomp as multi
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
sns.set_style('whitegrid')
sns.set_context('talk')
# Eliminate false positive SettingWithCopyWarning
pd.options.mode.chained_assignment = None
# ### Data Management
# In[2]:
# Make results reproducible
np.random.seed(12345)
# Load the dataset
df = pd.read_csv('gapminder.csv')
variables = ['incomeperperson', 'alcconsumption', 'co2emissions', 'femaleemployrate',
'internetuserate', 'lifeexpectancy','employrate','urbanrate','breastcancerper100th']
# convert to numeric format
for variable in variables:
df[variable] = pd.to_numeric(df[variable], errors='coerce')
# listwise deletion of missing values
subset = df[variables].dropna()
# Print the rows and columns of the data frame
print('Size of study data')
print(subset.shape)
# In[3]:
n_estimators=25
subset['cancerbins'] = pd.cut(subset['breastcancerper100th'], 5, labels=['0-20','21-40','41-60','61-80','81-110'])
subset['cancerbins'] = subset['cancerbins'].astype('category')
variables.pop(8)
predictors = subset[variables]
target = subset['cancerbins']
# Split into training and testing sets
training_data, test_data, training_target, test_target = train_test_split(predictors, target, test_size=.25)
# Get size of training set
print('Size of training data')
print(training_data.shape)
# In[4]:
# Fit an Extra Trees model to the data
model = ExtraTreesClassifier()
model.fit(training_data,training_target)
# Display the relative importance of each attribute
feature_name = list(predictors.columns.values)
feature_importance = list(model.feature_importances_)
features = pd.DataFrame({'name':feature_name, 'importance':feature_importance}).sort_values(by='importance', ascending=False)
print(features.head(len(feature_name)))
# In[5]:
variables = ['alcconsumption','internetuserate', 'urbanrate', 'incomeperperson', 'lifeexpectancy']
# convert to numeric format
for variable in variables:
df[variable] = pd.to_numeric(df[variable], errors='coerce')
# Center and scale data
for variable in variables:
subset[variable]=preprocessing.scale(subset[variable].astype('float64'))
features = subset[variables]
target = subset[['breastcancerper100th']]
# ### Split Data into Training and Test Sets
# In[6]:
training_data, test_data, training_target, test_target = train_test_split(features, target, test_size=.3)
print('Size of training data')
print(training_data.shape)
# ### Determine the Number of Clusters
# In[7]:
# Identify number of clusters using the elbow method
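# For each candidate k we fit KMeans and record the average distance from
# every training point to its nearest cluster centre; the "elbow" where this
# curve stops dropping sharply suggests a reasonable number of clusters.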
clusters=range(1,10)
meandist=[]
for k in clusters:
model=KMeans(n_clusters=k)
model.fit(training_data)
clusassign=model.predict(training_data)
dist = sum(np.min(cdist(training_data, model.cluster_centers_, 'euclidean'), axis=1))
meandist.append(dist / training_data.shape[0])
# Visualize the elbow
k = 2
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(clusters, meandist)
ax.plot(clusters[(k-1)], meandist[(k-1)], marker='o', markersize=12,
markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
plt.grid(True)
plt.xlabel('Number of Clusters')
plt.ylabel('Average Distance')
plt.show()
# ### Visualize Clustered Data
# In[8]:
model=KMeans(n_clusters=k)
model.fit(training_data)
training_data['cluster'] = model.labels_
# Change Colors
my_cmap = plt.cm.get_cmap('brg')
my_cmap.set_under('w')
x = training_data.iloc[:,0]
y = training_data.iloc[:,1]
z = training_data.iloc[:,2]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, c=training_data['cluster'], cmap=my_cmap)
ax.set_xlabel(training_data.columns.values[0], labelpad=15)
ax.set_ylabel(training_data.columns.values[1], labelpad=15)
ax.set_zlabel(training_data.columns.values[2], labelpad=15)
plt.show()
# In[9]:
sns.pairplot(training_data, hue ='cluster')
# ### Examine Differences Between Clusters
# In[10]:
# Add Cluster label to training targets
training_target['cluster'] = model.labels_
# OLS regression
income_model = smf.ols(formula='breastcancerper100th ~ C(cluster)', data=training_target).fit()
print (income_model.summary())
# In[11]:
print ('means for features by cluster')
m1= training_target.groupby('cluster').mean()
print (m1)
# In[12]:
print ('standard deviations for features by cluster')
m2= training_target.groupby('cluster').std()
print (m2)
# In[13]:
mc1 = multi.MultiComparison(training_target['breastcancerper100th'], training_target['cluster'])
res1 = mc1.tukeyhsd()
print(res1.summary())
# In[14]:
sns.pairplot(training_target, hue ='cluster')
|
duttashi/Data-Analysis-Visualization
|
scripts/general/k-means-breastcancer_prediction.py
|
Python
|
mit
| 5,294
|
from django import template
register = template.Library()
def easy_tag(func):
"""Decorator to facilitate template tag creation"""
def inner(parser, token):
"""deal with the repetitive parts of parsing template tags"""
try:
return func(*token.split_contents())
except TypeError:
raise template.TemplateSyntaxError('Bad arguments for tag "%s"'
% token.split_contents()[0])
inner.__name__ = func.__name__
    inner.__doc__ = func.__doc__
return inner
class AppendGetNode(template.Node):
def __init__(self, dict):
self.dict_pairs = {}
for pair in dict.split(','):
pair = pair.split('=')
self.dict_pairs[pair[0]] = template.Variable(pair[1])
def render(self, context):
get = context['request'].GET.copy()
for key in self.dict_pairs:
get[key] = self.dict_pairs[key].resolve(context)
path = context['request'].META['PATH_INFO']
if len(get):
path += "?%s" % "&".join(["%s=%s" % (key, value)
for (key, value) in get.items() if value])
return path
@register.tag()
@easy_tag
def append_to_get(_tag_name, dict):
return AppendGetNode(dict)
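# Illustrative template usage sketch (not part of the original module),
# assuming this tag library is loaded and the request context processor is
# enabled:
#
#   {% load append_to_get %}
#   <a href="{% append_to_get page=2,sort=name %}">next</a>
#
# renders the current path with page=2 and sort=name merged into the
# existing query string.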
|
Turupawn/website
|
games/templatetags/append_to_get.py
|
Python
|
agpl-3.0
| 1,303
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.provider_operations import ProviderOperations
from .operations.labs_operations import LabsOperations
from .operations.operations import Operations
from .operations.global_schedules_operations import GlobalSchedulesOperations
from .operations.artifact_sources_operations import ArtifactSourcesOperations
from .operations.arm_templates_operations import ArmTemplatesOperations
from .operations.artifacts_operations import ArtifactsOperations
from .operations.costs_operations import CostsOperations
from .operations.custom_images_operations import CustomImagesOperations
from .operations.formulas_operations import FormulasOperations
from .operations.gallery_images_operations import GalleryImagesOperations
from .operations.notification_channels_operations import NotificationChannelsOperations
from .operations.policy_sets_operations import PolicySetsOperations
from .operations.policies_operations import PoliciesOperations
from .operations.schedules_operations import SchedulesOperations
from .operations.service_runners_operations import ServiceRunnersOperations
from .operations.users_operations import UsersOperations
from .operations.disks_operations import DisksOperations
from .operations.environments_operations import EnvironmentsOperations
from .operations.secrets_operations import SecretsOperations
from .operations.virtual_machines_operations import VirtualMachinesOperations
from .operations.virtual_machine_schedules_operations import VirtualMachineSchedulesOperations
from .operations.virtual_networks_operations import VirtualNetworksOperations
from . import models
class DevTestLabsClientConfiguration(AzureConfiguration):
"""Configuration for DevTestLabsClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The subscription ID.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(DevTestLabsClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-mgmt-devtestlabs/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class DevTestLabsClient(object):
"""The DevTest Labs Client.
:ivar config: Configuration for client.
:vartype config: DevTestLabsClientConfiguration
:ivar provider_operations: ProviderOperations operations
:vartype provider_operations: azure.mgmt.devtestlabs.operations.ProviderOperations
:ivar labs: Labs operations
:vartype labs: azure.mgmt.devtestlabs.operations.LabsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.devtestlabs.operations.Operations
:ivar global_schedules: GlobalSchedules operations
:vartype global_schedules: azure.mgmt.devtestlabs.operations.GlobalSchedulesOperations
:ivar artifact_sources: ArtifactSources operations
:vartype artifact_sources: azure.mgmt.devtestlabs.operations.ArtifactSourcesOperations
:ivar arm_templates: ArmTemplates operations
:vartype arm_templates: azure.mgmt.devtestlabs.operations.ArmTemplatesOperations
:ivar artifacts: Artifacts operations
:vartype artifacts: azure.mgmt.devtestlabs.operations.ArtifactsOperations
:ivar costs: Costs operations
:vartype costs: azure.mgmt.devtestlabs.operations.CostsOperations
:ivar custom_images: CustomImages operations
:vartype custom_images: azure.mgmt.devtestlabs.operations.CustomImagesOperations
:ivar formulas: Formulas operations
:vartype formulas: azure.mgmt.devtestlabs.operations.FormulasOperations
:ivar gallery_images: GalleryImages operations
:vartype gallery_images: azure.mgmt.devtestlabs.operations.GalleryImagesOperations
:ivar notification_channels: NotificationChannels operations
:vartype notification_channels: azure.mgmt.devtestlabs.operations.NotificationChannelsOperations
:ivar policy_sets: PolicySets operations
:vartype policy_sets: azure.mgmt.devtestlabs.operations.PolicySetsOperations
:ivar policies: Policies operations
:vartype policies: azure.mgmt.devtestlabs.operations.PoliciesOperations
:ivar schedules: Schedules operations
:vartype schedules: azure.mgmt.devtestlabs.operations.SchedulesOperations
:ivar service_runners: ServiceRunners operations
:vartype service_runners: azure.mgmt.devtestlabs.operations.ServiceRunnersOperations
:ivar users: Users operations
:vartype users: azure.mgmt.devtestlabs.operations.UsersOperations
:ivar disks: Disks operations
:vartype disks: azure.mgmt.devtestlabs.operations.DisksOperations
:ivar environments: Environments operations
:vartype environments: azure.mgmt.devtestlabs.operations.EnvironmentsOperations
:ivar secrets: Secrets operations
:vartype secrets: azure.mgmt.devtestlabs.operations.SecretsOperations
:ivar virtual_machines: VirtualMachines operations
:vartype virtual_machines: azure.mgmt.devtestlabs.operations.VirtualMachinesOperations
:ivar virtual_machine_schedules: VirtualMachineSchedules operations
:vartype virtual_machine_schedules: azure.mgmt.devtestlabs.operations.VirtualMachineSchedulesOperations
:ivar virtual_networks: VirtualNetworks operations
:vartype virtual_networks: azure.mgmt.devtestlabs.operations.VirtualNetworksOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The subscription ID.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = DevTestLabsClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2016-05-15'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.provider_operations = ProviderOperations(
self._client, self.config, self._serialize, self._deserialize)
self.labs = LabsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self.config, self._serialize, self._deserialize)
self.global_schedules = GlobalSchedulesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.artifact_sources = ArtifactSourcesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.arm_templates = ArmTemplatesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.artifacts = ArtifactsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.costs = CostsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.custom_images = CustomImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.formulas = FormulasOperations(
self._client, self.config, self._serialize, self._deserialize)
self.gallery_images = GalleryImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.notification_channels = NotificationChannelsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.policy_sets = PolicySetsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.policies = PoliciesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.schedules = SchedulesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.service_runners = ServiceRunnersOperations(
self._client, self.config, self._serialize, self._deserialize)
self.users = UsersOperations(
self._client, self.config, self._serialize, self._deserialize)
self.disks = DisksOperations(
self._client, self.config, self._serialize, self._deserialize)
self.environments = EnvironmentsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.secrets = SecretsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machines = VirtualMachinesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_schedules = VirtualMachineSchedulesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self.config, self._serialize, self._deserialize)
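# Illustrative sketch (not part of the generated client): constructing the
# client. The credential values and subscription ID are placeholders.
def _example_client():
    from msrestazure.azure_active_directory import UserPassCredentials
    credentials = UserPassCredentials('user@example.com', 'password')
    client = DevTestLabsClient(credentials, '<subscription-id>')
    # Operation groups hang off the client, e.g. client.labs, client.policies.
    return client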
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/dev_test_labs_client.py
|
Python
|
mit
| 10,265
|
#!/usr/bin/env python
"""
Read a MAF from stdin and break into several new mafs containing no more than
`chunk_size` columns. The new mafs will be written to `out_dir` along with a
file "intervals.txt" specifying the range covered by each new maf file. A
probability for writing each chunk can optionally be specified, resulting in
a random fraction of chunks from the input MAF being produced.
usage: %prog [options] chunk_size out_dir < maf
--prob: probability of writing versus skipping each chunk.
"""
usage = "usage: %prog chunk_size out_dir"
import sys
from optparse import OptionParser
import bx.align.maf
import psyco_full
import random
INF="inf"
def __main__():
# Parse command line arguments
parser = OptionParser( "usage: %prog chunk_size out_dir" )
parser.add_option( "--prob", action="store", default=None, type="float",
help="Probability of writing a given chunk" )
( options, args ) = parser.parse_args()
chunk_size = int( args[0] )
out_dir = args[1]
prob = options.prob
maf_reader = bx.align.maf.Reader( sys.stdin )
maf_writer = None
count = 0
current_chunk = -1
chunk_min = INF
chunk_max = 0
write_current_chunk = True
interval_file = file( "%s/intervals.txt" % out_dir, "w" )
for m in maf_reader:
chunk_min = min( chunk_min, m.components[0].start )
chunk_max = max( chunk_max, m.components[0].end )
if not maf_writer or count + m.text_size > chunk_size:
current_chunk += 1
# Finish the last chunk
if maf_writer:
maf_writer.close()
interval_file.write( "%s %s\n" % ( chunk_min, chunk_max ) )
chunk_min = INF
chunk_max = 0
# Decide if the new chunk will be written
if prob: write_current_chunk = bool( random.random() <= prob )
else: write_current_chunk = True
if write_current_chunk:
maf_writer = bx.align.maf.Writer( file( "%s/%09d.maf" % ( out_dir, current_chunk ), "w" ) )
else:
maf_writer = None
count = 0
if maf_writer: maf_writer.write( m )
#count += m.text_size
count += m.components[0].size
if maf_writer:
maf_writer.close()
interval_file.write( "%s %s\n" % ( chunk_min, chunk_max ) )
interval_file.close()
if __name__ == "__main__": __main__()
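# Example invocation (illustrative; file names are hypothetical):
#   maf_chunk.py --prob 0.25 30000 chunks < alignment.maf
# writes roughly a quarter of the ~30000-column chunks into chunks/ and
# records each chunk's reference coordinate range in chunks/intervals.txt.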
|
uhjish/bx-python
|
scripts/maf_chunk.py
|
Python
|
mit
| 2,486
|
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
from unittest import TestSuite
from .testcase_get_undefined import HttpGetUndefinedTestCase
from .testcase_post_undefined import HttpPostUndefinedTestCase
from .testcase_delete_undefined import HttpDeleteUndefinedTestCase
def get_test_suite():
test_suite = TestSuite()
test_suite.addTest(HttpGetUndefinedTestCase())
test_suite.addTest(HttpPostUndefinedTestCase())
test_suite.addTest(HttpDeleteUndefinedTestCase())
return test_suite
|
bitmovin/bitcodin-python
|
bitcodin/test/http/__init__.py
|
Python
|
unlicense
| 516
|
import itertools
import types
from copy import deepcopy, copy
from contextlib import suppress
from warnings import warn
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from .strips import strips
from ..utils import cross_join, match
from ..exceptions import PlotnineError, PlotnineWarning
from ..scales.scales import Scales
# For default matplotlib backend
with suppress(ImportError):
from matplotlib.ticker import locale, FixedFormatter
from matplotlib.gridspec import GridSpec
class facet:
"""
Base class for all facets
Parameters
----------
scales : str in ``['fixed', 'free', 'free_x', 'free_y']``
Whether ``x`` or ``y`` scales should be allowed (free)
to vary according to the data on each of the panel.
Default is ``'fixed'``.
shrink : bool
Whether to shrink the scales to the output of the
statistics instead of the raw data. Default is ``True``.
labeller : str | function
How to label the facets. If it is a ``str``, it should
        be one of ``'label_value'``, ``'label_both'`` or
        ``'label_context'``. Default is ``'label_value'``.
as_table : bool
If ``True``, the facets are laid out like a table with
the highest values at the bottom-right. If ``False``
        the facets are laid out like a plot with the highest
        value at the top-right. Default is ``True``.
drop : bool
If ``True``, all factor levels not used in the data
will automatically be dropped. If ``False``, all
factor levels will be shown, regardless of whether
or not they appear in the data. Default is ``True``.
dir : str in ``['h', 'v']``
Direction in which to layout the panels. ``h`` for
horizontal and ``v`` for vertical.
"""
#: number of columns
ncol = None
#: number of rows
nrow = None
as_table = True
drop = True
shrink = True
#: Which axis scales are free
free = {'x': True, 'y': True}
#: A dict of parameters created depending on the data
#: (Intended for extensions)
params = None
# Theme object, automatically updated before drawing the plot
theme = None
# Figure object on which the facet panels are created
figure = None
# coord object, automatically updated before drawing the plot
coordinates = None
# layout object, automatically updated before drawing the plot
layout = None
# Axes
axs = None
# The first and last axes according to how MPL creates them.
# Used for labelling the x and y axes,
first_ax = None
last_ax = None
# Number of facet variables along the horizontal axis
num_vars_x = 0
# Number of facet variables along the vertical axis
num_vars_y = 0
# ggplot object that the facet belongs to
plot = None
# Facet strips
strips = None
# Control the relative size of multiple facets
# Use a subclass to change the default.
# See: facet_grid for an example
space = 'fixed'
def __init__(self, scales='fixed', shrink=True,
labeller='label_value', as_table=True,
drop=True, dir='h'):
from .labelling import as_labeller
self.shrink = shrink
self.labeller = as_labeller(labeller)
self.as_table = as_table
self.drop = drop
self.dir = dir
self.free = {'x': scales in ('free_x', 'free'),
'y': scales in ('free_y', 'free')}
def __radd__(self, gg, inplace=False):
gg = gg if inplace else deepcopy(gg)
gg.facet = copy(self)
gg.facet.plot = gg
return gg
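    # Illustrative note (not part of the original module): `__radd__` is what
    # makes `ggplot(df, aes('x', 'y')) + geom_point() + facet_wrap('~ g')`
    # attach a copy of the facet to the plot (names here are the usual
    # plotnine API).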
def set(self, **kwargs):
"""
Set properties
"""
for name, value in kwargs.items():
if hasattr(self, name):
setattr(self, name, value)
else:
raise AttributeError(
"{!r} object has no attribute {}".format(
self.__class__.__name__,
name))
def setup_data(self, data):
"""
Allow the facet to manipulate the data
Parameters
----------
data : list of dataframes
Data for each of the layers
Returns
-------
data : list of dataframes
Data for each of the layers
Notes
-----
This method will be called after :meth:`setup_params`,
therefore the `params` property will be set.
"""
return data
def setup_params(self, data):
"""
Create facet parameters
Parameters
----------
data : list of dataframes
Plot data and data for the layers
"""
self.params = {}
def init_scales(self, layout, x_scale=None, y_scale=None):
scales = types.SimpleNamespace()
if x_scale is not None:
n = layout['SCALE_X'].max()
scales.x = Scales([x_scale.clone() for i in range(n)])
if y_scale is not None:
n = layout['SCALE_Y'].max()
scales.y = Scales([y_scale.clone() for i in range(n)])
return scales
def map(self, data, layout):
"""
        Assign data points to panels
Parameters
----------
data : DataFrame
Data for a layer
layout : DataFrame
As returned by self.compute_layout
Returns
-------
data : DataFrame
Data with all points mapped to the panels
on which they will be plotted.
"""
msg = "{} should implement this method."
raise NotImplementedError(
            msg.format(self.__class__.__name__))
def compute_layout(self, data):
"""
Compute layout
"""
msg = "{} should implement this method."
raise NotImplementedError(
            msg.format(self.__class__.__name__))
def finish_data(self, data, layout):
"""
Modify data before it is drawn out by the geom
The default is to return the data without modification.
        Subclasses should override this method as they require.
Parameters
----------
data : DataFrame
Layer data.
layout : Layout
Layout
Returns
-------
data : DataFrame
Modified layer data
"""
return data
def train_position_scales(self, layout, layers):
"""
Compute ranges for the x and y scales
"""
_layout = layout.layout
panel_scales_x = layout.panel_scales_x
panel_scales_y = layout.panel_scales_y
# loop over each layer, training x and y scales in turn
for layer in layers:
data = layer.data
match_id = match(data['PANEL'], _layout['PANEL'])
if panel_scales_x:
x_vars = list(set(panel_scales_x[0].aesthetics) &
set(data.columns))
# the scale index for each data point
SCALE_X = _layout['SCALE_X'].iloc[match_id].tolist()
panel_scales_x.train(data, x_vars, SCALE_X)
if panel_scales_y:
y_vars = list(set(panel_scales_y[0].aesthetics) &
set(data.columns))
# the scale index for each data point
SCALE_Y = _layout['SCALE_Y'].iloc[match_id].tolist()
panel_scales_y.train(data, y_vars, SCALE_Y)
return self
def make_ax_strips(self, layout_info, ax):
"""
Create strips for the facet
Parameters
----------
layout_info : dict-like
Layout information. Row from the layout table
ax : axes
Axes to label
"""
return []
def initialise_strips(self):
"""
Initialise strips for the facet
"""
self.strips = strips.initialise(self)
def set_limits_breaks_and_labels(self, panel_params, ax):
# limits
ax.set_xlim(panel_params.x.range)
ax.set_ylim(panel_params.y.range)
# breaks
ax.set_xticks(panel_params.x.breaks)
ax.set_yticks(panel_params.y.breaks)
# minor breaks
ax.set_xticks(panel_params.x.minor_breaks, minor=True)
ax.set_yticks(panel_params.y.minor_breaks, minor=True)
# labels
ax.set_xticklabels(panel_params.x.labels)
ax.set_yticklabels(panel_params.y.labels)
# When you manually set the tick labels MPL changes the locator
# so that it no longer reports the x & y positions
# Fixes https://github.com/has2k1/plotnine/issues/187
ax.xaxis.set_major_formatter(MyFixedFormatter(panel_params.x.labels))
ax.yaxis.set_major_formatter(MyFixedFormatter(panel_params.y.labels))
get_property = self.theme.themeables.property
# Padding between ticks and text
try:
margin = get_property('axis_text_x', 'margin')
except KeyError:
pad_x = 2.4
else:
pad_x = margin.get_as('t', 'pt')
try:
margin = get_property('axis_text_y', 'margin')
except KeyError:
pad_y = 2.4
else:
pad_y = margin.get_as('r', 'pt')
ax.tick_params(axis='x', which='major', pad=pad_x)
ax.tick_params(axis='y', which='major', pad=pad_y)
def __deepcopy__(self, memo):
"""
Deep copy without copying the dataframe and environment
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
old = self.__dict__
new = result.__dict__
# don't make a deepcopy of the figure & the axes
shallow = {'figure', 'axs', 'first_ax', 'last_ax'}
for key, item in old.items():
if key in shallow:
new[key] = old[key]
memo[id(new[key])] = new[key]
else:
new[key] = deepcopy(old[key], memo)
return result
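    # Illustrative sketch (added, not from the original source): the effect
    # of the selective copy above is that live matplotlib handles stay
    # shared between the original and the copy:
    #
    #   f2 = deepcopy(facet_instance)        # facet_instance is hypothetical
    #   f2.figure is facet_instance.figure   # -> True (shallow)
    #   f2.params is not facet_instance.params  # -> True (deep)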
def _create_subplots(self, fig, layout):
"""
        Create subplots and return axs
"""
num_panels = len(layout)
axsarr = np.empty((self.nrow, self.ncol), dtype=object)
space = self.space
default_space = {
'x': [1 for x in range(self.ncol)],
'y': [1 for x in range(self.nrow)],
}
if isinstance(space, str):
if space == 'fixed':
space = default_space
# TODO: Implement 'free', 'free_x' & 'free_y'
else:
space = default_space
elif isinstance(space, dict):
if 'x' not in space:
space['x'] = default_space['x']
if 'y' not in space:
space['y'] = default_space['y']
if len(space['x']) != self.ncol:
raise ValueError(
"The number of x-ratios for the facet space sizes "
"should match the number of columns."
)
if len(space['y']) != self.nrow:
raise ValueError(
"The number of y-ratios for the facet space sizes "
"should match the number of rows."
)
gs = GridSpec(
self.nrow,
self.ncol,
height_ratios=space['y'],
width_ratios=space['x']
)
# Create axes
i = 1
for row in range(self.nrow):
for col in range(self.ncol):
axsarr[row, col] = fig.add_subplot(gs[i - 1])
i += 1
# Rearrange axes
# They are ordered to match the positions in the layout table
if self.dir == 'h':
order = 'C'
if not self.as_table:
axsarr = axsarr[::-1]
elif self.dir == 'v':
order = 'F'
if not self.as_table:
axsarr = np.array([row[::-1] for row in axsarr])
axs = axsarr.ravel(order)
# Delete unused axes
for ax in axs[num_panels:]:
fig.delaxes(ax)
axs = axs[:num_panels]
return axs
def make_axes(self, figure, layout, coordinates):
"""
Create and return Matplotlib axes
"""
axs = self._create_subplots(figure, layout)
# Used for labelling the x and y axes, the first and
# last axes according to how MPL creates them.
self.first_ax = figure.axes[0]
self.last_ax = figure.axes[-1]
self.figure = figure
self.axs = axs
return axs
def spaceout_and_resize_panels(self):
"""
Adjust the spacing between the panels and resize them
to meet the aspect ratio
"""
pass
def check_axis_text_space(self):
_adjust = self.theme.themeables.get('subplots_adjust')
if _adjust:
has_wspace = 'wspace' in _adjust.properties['value']
has_hspace = 'hspace' in _adjust.properties['value']
else:
has_wspace = False
has_hspace = False
warn_x = self.ncol > 1 and self.free['y'] and not has_wspace
warn_y = self.nrow > 1 and self.free['x'] and not has_hspace
if warn_x:
warn("If you need more space for the x-axis tick text use "
"... + theme(subplots_adjust={'wspace': 0.25}). "
"Choose an appropriate value for 'wspace'.",
PlotnineWarning
)
if warn_y:
warn("If you need more space for the y-axis tick text use "
"... + theme(subplots_adjust={'hspace': 0.25}). "
"Choose an appropriate value for 'hspace'",
PlotnineWarning
)
def combine_vars(data, environment=None, vars=None, drop=True):
"""
Base layout function that generates all combinations of data
needed for facetting
The first data frame in the list should be the default data
for the plot. Other data frames in the list are ones that are
added to the layers.
"""
if not vars:
return pd.DataFrame()
# For each layer, compute the facet values
values = [eval_facet_vars(df, vars, environment)
for df in data if df is not None]
# Form the base data frame which contains all combinations
# of facetting variables that appear in the data
has_all = [x.shape[1] == len(vars) for x in values]
if not any(has_all):
raise PlotnineError(
"At least one layer must contain all variables " +
"used for facetting")
base = pd.concat([x for i, x in enumerate(values) if has_all[i]],
axis=0)
base = base.drop_duplicates()
if not drop:
base = unique_combs(base)
# sorts according to order of factor levels
base = base.sort_values(list(base.columns))
# Systematically add on missing combinations
for i, value in enumerate(values):
if has_all[i] or len(value.columns) == 0:
continue
old = base.loc[:, base.columns.difference(value.columns)]
new = value.loc[
:,
base.columns.intersection(value.columns)
].drop_duplicates()
if not drop:
new = unique_combs(new)
base = base.append(cross_join(old, new), ignore_index=True)
if len(base) == 0:
raise PlotnineError(
"Faceting variables must have at least one value")
base = base.reset_index(drop=True)
return base
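# Illustrative sketch (added; the dataframe and variable names are
# hypothetical): for a layer whose dataframe has 'gear' and 'cyl' columns,
#
#   combine_vars([df], environment=env, vars=['gear', 'cyl'])
#
# returns one row per (gear, cyl) combination that appears in the data,
# and all combinations of the observed levels when drop=False.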
def unique_combs(df):
"""
Return data frame with all possible combinations
of the values in the columns
"""
def _unique(s):
if isinstance(s.dtype, pdtypes.CategoricalDtype):
return s.cat.categories
return s.unique()
# List of unique values from every column
lst = (_unique(x) for _, x in df.iteritems())
rows = list(itertools.product(*lst))
_df = pd.DataFrame(rows, columns=df.columns)
# preserve the column dtypes
for col in df:
_df[col] = _df[col].astype(df[col].dtype, copy=False)
return _df
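# Minimal sketch of what unique_combs produces (added; assumes pandas as
# imported above):
#
#   df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'y', 'x']})
#   unique_combs(df)
#   #    a  b
#   # 0  1  x
#   # 1  1  y
#   # 2  2  x
#   # 3  2  y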
def layout_null():
layout = pd.DataFrame({'PANEL': 1, 'ROW': 1, 'COL': 1,
'SCALE_X': 1, 'SCALE_Y': 1,
'AXIS_X': True, 'AXIS_Y': True},
index=[0])
return layout
def add_missing_facets(data, layout, vars, facet_vals):
# When in a dataframe some layer does not have all
# the facet variables, add the missing facet variables
# and create new data where the points(duplicates) are
# present in all the facets
missing_facets = set(vars) - set(facet_vals)
if missing_facets:
to_add = layout.loc[:, missing_facets].drop_duplicates()
to_add.reset_index(drop=True, inplace=True)
# a point for each facet, [0, 1, ..., n-1, 0, 1, ..., n-1, ...]
data_rep = np.tile(np.arange(len(data)), len(to_add))
# a facet for each point, [0, 0, 0, 1, 1, 1, ... n-1, n-1, n-1]
facet_rep = np.repeat(np.arange(len(to_add)), len(data))
data = data.iloc[data_rep, :].reset_index(drop=True)
facet_vals = facet_vals.iloc[data_rep, :].reset_index(drop=True)
to_add = to_add.iloc[facet_rep, :].reset_index(drop=True)
facet_vals = pd.concat([facet_vals, to_add],
axis=1, ignore_index=False)
return data, facet_vals
def eval_facet_vars(data, vars, env):
"""
Evaluate facet variables
Parameters
----------
data : DataFrame
        Facet dataframe
vars : list
Facet variables
env : environment
Plot environment
Returns
-------
facet_vals : DataFrame
Facet values that correspond to the specified
variables.
"""
# To allow expressions in facet formula
def I(value):
return value
env = env.with_outer_namespace({'I': I})
facet_vals = pd.DataFrame(index=data.index)
for name in vars:
if name in data:
# This is a limited solution. If a keyword is
# part of an expression it will fail in the
# else statement below
res = data[name]
elif str.isidentifier(name):
# All other non-statements
continue
else:
# Statements
try:
res = env.eval(name, inner_namespace=data)
except NameError:
continue
facet_vals[name] = res
return facet_vals
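# Illustrative sketch (added; df and env are hypothetical): a plain column
# name is looked up directly, while anything that is not an identifier is
# evaluated as an expression in the plot environment:
#
#   eval_facet_vars(df, ['cyl'], env)       # column lookup
#   eval_facet_vars(df, ['cyl > 4'], env)   # expression -> boolean column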
class MyFixedFormatter(FixedFormatter):
def format_data(self, value):
"""
Return a formatted string representation of a number.
"""
s = locale.format_string('%1.10e', (value,))
return self.fix_minus(s)
|
has2k1/plotnine
|
plotnine/facets/facet.py
|
Python
|
gpl-2.0
| 18,851
|
#import win32traceutil
import traceback
import sys
import os
import time
import new
# these three are required pre-imported, for pyjamas to work
# with the pyjd imputil etc. awful, i know...
import threading
import encodings
import encodings.cp437
from windows import *
from ctypes import *
from ctypes.wintypes import *
import comtypes
from comtypes import IUnknown
from comtypes.automation import IDispatch, VARIANT
from comtypes.client import wrap, GetModule
from comtypes.client.dynamic import Dispatch
import comtypes.gen
if not hasattr(sys, 'frozen'):
GetModule('atl.dll')
GetModule('shdocvw.dll')
GetModule('msxml2.dll')
GetModule('mshtml.tlb')
from comtypes.gen import SHDocVw
from comtypes.gen import MSXML2
from comtypes.gen import MSHTML
atl = windll.atl # If this fails, you need atl.dll
# do this after gen stuff, above
import mshtmlevents
class EventSink(object):
# some DWebBrowserEvents
def OnVisible(self, this, *args):
print "OnVisible", args
def BeforeNavigate(self, this, *args):
print "BeforeNavigate", args
def NavigateComplete(self, this, *args):
print "NavigateComplete", this, args
return
# some DWebBrowserEvents2
def BeforeNavigate2(self, this, *args):
print "BeforeNavigate2", args
def NavigateComplete2(self, this, *args):
print "NavigateComplete2", args
def DocumentComplete(self, this, *args):
print "DocumentComplete", args
if self.workaround_ignore_first_doc_complete == False:
# ignore first about:blank. *sigh*...
# TODO: work out how to parse *args byref VARIANT
# in order to get at the URI.
self.workaround_ignore_first_doc_complete = True
return
self._loaded()
def NewWindow2(self, this, *args):
print "NewWindow2", args
return
v = cast(args[1]._.c_void_p, POINTER(VARIANT))[0]
v.value = True
def NewWindow3(self, this, *args):
print "NewWindow3", args
return
v = cast(args[1]._.c_void_p, POINTER(VARIANT))[0]
v.value = True
fn_txt = """\
def event_fn(self, *args):
print "event %s", self, args
print "event callbacks", self._listeners
callbacks = self._listeners.get('%s', [])
for fn in callbacks:
try:
fn(self._sender, Dispatch(args[0]), True)
except:
sys.stderr.write( traceback.print_exc() )
sys.stderr.flush()
"""
class EventCaller:
def __init__(self, handler, name):
self.handler = handler
self.name = name
def __call__(self, *args):
callbacks = self.handler._listeners.get(self.name, [])
print "event", self.name, callbacks
for fn in callbacks:
try:
fn(self.handler._sender, Dispatch(args[0]), True)
except:
sys.stderr.write( traceback.print_exc() )
sys.stderr.flush()
class EventHandler(object):
def __init__(self, sender):
self._sender = sender
self._listeners = {}
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError(name)
print "EventHandler requested ", name
if name.startswith('_') or name == 'addEventListener':
return self.__dict__[name]
idx = name.find('_on')
if idx >= 0:
if idx > 0:
name = name[idx+1:]
#return EventCaller(self, name)
exec fn_txt % (name[2:], name[2:])
print event_fn
return new.instancemethod(event_fn, self)
raise AttributeError(name)
def addEventListener(self, name, fn):
if not self._listeners.has_key(name):
self._listeners[name] = []
self._listeners[name].append(fn)
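# Illustrative sketch (added, not from the original source): listeners are
# keyed without the 'on' prefix, and a matching COM event fans out to each
# registered callback as fn(sender, Dispatch(event), True):
#
#   handler = EventHandler(sender)          # sender is hypothetical
#   handler.addEventListener('load', my_callback)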
class Browser(EventSink):
def __init__(self, application, appdir):
EventSink.__init__(self)
self.platform = 'mshtml'
self.application = application
self.appdir = appdir
self.already_initialised = False
self.workaround_ignore_first_doc_complete = False
self.window_handler = None
self.node_handlers = {}
# Create an instance of IE via AtlAxWin.
atl.AtlAxWinInit()
hInstance = GetModuleHandle(None)
self.hwnd = CreateWindowEx(0,
"AtlAxWin",
"about:blank",
WS_OVERLAPPEDWINDOW |
WS_VISIBLE |
WS_HSCROLL | WS_VSCROLL,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
NULL,
NULL,
hInstance,
NULL)
# Get the IWebBrowser2 interface for the IE control.
self.pBrowserUnk = POINTER(IUnknown)()
atl.AtlAxGetControl(self.hwnd, byref(self.pBrowserUnk))
# the wrap call querys for the default interface
self.pBrowser = wrap(self.pBrowserUnk)
self.pBrowser.RegisterAsBrowser = True
self.pBrowser.AddRef()
self.conn = mshtmlevents.GetEvents(self.pBrowser, sink=self,
interface=SHDocVw.DWebBrowserEvents2)
def _alert(self, txt):
self.getDomWindow().alert(txt)
def load_app(self):
uri = self.application
if uri.find(":") == -1:
# assume file
uri = 'file://'+os.path.abspath(uri)
print "load_app", uri
self.application = uri
v = byref(VARIANT())
self.pBrowser.Navigate(uri, v, v, v, v)
# Show Window
cw = c_int(self.hwnd)
ShowWindow(cw, c_int(SW_SHOW))
UpdateWindow(cw)
def getDomDocument(self):
return Dispatch(self.pBrowser.Document)
def getDomWindow(self):
return self.getDomDocument().parentWindow
def _addXMLHttpRequestEventListener(self, node, event_name, event_fn):
print "_addXMLHttpRequestEventListener", event_name
rcvr = mshtmlevents._DispEventReceiver()
rcvr.dispmap = {0: event_fn}
print rcvr
rcvr.sender = node
print rcvr.sender
ifc = rcvr.QueryInterface(IDispatch)
print ifc
v = VARIANT(ifc)
print v
setattr(node, event_name, v)
return ifc
def addEventListener(self, node, event_name, event_fn):
rcvr = mshtmlevents._DispEventReceiver()
rcvr.dispmap = {0: event_fn}
rcvr.sender = node
ifc = rcvr.QueryInterface(IDispatch)
v = VARIANT(ifc)
setattr(node, "on"+event_name, v)
return ifc
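        # (the code below is unreachable -- an alternative attachEvent-based
        # implementation kept for reference)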
rcvr = mshtmlevents.GetDispEventReceiver(MSHTML.HTMLElementEvents2, event_fn, "on%s" % event_name)
rcvr.sender = node
ifc = rcvr.QueryInterface(IDispatch)
node.attachEvent("on%s" % event_name, ifc)
return ifc
def mash_attrib(self, attrib_name):
return attrib_name
def _addWindowEventListener(self, event_name, event_fn):
print "_addWindowEventListener", event_name, event_fn
#rcvr = mshtmlevents.GetDispEventReceiver(MSHTML.HTMLWindowEvents,
# event_fn, "on%s" % event_name)
#print rcvr
#rcvr.sender = self.getDomWindow()
#print rcvr.sender
#ifc = rcvr.QueryInterface(IDispatch)
#print ifc
#v = VARIANT(ifc)
#print v
#setattr(self.getDomWindow(), "on%s" % event_name, v)
#return ifc
wnd = self.pBrowser.Document.parentWindow
if self.window_handler is None:
self.window_handler = EventHandler(self)
self.window_conn = mshtmlevents.GetEvents(wnd,
sink=self.window_handler,
interface=MSHTML.HTMLWindowEvents2)
self.window_handler.addEventListener(event_name, event_fn)
return event_name # hmmm...
def getXmlHttpRequest(self):
print "getXMLHttpRequest"
o = comtypes.client.CreateObject('MSXML2.XMLHTTP.3.0')
print "getXMLHttpRequest", o
return Dispatch(o)
def getUri(self):
return self.application
def _loaded(self):
print "loaded"
if self.already_initialised:
return
self.already_initialised = True
self._addWindowEventListener("unload", self.on_unload_callback)
from __pyjamas__ import pygwt_processMetas, set_main_frame
set_main_frame(self)
(pth, app) = os.path.split(self.application)
if self.appdir:
pth = os.path.abspath(self.appdir)
sys.path.append(pth)
def on_unload_callback(self, *args):
PostQuitMessage(0)
global timer_q
timer_q = []
WM_USER_TIMER = RegisterWindowMessage("Timer Notify")
def MainWin(one_event):
# Pump Messages
msg = MSG()
pMsg = pointer(msg)
while 1:
res = GetMessage( pMsg, NULL, 0, 0)
if res == -1:
return 0
if res == 0:
break
if timer_q:
fn = timer_q.pop()
fn()
if msg.message == WM_USER_TIMER:
continue
TranslateMessage(pMsg)
DispatchMessage(pMsg)
if one_event:
break
return msg.wParam
global wv
wv = None
def add_timer_queue(fn):
timer_q.append(fn)
PostMessage(c_int(wv.hwnd), UINT(WM_USER_TIMER), WPARAM(0), LPARAM(0xffff))
def is_loaded():
return wv.already_initialised
def run(one_event=False, block=True):
try:
MainWin(one_event) # TODO: ignore block arg for now
except:
sys.stderr.write( traceback.print_exc() )
sys.stderr.flush()
def setup(application, appdir=None, width=800, height=600):
global wv
wv = Browser(application, appdir)
wv.load_app()
while 1:
if is_loaded():
return
run(one_event=True)
|
lovelysystems/pyjamas
|
pyjd/mshtml.py
|
Python
|
apache-2.0
| 10,213
|
from pytest_voluptuous import S
from voluptuous.validators import ExactSequence, Datetime
from skylines.model.notification import create_follower_notification
from tests.api import auth_for
from tests.data import users
def test_list_all(db_session, client):
john = users.john()
jane = users.jane()
max = users.max()
create_follower_notification(john, jane)
create_follower_notification(john, max)
create_follower_notification(jane, max)
db_session.commit()
res = client.get("/notifications", headers=auth_for(john))
assert res.status_code == 200
assert res.json == S(
{
u"events": ExactSequence(
[
{
u"actor": {u"id": int, u"name": u"Max Mustermann"},
u"id": int,
u"time": Datetime("%Y-%m-%dT%H:%M:%S.%f+00:00"),
u"type": u"follower",
u"unread": True,
u"user": {u"id": int, u"name": u"John Doe"},
},
{
u"actor": {u"id": int, u"name": u"Jane Doe"},
u"id": int,
u"time": Datetime("%Y-%m-%dT%H:%M:%S.%f+00:00"),
u"type": u"follower",
u"unread": True,
u"user": {u"id": int, u"name": u"John Doe"},
},
]
)
}
)
|
skylines-project/skylines
|
tests/api/views/notifications/list_test.py
|
Python
|
agpl-3.0
| 1,495
|
def extractWwwBlexbinNet(item):
'''
Parser for 'www.blexbin.net'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwBlexbinNet.py
|
Python
|
bsd-3-clause
| 542
|
## @file
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
XmlRoutines
'''
##
# Import Modules
#
import xml.dom.minidom
import re
from Logger.ToolError import PARSER_ERROR
import Logger.Log as Logger
## Create a element of XML
#
# @param Name
# @param String
# @param NodeList
# @param AttributeList
#
def CreateXmlElement(Name, String, NodeList, AttributeList):
Doc = xml.dom.minidom.Document()
Element = Doc.createElement(Name)
if String != '' and String != None:
Element.appendChild(Doc.createTextNode(String))
for Item in NodeList:
if type(Item) == type([]):
Key = Item[0]
Value = Item[1]
if Key != '' and Key != None and Value != '' and Value != None:
Node = Doc.createElement(Key)
Node.appendChild(Doc.createTextNode(Value))
Element.appendChild(Node)
else:
Element.appendChild(Item)
for Item in AttributeList:
Key = Item[0]
Value = Item[1]
if Key != '' and Key != None and Value != '' and Value != None:
Element.setAttribute(Key, Value)
return Element
## Get a list of XML nodes using XPath style syntax.
#
# Return a list of XML DOM nodes from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
def XmlList(Dom, String):
if String == None or String == "" or Dom == None or Dom == "":
return []
if Dom.nodeType == Dom.DOCUMENT_NODE:
Dom = Dom.documentElement
if String[0] == "/":
String = String[1:]
TagList = String.split('/')
Nodes = [Dom]
Index = 0
End = len(TagList) - 1
while Index <= End:
ChildNodes = []
for Node in Nodes:
if Node.nodeType == Node.ELEMENT_NODE and Node.tagName == \
TagList[Index]:
if Index < End:
ChildNodes.extend(Node.childNodes)
else:
ChildNodes.append(Node)
Nodes = ChildNodes
ChildNodes = []
Index += 1
return Nodes
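# Illustrative usage sketch (added; the file and tag names are hypothetical):
#
#   Dom = XmlParseFile('Package.xml')
#   Nodes = XmlList(Dom, '/PackageSurfaceArea/MsaFiles/Filename')
#   # -> list of matching <Filename> element nodes (empty list on bad input)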
## Get a single XML node using XPath style syntax.
#
# Return a single XML DOM node from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
def XmlNode(Dom, String):
if String == None or String == "" or Dom == None or Dom == "":
return None
if Dom.nodeType == Dom.DOCUMENT_NODE:
Dom = Dom.documentElement
if String[0] == "/":
String = String[1:]
TagList = String.split('/')
Index = 0
End = len(TagList) - 1
ChildNodes = [Dom]
while Index <= End:
for Node in ChildNodes:
if Node.nodeType == Node.ELEMENT_NODE and \
Node.tagName == TagList[Index]:
if Index < End:
ChildNodes = Node.childNodes
else:
return Node
break
Index += 1
return None
## Get a single XML element using XPath style syntax.
#
# Return a single XML element from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElement(Dom, String):
try:
return XmlNode(Dom, String).firstChild.data.strip()
except BaseException:
return ""
## Get a single XML element using XPath style syntax.
#
# Similar to XmlElement, but does not strip all the leading and trailing
# space and newlines; it only removes the newlines and spaces introduced by
# toprettyxml()
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElement2(Dom, String):
try:
HelpStr = XmlNode(Dom, String).firstChild.data
gRemovePrettyRe = re.compile(r"""(?:(\n *) )(.*)\1""", re.DOTALL)
HelpStr = re.sub(gRemovePrettyRe, r"\2", HelpStr)
return HelpStr
except BaseException:
return ""
## Get a single XML element of the current node.
#
# Return a single XML element specified by the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
def XmlElementData(Dom):
try:
return Dom.firstChild.data.strip()
except BaseException:
return ""
## Get a list of XML elements using XPath style syntax.
#
# Return a list of XML elements from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElementList(Dom, String):
return map(XmlElementData, XmlList(Dom, String))
## Get the XML attribute of the current node.
#
# Return a single XML attribute named Attribute from the current root Dom.
# If the input Dom or Attribute is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param Attribute The name of Attribute.
#
def XmlAttribute(Dom, Attribute):
try:
return Dom.getAttribute(Attribute)
except BaseException:
return ''
## Get the XML node name of the current node.
#
# Return a single XML node name from the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
def XmlNodeName(Dom):
try:
return Dom.nodeName.strip()
except BaseException:
return ''
## Parse an XML file.
#
# Parse the input XML file named FileName and return a XML DOM it stands for.
# If the input File is not a valid XML file, then an empty string is returned.
#
# @param FileName The XML file name.
#
def XmlParseFile(FileName):
try:
XmlFile = open(FileName)
Dom = xml.dom.minidom.parse(XmlFile)
XmlFile.close()
return Dom
except BaseException, XExcept:
XmlFile.close()
Logger.Error('\nUPT', PARSER_ERROR, XExcept, File=FileName, RaiseError=True)
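# Minimal round-trip sketch (added; the element and attribute names are
# hypothetical):
#
#   Element = CreateXmlElement('Library', '', [['Name', 'Xml']],
#                              [['Version', '1.0']])
#   # -> <Library Version="1.0"><Name>Xml</Name></Library>
#   XmlElement(Element, '/Library/Name')    # -> 'Xml'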
|
svn2github/vbox
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py
|
Python
|
gpl-2.0
| 6,987
|
from hearthbreaker.cards.minions.neutral import (
BloodfenRaptor,
IronbeakOwl,
NoviceEngineer,
StonetuskBoar,
WarGolem,
MogushanWarden,
FaerieDragon,
KoboldGeomancer,
ElvenArcher,
ArgentSquire,
SilvermoonGuardian,
TwilightDrake,
MagmaRager,
DireWolfAlpha,
WorgenInfiltrator,
Archmage,
DalaranMage,
Malygos,
AzureDrake,
OgreMagi,
Spellbreaker,
BloodmageThalnos,
LootHoarder,
LeperGnome,
IronforgeRifleman,
GnomishInventor,
GoldshireFootman,
FrostwolfGrunt,
IronfurGrizzly,
LordOfTheArena,
MurlocRaider,
ManaAddict,
OasisSnapjaw,
RecklessRocketeer,
RiverCrocolisk,
SenjinShieldmasta,
ScarletCrusader,
Shieldbearer,
SilverbackPatriarch,
JunglePanther,
RavenholdtAssassin,
StormpikeCommando,
StormwindKnight,
StranglethornTiger,
Sunwalker,
ThrallmarFarseer,
WindfuryHarpy,
YoungDragonhawk,
Wolfrider,
BootyBayBodyguard,
BoulderfistOgre,
ChillwindYeti,
CoreHound,
VoodooDoctor,
EarthenRingFarseer,
ArcaneGolem,
PriestessOfElune,
DarkscaleHealer,
ArgentCommander,
BluegillWarrior,
Wisp,
Nightblade,
ShatteredSunCleric,
TheBlackKnight,
AbusiveSergeant,
DarkIronDwarf,
Abomination,
AmaniBerserker,
SilverHandKnight,
FenCreeper,
VentureCoMercenary,
StormwindChampion,
Deathwing,
Alexstrasza,
EmperorCobra,
CrazedAlchemist,
AcidicSwampOoze,
AncientBrewmaster,
YouthfulBrewmaster,
BaronGeddon,
AngryChicken,
RagingWorgen,
TaurenWarrior,
SpitefulSmith,
BloodKnight,
FrostwolfWarlord,
RaidLeader,
DragonlingMechanic,
MurlocTidehunter,
RazorfenHunter,
KnifeJuggler,
CairneBloodhoof,
HarvestGolem,
TheBeast,
SylvanasWindrunner,
StampedingKodo,
FrostElemental,
Demolisher,
Doomsayer,
Gruul,
Hogger,
ImpMaster,
InjuredBlademaster,
MasterSwordsmith,
NatPagle,
Nozdormu,
RagnarosTheFirelord,
ColdlightOracle,
ColdlightSeer,
GrimscaleOracle,
MurlocWarleader,
AncientWatcher,
BigGameHunter,
BloodsailCorsair,
BloodsailRaider,
CaptainGreenskin,
HungryCrab,
MadBomber,
ManaWraith,
MindControlTech,
MurlocTidecaller,
Onyxia,
SouthseaCaptain,
SouthseaDeckhand,
YoungPriestess,
AcolyteOfPain,
CultMaster,
Secretkeeper,
VioletTeacher,
GadgetzanAuctioneer,
IllidanStormrage,
Lightwarden,
FlesheatingGhoul,
QuestingAdventurer,
GurubashiBerserker,
AncientMage,
DefenderOfArgus,
SunfuryProtector,
HarrisonJones,
KingMukla,
LeeroyJenkins,
SeaGiant,
MoltenGiant,
MountainGiant,
DreadCorsair,
CaptainsParrot,
TinkmasterOverspark,
AlarmoBot,
EliteTaurenChieftain,
MillhouseManastorm,
PintSizedSummoner,
OldMurkEye,
Ysera,
GelbinMekkatorque,
LorewalkerCho,
WildPyromancer,
FacelessManipulator,
NerubianEgg,
Maexxna,
HauntedCreeper,
NerubarWeblord,
UnstableGhoul,
Loatheb,
StoneskinGargoyle,
SludgeBelcher,
BaronRivendare,
DancingSwords,
Deathlord,
SpectralKnight,
Undertaker,
WailingSoul,
ZombieChow,
Feugen,
Stalagg,
MadScientist,
EchoingOoze,
ShadeOfNaxxramas,
KelThuzad,
)
from hearthbreaker.cards.minions.druid import (
KeeperOfTheGrove,
DruidOfTheClaw,
AncientOfLore,
AncientOfWar,
IronbarkProtector,
Cenarius,
)
from hearthbreaker.cards.minions.hunter import (
TimberWolf,
SavannahHighmane,
Houndmaster,
KingKrush,
StarvingBuzzard,
TundraRhino,
ScavengingHyena,
Webspinner,
)
from hearthbreaker.cards.minions.mage import (
ManaWyrm,
SorcerersApprentice,
KirinTorMage,
EtherealArcanist,
WaterElemental,
ArchmageAntonidas,
)
from hearthbreaker.cards.minions.paladin import (
AldorPeacekeeper,
ArgentProtector,
GuardianOfKings,
TirionFordring,
)
from hearthbreaker.cards.minions.priest import (
AuchenaiSoulpriest,
CabalShadowPriest,
Lightspawn,
Lightwell,
NorthshireCleric,
ProphetVelen,
TempleEnforcer,
DarkCultist,
)
from hearthbreaker.cards.minions.rogue import (
AnubarAmbusher,
DefiasRingleader,
EdwinVanCleef,
Kidnapper,
MasterOfDisguise,
PatientAssassin,
SI7Agent,
)
from hearthbreaker.cards.minions.shaman import (
AlAkirTheWindlord,
DustDevil,
EarthElemental,
FireElemental,
FlametongueTotem,
ManaTideTotem,
UnboundElemental,
Windspeaker,
)
from hearthbreaker.cards.minions.warlock import (
FlameImp,
PitLord,
VoidWalker,
DreadInfernal,
Felguard,
Doomguard,
Succubus,
SummoningPortal,
BloodImp,
LordJaraxxus,
VoidTerror,
Voidcaller,
)
from hearthbreaker.cards.minions.warrior import (
ArathiWeaponsmith,
Armorsmith,
CruelTaskmaster,
FrothingBerserker,
GrommashHellscream,
KorkronElite,
WarsongCommander,
)
|
anuragpapineni/Hearthbreaker-evolved-agent
|
hearthbreaker/cards/minions/__init__.py
|
Python
|
mit
| 5,168
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import Capability, BaseObject, Field, StringField, BytesField, IntField, \
UserError
from weboob.tools.ordereddict import OrderedDict
__all__ = ['ProfileNode', 'ContactPhoto', 'Contact', 'QueryError', 'Query', 'CapContact']
class ProfileNode(object):
"""
Node of a :class:`Contact` profile.
"""
HEAD = 0x01
SECTION = 0x02
def __init__(self, name, label, value, sufix=None, flags=0):
self.name = name
self.label = label
self.value = value
self.sufix = sufix
self.flags = flags
def __getitem__(self, key):
return self.value[key]
class ContactPhoto(BaseObject):
"""
Photo of a contact.
"""
name = StringField('Name of the photo')
url = StringField('Direct URL to photo')
data = BytesField('Data of photo')
thumbnail_url = StringField('Direct URL to thumbnail')
thumbnail_data = BytesField('Data of thumbnail')
hidden = Field('True if the photo is hidden on website', bool)
def __init__(self, name):
BaseObject.__init__(self, name)
self.name = name
def __iscomplete__(self):
return (self.data and (not self.thumbnail_url or self.thumbnail_data))
def __str__(self):
return self.url
def __repr__(self):
return u'<ContactPhoto "%s" data=%do tndata=%do>' % (self.id,
len(self.data) if self.data else 0,
len(self.thumbnail_data) if self.thumbnail_data else 0)
class Contact(BaseObject):
"""
A contact.
"""
STATUS_ONLINE = 0x001
STATUS_AWAY = 0x002
STATUS_OFFLINE = 0x004
STATUS_ALL = 0xfff
name = StringField('Name of contact')
status = IntField('Status of contact (STATUS_* constants)')
url = StringField('URL to the profile of contact')
status_msg = StringField('Message of status')
summary = StringField('Description of contact')
photos = Field('List of photos', dict, default=OrderedDict())
profile = Field('Contact profile', dict, default=OrderedDict())
def __init__(self, id, name, status):
BaseObject.__init__(self, id)
self.name = name
self.status = status
def set_photo(self, name, **kwargs):
"""
Set photo of contact.
:param name: name of photo
:type name: str
:param kwargs: See :class:`ContactPhoto` to know what other parameters you can use
"""
if name not in self.photos:
self.photos[name] = ContactPhoto(name)
photo = self.photos[name]
for key, value in kwargs.iteritems():
setattr(photo, key, value)
def get_text(self):
def print_node(node, level=1):
result = u''
if node.flags & node.SECTION:
result += u'\t' * level + node.label + '\n'
for sub in node.value.itervalues():
result += print_node(sub, level + 1)
else:
if isinstance(node.value, (tuple, list)):
value = ', '.join(unicode(v) for v in node.value)
elif isinstance(node.value, float):
value = '%.2f' % node.value
else:
value = node.value
result += u'\t' * level + u'%-20s %s\n' % (node.label + ':', value)
return result
result = u'Nickname: %s\n' % self.name
if self.status & Contact.STATUS_ONLINE:
s = 'online'
elif self.status & Contact.STATUS_OFFLINE:
s = 'offline'
elif self.status & Contact.STATUS_AWAY:
s = 'away'
else:
s = 'unknown'
result += u'Status: %s (%s)\n' % (s, self.status_msg)
result += u'URL: %s\n' % self.url
result += u'Photos:\n'
for name, photo in self.photos.iteritems():
result += u'\t%s%s\n' % (photo, ' (hidden)' if photo.hidden else '')
result += u'\nProfile:\n'
for head in self.profile.itervalues():
result += print_node(head)
result += u'Description:\n'
for s in self.summary.split('\n'):
result += u'\t%s\n' % s
return result
class QueryError(UserError):
"""
Raised when unable to send a query to a contact.
"""
class Query(BaseObject):
"""
Query to send to a contact.
"""
message = StringField('Message received')
def __init__(self, id, message):
BaseObject.__init__(self, id)
self.message = message
class CapContact(Capability):
def iter_contacts(self, status=Contact.STATUS_ALL, ids=None):
"""
Iter contacts
:param status: get only contacts with the specified status
:type status: Contact.STATUS_*
:param ids: if set, get the specified contacts
:type ids: list[str]
:rtype: iter[:class:`Contact`]
"""
raise NotImplementedError()
def get_contact(self, id):
"""
Get a contact from his id.
The default implementation only calls iter_contacts()
with the proper values, but it might be overloaded
by backends.
:param id: the ID requested
:type id: str
:rtype: :class:`Contact` or None if not found
"""
l = self.iter_contacts(ids=[id])
try:
return l[0]
except IndexError:
return None
def send_query(self, id):
"""
Send a query to a contact
:param id: the ID of contact
:type id: str
:rtype: :class:`Query`
:raises: :class:`QueryError`
"""
raise NotImplementedError()
def get_notes(self, id):
"""
Get personal notes about a contact
:param id: the ID of the contact
:type id: str
:rtype: unicode
"""
raise NotImplementedError()
def save_notes(self, id, notes):
"""
Set personal notes about a contact
:param id: the ID of the contact
:type id: str
:returns: the unicode object to save as notes
"""
raise NotImplementedError()
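# Illustrative sketch (added, not part of the original module): a backend
# exposing contacts subclasses the capability and implements the iterator:
#
#   class MyBackend(CapContact):            # hypothetical backend
#       def iter_contacts(self, status=Contact.STATUS_ALL, ids=None):
#           yield Contact('1', u'Romain', Contact.STATUS_ONLINE)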
|
sputnick-dev/weboob
|
weboob/capabilities/contact.py
|
Python
|
agpl-3.0
| 7,083
|
import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.authentication_provider import AuthenticationProvider
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestAuthenticationProvider(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({"account": ["get_by_id", "add_authentication_providers"]}, m)
self.account = self.canvas.get_account(1)
self.authentication_providers = self.account.add_authentication_providers(
authentication_providers={"auth_type": "Authentication Providers"}
)
# update()
def test_update_authentication_providers(self, m):
register_uris(
{"authentication_providers": ["update_authentication_providers"]}, m
)
new_auth_type = "New Authentication Providers"
self.authentication_providers.update(
authentication_providers={"auth_type": new_auth_type}
)
self.assertEqual(self.authentication_providers.auth_type, new_auth_type)
# delete()
def test_delete_authentication_providers(self, m):
register_uris(
{"authentication_providers": ["delete_authentication_providers"]}, m
)
deleted_authentication_providers = self.authentication_providers.delete()
self.assertIsInstance(deleted_authentication_providers, AuthenticationProvider)
self.assertTrue(hasattr(deleted_authentication_providers, "auth_type"))
self.assertEqual(
deleted_authentication_providers.auth_type, "Authentication Providers"
)
# __str__()
def test_str__(self, m):
string = str(self.authentication_providers)
self.assertIsInstance(string, str)
|
ucfopen/canvasapi
|
tests/test_authentication_providers.py
|
Python
|
mit
| 1,880
|
import random
def insertion_sort(items):
"""Return the list in sort order and the number of swaps.
Implements insertion sort algorithm.
"""
swaps = 0
# caches the size of the list
items_size = len(items)
# iterates over the list, skipping the first item,
# since there is no previous items to compare to
for i in xrange(1, items_size):
# stores the index of the previous item
j = i-1
# iterates over the list backwards
# and swaps items when necessary
while j >= 0 and items[i] < items[j]:
swaps = swaps + 1
# caches both values from i and j
item_i = items[i]
item_j = items[j]
# swap
items[j] = item_i
items[i] = item_j
# decrements j and i
# in order to be able to compare previous items
i = i - 1
j = j - 1
# returns the sorted list
return items, swaps
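# Minimal usage sketch (added, not part of the original module); the module
# already imports random, so a shuffled list exercises the swap loop.
if __name__ == '__main__':
    items = range(10)
    random.shuffle(items)
    result, swaps = insertion_sort(items)
    # the list is sorted in place and returned together with the swap count
    assert result == range(10)
    print result, swaps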
|
marioluan/data-structures-and-algorithms
|
algorithms/python/src/sorting_algorithms/insertion_sort.py
|
Python
|
mit
| 873
|
from __future__ import absolute_import, unicode_literals
import time
from datetime import timedelta
from djcelery_transactions import task
from django.utils import timezone
from redis_cache import get_redis_connection
from .models import CreditAlert, Invitation, Org, TopUpCredits
@task(track_started=True, name='send_invitation_email_task')
def send_invitation_email_task(invitation_id):
invitation = Invitation.objects.get(pk=invitation_id)
invitation.send_email()
@task(track_started=True, name='send_alert_email_task')
def send_alert_email_task(alert_id):
alert = CreditAlert.objects.get(pk=alert_id)
alert.send_email()
@task(track_started=True, name='check_credits_task')
def check_credits_task():
CreditAlert.check_org_credits()
@task(track_started=True, name='calculate_credit_caches')
def calculate_credit_caches():
"""
Repopulates the active topup and total credits for each organization
that received messages in the past week.
"""
# get all orgs that have sent a message in the past week
last_week = timezone.now() - timedelta(days=7)
# for every org that has sent a message in the past week
for org in Org.objects.filter(msgs__created_on__gte=last_week).distinct('pk'):
start = time.time()
org._calculate_credit_caches()
print " -- recalculated credits for %s in %0.2f seconds" % (org.name, time.time() - start)
@task(track_started=True, name="squash_topupcredits")
def squash_topupcredits():
r = get_redis_connection()
key = 'squash_topupcredits'
if not r.get(key):
with r.lock(key, timeout=900):
TopUpCredits.squash_credits()
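# Note (added sketch, not from the original source): the get-then-lock dance
# above is a cheap fast path -- while another worker holds the lock the
# sentinel key exists, so this run skips the squash instead of blocking.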
|
ewheeler/rapidpro
|
temba/orgs/tasks.py
|
Python
|
agpl-3.0
| 1,662
|
from django.test import TestCase
from lib import grades
class GradesTestCase(TestCase):
def test_numeric_value_for_grade_retrieves_single_letter_grades(self):
for grade, value in grades.GRADE_MAPPINGS.iteritems():
self.assertEqual(grades.numeric_value_for_grade(grade), value)
def test_numeric_value_for_grade_handles_in_between_grades(self):
self.assertEqual(grades.numeric_value_for_grade(
'A/A-'), (11.0 + 12.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'A-/B+'), (10.0 + 11.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'B+/B'), (9.0 + 10.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'B/B-'), (8.0 + 9.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'B-/C+'), (7.0 + 8.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'C+/C'), (6.0 + 7.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'C/C-'), (5.0 + 6.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'C-/D'), (3.0 + 5.0) / 2)
self.assertEqual(grades.numeric_value_for_grade(
'D/E'), (0.0 + 3.0) / 2)
|
layuplist/layup-list
|
apps/web/tests/lib_tests/test_grades.py
|
Python
|
gpl-3.0
| 1,236
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import MySQLdb
import MySQLdb.cursors
from airflow.hooks.dbapi_hook import DbApiHook
class MySqlHook(DbApiHook):
"""
Interact with MySQL.
You can specify charset in the extra field of your connection
as ``{"charset": "utf8"}``. Also you can choose cursor as
``{"cursor": "SSCursor"}``. Refer to the MySQLdb.cursors for more details.
"""
conn_name_attr = 'mysql_conn_id'
default_conn_name = 'mysql_default'
supports_autocommit = True
def __init__(self, *args, **kwargs):
super(MySqlHook, self).__init__(*args, **kwargs)
self.schema = kwargs.pop("schema", None)
def set_autocommit(self, conn, autocommit):
"""
MySql connection sets autocommit in a different way.
"""
conn.autocommit(autocommit)
def get_autocommit(self, conn):
"""
MySql connection gets autocommit in a different way.
:param conn: connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting
        :rtype: bool
"""
return conn.get_autocommit()
def get_conn(self):
"""
Returns a mysql connection object
"""
conn = self.get_connection(self.mysql_conn_id)
conn_config = {
"user": conn.login,
"passwd": conn.password or '',
"host": conn.host or 'localhost',
"db": self.schema or conn.schema or ''
}
if not conn.port:
conn_config["port"] = 3306
else:
conn_config["port"] = int(conn.port)
if conn.extra_dejson.get('charset', False):
conn_config["charset"] = conn.extra_dejson["charset"]
if (conn_config["charset"]).lower() == 'utf8' or\
(conn_config["charset"]).lower() == 'utf-8':
conn_config["use_unicode"] = True
if conn.extra_dejson.get('cursor', False):
if (conn.extra_dejson["cursor"]).lower() == 'sscursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSCursor
elif (conn.extra_dejson["cursor"]).lower() == 'dictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.DictCursor
elif (conn.extra_dejson["cursor"]).lower() == 'ssdictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSDictCursor
local_infile = conn.extra_dejson.get('local_infile', False)
if conn.extra_dejson.get('ssl', False):
conn_config['ssl'] = conn.extra_dejson['ssl']
if local_infile:
conn_config["local_infile"] = 1
conn = MySQLdb.connect(**conn_config)
return conn
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
LOAD DATA LOCAL INFILE '{tmp_file}'
INTO TABLE {table}
""".format(**locals()))
conn.commit()
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
SELECT * INTO OUTFILE '{tmp_file}'
FROM {table}
""".format(**locals()))
conn.commit()
@staticmethod
def _serialize_cell(cell, conn):
"""
MySQLdb converts an argument to a literal
when passing those separately to execute. Hence, this method does nothing.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The same cell
:rtype: object
"""
return cell
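# Illustrative usage sketch (added; the connection id, schema and table
# names are hypothetical):
#
#   hook = MySqlHook(mysql_conn_id='mysql_default', schema='analytics')
#   rows = hook.get_records('SELECT 1')      # inherited from DbApiHook
#   hook.bulk_load(table='events_staging', tmp_file='/tmp/events.tsv')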
|
sid88in/incubator-airflow
|
airflow/hooks/mysql_hook.py
|
Python
|
apache-2.0
| 4,636
|
# -*- coding: utf-8 -*-
import datetime, time, csv, os
import numpy as np
from utils.db import SqliteDB
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from utils.rwlogging import balLogger as logb
from strader import Trader
from indicator import ma, macd, bolling, rsi, kdj
from mas.maPool import Pool
mas = emas = smas = lwmas = std = prices = None
def runStrategy(in_prices):
global mas, emas, smas, lwmas, std, prices
log.debug('beginning ma strategy ...')
prices = in_prices
ps = [p['close'] for p in prices]
std = [0] * 51
l = len(prices)
for period in range(2, 51):
std[period] = [0] * l
for i in range(period - 1, l):
std[period][i] = np.std(ps[i-period+1 : i+1], dtype=np.float64, ddof=0)
mas = [0] * 81
emas = [0] * 81
smas = [0] * 81
lwmas = [0] * 81
for period in range(2, 81):
mas[period] = ma.calc_ma(ps, period)
emas[period] = ma.calc_ema(ps, period)
smas[period] = ma.calc_sma(ps, period)
lwmas[period] = ma.calc_lwma(ps, period)
pool = Pool(100)
#t = doTrade(pool, 25, 1.0, 'MA', 7, 'SMA', 12, 'EMA', 31, 'SMA', 7, 'MA', 12, 'MA', 13)
#t = doTrade(pool, 25, 1.3, 'MA', 7, 'SMA', 13, 'EMA', 31, 'SMA', 7, 'MA', 12, 'MA', 13)
#pool.showStrategies()
#return
log.debug('running ma strategy ...')
starttime = datetime.datetime.now()
matypes = ['MA', 'EMA', 'SMA', 'LWMA']
farr = range(5, 20)
s1arr = range(8, 40)
s2arr = range(0, 80)
poola = Pool(10)
poolb = Pool(10)
for stdPeriod in [20, ]:
for stdGuage in [1.0, ]:
log.debug('*** ' + str(stdPeriod) + ',' + str(stdGuage) + ' ***')
for ft, f in [(matype, period) for matype in matypes for period in farr]:
for s1t, s1 in [(matype, period) for matype in matypes for period in s1arr]:
if s1 != 0 and s1 <= f: continue
elapsed = (datetime.datetime.now() - starttime).seconds
log.debug('== ' + str(elapsed) + ',' + ft + '_' + str(f) + ',' + s1t + '_' + str(s1) + ' ==')
for s2t, s2 in [(matype, period) for matype in matypes for period in s2arr]:
if s2 != 0 and s2 <= s1: continue
if s2 == 0 and (s2t == 'EMA' or s2t == 'SMA' or s2t == 'LWMA'): continue
doTrade(poola, stdPeriod, stdGuage, ft, f, s1t, s1, s2t, s2, '', 0, '', 0, '', 0)
doTrade(poolb, stdPeriod, stdGuage, '', 0, '', 0, '', 0, ft, f, s1t, s1, s2t, s2)
poola.showStrategies()
poolb.showStrategies()
def doTrade(pool, stdPeriod, stdGuage, afmt, af, as1mt, as1, as2mt, as2, bfmt, bf, bs1mt, bs1, bs2mt, bs2):
global std, prices
sname = str(stdPeriod) + '_' + str(stdGuage)
sname += '_' + afmt + '_' + str(af) + '_' + as1mt + '_' + str(as1)
if as2 > 0: sname += '_' + as2mt + '_' + str(as2)
sname += '_' + bfmt + '_' + str(bf) + '_' + bs1mt + '_' +str(bs1)
if bs2 > 0: sname += '_' + bs2mt + '_' + str(bs2)
afma, as1ma, as2ma = getMas(afmt, af), getMas(as1mt, as1), getMas(as2mt, as2)
bfma, bs1ma, bs2ma = getMas(bfmt, bf), getMas(bs1mt, bs1), getMas(bs2mt, bs2)
front = max(as1, as2, bs1, bs2)
active = 0
a1pos = a2pos = b1pos = b2pos = 0
a1wait = a2wait = b1wait = b2wait = 0
a1price = a2price = b1price = b2price = 0
t = Trader(sname)
for i in range(front, len(prices)):
price = prices[i]
volume = 0
notes = ''
oa1pos, oa2pos, ob1pos , ob2pos = a1pos, a2pos, b1pos , b2pos
oa1wait, oa2wait, ob1wait, ob2wait = a1wait, a2wait, b1wait, b2wait
if std[stdPeriod][i] >= stdGuage: active = 2
else: active = 1
#A
if as1 > 0 and afma[i - 1] <= as1ma[i - 1] and afma[i] > as1ma[i]:
a1wait = 1
if as1 > 0 and afma[i - 1] >= as1ma[i - 1] and afma[i] < as1ma[i]:
a1wait = -1
if as2 > 0 and afma[i - 1] <= as2ma[i - 1] and afma[i] > as2ma[i]:
a2wait = 1
if as2 > 0 and afma[i - 1] >= as2ma[i - 1] and afma[i] < as2ma[i]:
a2wait = -1
if active == 1: a1pos, a2pos = a1wait, a2wait
if active != 1 and a1pos * a1wait == -1: a1pos = 0
if active != 1 and a2pos * a2wait == -1: a2pos = 0
if oa1pos != a1pos:
volume += a1pos - oa1pos
notes += 'A1:'+ str(oa1pos) + '->' + str(a1pos) + ';' + str(a1price) + '->' + str(price['rmb']) + ';'
a1price = price['rmb']
if oa2pos != a2pos:
volume += a2pos - oa2pos
			notes += 'A2:'+ str(oa2pos) + '->' + str(a2pos) + ';' + str(a2price) + '->' + str(price['rmb']) + ';'
a2price = price['rmb']
#B
if bs1 > 0 and bfma[i - 1] <= bs1ma[i - 1] and bfma[i] > bs1ma[i]:
b1wait = 1
if bs1 > 0 and bfma[i - 1] >= bs1ma[i - 1] and bfma[i] < bs1ma[i]:
b1wait = -1
if bs2 > 0 and bfma[i - 1] <= bs2ma[i - 1] and bfma[i] > bs2ma[i]:
b2wait = 1
if bs2 > 0 and bfma[i - 1] >= bs2ma[i - 1] and bfma[i] < bs2ma[i]:
b2wait = -1
if active == 2: b1pos, b2pos = b1wait, b2wait
if active != 2 and b1pos * b1wait == -1: b1pos = 0
if active != 2 and b2pos * b2wait == -1: b2pos = 0
if ob1pos != b1pos:
volume += b1pos - ob1pos
notes += 'B1:'+ str(ob1pos) + '->' + str(b1pos) + ';' + str(b1price) + '->' + str(price['rmb']) + ';'
b1price = price['rmb']
if ob2pos != b2pos:
volume += b2pos - ob2pos
notes += 'B2:'+ str(ob2pos) + '->' + str(b2pos) + ';' + str(b2price) + '->' + str(price['rmb']) + ';'
b2price = price['rmb']
if volume != 0:
t.processOrder(price['dt'], price['rmb'], volume * 1000, notes=notes)
else:
t.summary(price['dt'], price['rmb'])
pool.estimate(t)
return t
def getMas(matype, period):
if matype == 'MA':
return mas[period]
elif matype == 'EMA':
return emas[period]
elif matype == 'SMA':
return smas[period]
elif matype == 'LWMA':
return lwmas[period]
else:
return None
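# Note (added sketch): runStrategy expects in_prices as a list of dicts with
# at least 'close' (MA/std inputs), 'rmb' (trade price) and 'dt' (timestamp),
# e.g.
#
#   runStrategy([{'dt': '2014-01-01', 'close': 6.12, 'rmb': 6.12}, ...])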
|
rolandwz/pymisc
|
trader/mas/maStrategy.py
|
Python
|
mit
| 5,600
|
# -*- coding: utf-8 -*-
from micolog_plugin import *
from BeautifulSoup import *
from datetime import datetime
from model import Entry,Comment,Media
import logging,math
import re
from base import BaseRequestHandler,urldecode
class Importhandler(BaseRequestHandler):
def post(self):
if not self.is_login:
self.redirect(users.create_login_url(self.request.uri))
filename=self.param('filename')
do_comment=self.paramint('c',0)
		if filename[:4]=='img/':  # handle images
new_filename=filename.split('/')[1]
mtype =new_filename.split('.')[1]
bits = self.request.body
media=Media.all().filter('name =',new_filename)
if media.count()>0:
media=media[0]
else:
media=Media()
media.name=new_filename
media.mtype=mtype
media.bits=bits
media.put()
bid='_'.join(new_filename.split('_')[:-1])
entries=Entry.all().filter('slug =',bid)
if entries.count()>0:
entry=entries[0]
entry.content=entry.content.replace(filename,'/media/'+str(media.key()))
entry.put()
return
if filename=="index.html" or filename[-5:]!='.html':
return
		# handle the html page
bid=filename[:-5]
try:
soup=BeautifulSoup(self.request.body)
bp=soup.find(id='bp')
title=self.getChineseStr( soup.title.text)
logging.info(bid)
pubdate=self.getdate( bp.find(id='bp-'+bid+'-publish').text)
body=bp.find('div','blogpost')
entries=Entry.all().filter('title = ',title)
if entries.count()<1:
entry=Entry()
else:
entry=entries[0]
## entry=Entry.get_by_key_name(bid)
## if not entry:
## entry=Entry(key_name=bid)
entry.slug=bid
entry.title=title
entry.author_name=self.login_user.nickname()
entry.date=pubdate
entry.settags("")
entry.content=unicode(body)
entry.author=self.login_user
entry.save(True)
if do_comment>0:
comments=soup.find('div','comments','div')
if comments:
for comment in comments.contents:
						# modify by lastmind4: handle anonymous comments whose
						# header starts with '- ' (no author name)
						name_date_pair = comment.h5.text
						if name_date_pair.startswith('- '):
							name_date_pair = 'Anonymous ' + name_date_pair
						name,date = name_date_pair.split(' - ')
key_id=comment.h5['id']
date=self.getdate(date)
content=comment.contents[1].text
comment=Comment.get_or_insert(key_id,content=content)
comment.entry=entry
comment.date=date
comment.author=name
comment.save()
except Exception,e :
logging.info("import error: %s"%e.message)
def getdate(self,d):
try:
ret=datetime.strptime(d,"%Y/%m/%d %H:%M:%S")
except:
try:
ret=datetime.strptime(d,"%m/%d/%Y %H:%M:%S %p")
except:
ret=datetime.now()
return ret
def getChineseStr(self,s):
return re.sub(r'&#(\d+);',lambda x:unichr(int(x.group(1))) ,s)
class live_import(Plugin_importbase):
def __init__(self):
Plugin_importbase.__init__(self,__file__,"spaces.live.com","Plugin for import entries from space.zip.")
self.author="xuming"
self.authoruri="http://xuming.net"
self.uri="http://xuming.net"
		self.description='''Plugin for import entries from space.zip.<br>
		Import a Spaces.Live.com blog into Micolog.'''
self.name="LiveSapce Import"
self.version="0.12"
self.register_urlzip('/admin/live_import/swfupload/(.*)','swfupload.zip')
self.register_urlhandler('/admin/live_import/import',Importhandler)
def get(self,page):
return self.render_content("import.html",{'name':self.name})
|
Alwnikrotikz/micolog2
|
plugins/live_import/live_import.py
|
Python
|
gpl-3.0
| 3,458
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ptx_isa import *
# Nothing to see here, move along... ;)
__active_code = None
def set_active_code(code):
global __active_code
if __active_code is not None:
__active_code.set_active_callback(None)
__active_code = code
if code is not None:
code.set_active_callback(set_active_code)
return
# Property version
def __get_active_code(self):
global __active_code
return __active_code
# Free function version
def get_active_code():
global __active_code
return __active_code
for l in locals().values():
if isinstance(l, type):
if issubclass(l, ptxInstruction):
l.active_code = property(__get_active_code)
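# Illustrative sketch (added, not from the original source): after the loop
# above, every ptxInstruction subclass in this namespace reads the shared
# module-level code object through the injected property:
#
#   set_active_code(code)                   # code is hypothetical
#   instr.active_code is get_active_code()  # -> True for any instance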
|
matthiaskramm/corepy
|
corepy/arch/ptx/isa/__init__.py
|
Python
|
bsd-3-clause
| 2,838
|
# firebird/__init__.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.firebird.base import BIGINT
from sqlalchemy.dialects.firebird.base import BLOB
from sqlalchemy.dialects.firebird.base import CHAR
from sqlalchemy.dialects.firebird.base import DATE
from sqlalchemy.dialects.firebird.base import FLOAT
from sqlalchemy.dialects.firebird.base import NUMERIC
from sqlalchemy.dialects.firebird.base import SMALLINT
from sqlalchemy.dialects.firebird.base import TEXT
from sqlalchemy.dialects.firebird.base import TIME
from sqlalchemy.dialects.firebird.base import TIMESTAMP
from sqlalchemy.dialects.firebird.base import VARCHAR
from . import base # noqa
from . import fdb # noqa
from . import kinterbasdb # noqa
base.dialect = dialect = fdb.dialect
__all__ = (
"SMALLINT",
"BIGINT",
"FLOAT",
"FLOAT",
"DATE",
"TIME",
"TEXT",
"NUMERIC",
"FLOAT",
"TIMESTAMP",
"VARCHAR",
"CHAR",
"BLOB",
"dialect",
)
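# --- Hedged usage sketch (illustration, not part of the original file) ---
# A typical engine URL for this dialect with its default fdb driver
# (user, host and database path below are placeholders):
#   from sqlalchemy import create_engine
#   engine = create_engine("firebird+fdb://user:password@localhost/path/to/db.fdb")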
|
monetate/sqlalchemy
|
lib/sqlalchemy/dialects/firebird/__init__.py
|
Python
|
mit
| 1,153
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
import logging
import urllib2
import urllib
import base64
import json
from cherrypy.lib.auth2 import require
class TVHeadend(object):
def __init__(self):
self.logger = logging.getLogger('modules.tvheadend')
htpc.MODULES.append({
'name': 'TVHeadend',
'id': 'tvheadend',
'test': htpc.WEBDIR + 'TVHeadend/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'tvheadend_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'tvheadend_name'},
{'type': 'text', 'label': 'IP / Host *', 'name': 'tvheadend_host'},
{'type': 'text', 'label': 'Port *', 'name': 'tvheadend_port'},
{'type': 'text', 'label': 'Username', 'name': 'tvheadend_username'},
{'type': 'password', 'label': 'Password', 'name': 'tvheadend_password'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc': 'Reverse proxy link, e.g. https://domain.com/tvh', 'name': 'tvheadend_reverse_proxy_link'},
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template("tvheadend.html").render(scriptname="tvheadend", webinterface=self.webinterface())
def webinterface(self):
ip = htpc.settings.get('tvheadend_host')
port = htpc.settings.get('tvheadend_port')
url = 'http://%s:%s/' % (ip, port)
if htpc.settings.get('tvheadend_reverse_proxy_link'):
url = htpc.settings.get('tvheadend_reverse_proxy_link')
return url
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetEPG(self, strLimit="300", strChannel=""):
return self.fetch("epg", {'limit': strLimit, 'start': "0", 'channel': strChannel })
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetChannels(self):
return self.fetch("api/channel/grid", { 'dir': 'ASC', 'sort': 'tags', 'limit': 1000})
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetChannelTags(self):
return self.fetch("channeltags", {'op': 'listTags'})
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def DVRAdd(self, strEventID=""):
return self.fetch("dvr", {'eventId': strEventID, 'op': "recordEvent"})
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def DVRDel(self, strEntryID=""):
return self.fetch("dvr", {'entryId': strEntryID, 'op': "cancelEntry"})
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def DVRList(self, strType=""):
return self.fetch("dvrlist_" + strType, None)
def fetch(self, strQuery, rgpData):
rgpHeaders = {}
username = htpc.settings.get("tvheadend_username", "")
password = htpc.settings.get("tvheadend_password", "")
if username and password:
rgpHeaders['Authorization'] = 'Basic %s' % base64.encodestring('%s:%s' % (username, password)).strip('\n')
# Lame debug to get as much info as possible
self.logger.debug('strQuery: %s' % strQuery)
self.logger.debug('rgpData: %s' % rgpData)
strResponse = None
strData = None
if rgpData is not None:
strData = urllib.urlencode(rgpData)
url = "http://%s:%s/%s" % (htpc.settings.get("tvheadend_host", ""), htpc.settings.get("tvheadend_port", ""), strQuery)
self.logger.debug('url: %s' % url)
self.logger.debug('encoded: %s' % strData)
try:
pRequest = urllib2.Request("http://%s:%s/%s" % (htpc.settings.get("tvheadend_host", ""), htpc.settings.get("tvheadend_port", ""), strQuery), data = strData, headers = rgpHeaders)
strResponse = urllib2.urlopen(pRequest).read()
return json.loads(strResponse)
except Exception as e:
            self.logger.error('%s %s failed error: %s' % (strQuery, rgpData, e))
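    # --- Hedged usage note (illustration, not part of the original file) ---
    # fetch() issues a GET when rgpData is None and a form-encoded POST
    # otherwise, then decodes the JSON body; the exposed endpoints above
    # call it like:
    #   self.fetch("api/channel/grid", {'dir': 'ASC', 'sort': 'tags', 'limit': 1000})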
|
scith/htpc-manager_ynh
|
sources/modules/tvheadend.py
|
Python
|
gpl-3.0
| 4,053
|
# encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python2.7/dist-packages/PyQt4/QtCore.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QVariant(): # skipped bases: <type 'sip.simplewrapper'>
"""
QVariant()
QVariant(Type)
QVariant(int, sip.voidptr)
QVariant(QVariant)
QVariant(object)
"""
def canConvert(self, Type): # real signature unknown; restored from __doc__
""" QVariant.canConvert(Type) -> bool """
return False
def clear(self): # real signature unknown; restored from __doc__
""" QVariant.clear() """
pass
def convert(self, Type): # real signature unknown; restored from __doc__
""" QVariant.convert(Type) -> bool """
return False
def data(self): # real signature unknown; restored from __doc__
""" QVariant.data() -> sip.voidptr """
pass
def detach(self): # real signature unknown; restored from __doc__
""" QVariant.detach() """
pass
def fromList(self, list_of_QVariant): # real signature unknown; restored from __doc__
""" QVariant.fromList(list-of-QVariant) -> QVariant """
return QVariant
def fromMap(self, dict_of_QString_QVariant): # real signature unknown; restored from __doc__
""" QVariant.fromMap(dict-of-QString-QVariant) -> QVariant """
return QVariant
def isDetached(self): # real signature unknown; restored from __doc__
""" QVariant.isDetached() -> bool """
return False
def isNull(self): # real signature unknown; restored from __doc__
""" QVariant.isNull() -> bool """
return False
def isValid(self): # real signature unknown; restored from __doc__
""" QVariant.isValid() -> bool """
return False
def load(self, QDataStream): # real signature unknown; restored from __doc__
""" QVariant.load(QDataStream) """
pass
def nameToType(self, p_str): # real signature unknown; restored from __doc__
""" QVariant.nameToType(str) -> Type """
pass
def save(self, QDataStream): # real signature unknown; restored from __doc__
""" QVariant.save(QDataStream) """
pass
def swap(self, QVariant): # real signature unknown; restored from __doc__
""" QVariant.swap(QVariant) """
pass
def toBitArray(self): # real signature unknown; restored from __doc__
""" QVariant.toBitArray() -> QBitArray """
return QBitArray
def toBool(self): # real signature unknown; restored from __doc__
""" QVariant.toBool() -> bool """
return False
def toByteArray(self): # real signature unknown; restored from __doc__
""" QVariant.toByteArray() -> QByteArray """
return QByteArray
def toChar(self): # real signature unknown; restored from __doc__
""" QVariant.toChar() -> QChar """
return QChar
def toDate(self): # real signature unknown; restored from __doc__
""" QVariant.toDate() -> QDate """
return QDate
def toDateTime(self): # real signature unknown; restored from __doc__
""" QVariant.toDateTime() -> QDateTime """
return QDateTime
def toDouble(self): # real signature unknown; restored from __doc__
""" QVariant.toDouble() -> (float, bool) """
pass
def toEasingCurve(self): # real signature unknown; restored from __doc__
""" QVariant.toEasingCurve() -> QEasingCurve """
return QEasingCurve
def toFloat(self): # real signature unknown; restored from __doc__
""" QVariant.toFloat() -> (float, bool) """
pass
def toHash(self): # real signature unknown; restored from __doc__
""" QVariant.toHash() -> dict-of-QString-QVariant """
pass
def toInt(self): # real signature unknown; restored from __doc__
""" QVariant.toInt() -> (int, bool) """
pass
def toLine(self): # real signature unknown; restored from __doc__
""" QVariant.toLine() -> QLine """
return QLine
def toLineF(self): # real signature unknown; restored from __doc__
""" QVariant.toLineF() -> QLineF """
return QLineF
def toList(self): # real signature unknown; restored from __doc__
""" QVariant.toList() -> list-of-QVariant """
pass
def toLocale(self): # real signature unknown; restored from __doc__
""" QVariant.toLocale() -> QLocale """
return QLocale
def toLongLong(self): # real signature unknown; restored from __doc__
""" QVariant.toLongLong() -> (int, bool) """
pass
def toMap(self): # real signature unknown; restored from __doc__
""" QVariant.toMap() -> dict-of-QString-QVariant """
pass
def toPoint(self): # real signature unknown; restored from __doc__
""" QVariant.toPoint() -> QPoint """
return QPoint
def toPointF(self): # real signature unknown; restored from __doc__
""" QVariant.toPointF() -> QPointF """
return QPointF
def toPyObject(self): # real signature unknown; restored from __doc__
""" QVariant.toPyObject() -> object """
return object()
def toReal(self): # real signature unknown; restored from __doc__
""" QVariant.toReal() -> (float, bool) """
pass
def toRect(self): # real signature unknown; restored from __doc__
""" QVariant.toRect() -> QRect """
return QRect
def toRectF(self): # real signature unknown; restored from __doc__
""" QVariant.toRectF() -> QRectF """
return QRectF
def toRegExp(self): # real signature unknown; restored from __doc__
""" QVariant.toRegExp() -> QRegExp """
return QRegExp
def toSize(self): # real signature unknown; restored from __doc__
""" QVariant.toSize() -> QSize """
return QSize
def toSizeF(self): # real signature unknown; restored from __doc__
""" QVariant.toSizeF() -> QSizeF """
return QSizeF
def toString(self): # real signature unknown; restored from __doc__
""" QVariant.toString() -> QString """
return QString
def toStringList(self): # real signature unknown; restored from __doc__
""" QVariant.toStringList() -> QStringList """
return QStringList
def toTime(self): # real signature unknown; restored from __doc__
""" QVariant.toTime() -> QTime """
return QTime
def toUInt(self): # real signature unknown; restored from __doc__
""" QVariant.toUInt() -> (int, bool) """
pass
def toULongLong(self): # real signature unknown; restored from __doc__
""" QVariant.toULongLong() -> (int, bool) """
pass
def toUrl(self): # real signature unknown; restored from __doc__
""" QVariant.toUrl() -> QUrl """
return QUrl
def type(self): # real signature unknown; restored from __doc__
""" QVariant.type() -> Type """
pass
def typeName(self): # real signature unknown; restored from __doc__
""" QVariant.typeName() -> str """
return ""
def typeToName(self, Type): # real signature unknown; restored from __doc__
""" QVariant.typeToName(Type) -> str """
return ""
def userType(self): # real signature unknown; restored from __doc__
""" QVariant.userType() -> int """
return 0
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
BitArray = 13
Bitmap = 73
Bool = 1
Brush = 66
ByteArray = 12
Char = 7
Color = 67
Cursor = 74
Date = 14
DateTime = 16
Double = 6
EasingCurve = 29
Font = 64
Hash = 28
Icon = 69
Image = 70
Int = 2
Invalid = 0
KeySequence = 76
Line = 23
LineF = 24
List = 9
Locale = 18
LongLong = 4
Map = 8
Matrix = 80
Matrix4x4 = 82
Palette = 68
Pen = 77
Pixmap = 65
Point = 25
PointF = 26
Polygon = 71
Quaternion = 86
Rect = 19
RectF = 20
RegExp = 27
Region = 72
Size = 21
SizeF = 22
SizePolicy = 75
String = 10
StringList = 11
TextFormat = 79
TextLength = 78
Time = 15
Transform = 81
Type = None # (!) real value is ''
UInt = 3
ULongLong = 5
Url = 17
UserType = 127
Vector2D = 83
Vector3D = 84
Vector4D = 85
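# --- Hedged usage note (illustration, not part of the original stub) ---
# Per the docstrings above, PyQt4's numeric conversions return a
# (value, ok) pair, e.g.:
#   v = QVariant(42)
#   n, ok = v.toInt()   # expected: n == 42, ok == True
# The bare integers above (Int = 2, String = 10, ...) are the
# QVariant.Type enum values consumed by type(), convert() and canConvert().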
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/PyQt4/QtCore/__init__/QVariant.py
|
Python
|
gpl-2.0
| 9,340
|
import abc
import os
import numpy as np
import pandas as pd
from scipy.io import loadmat
from scipy.misc import imread, imresize
from cvds.dataset_base import *
from cvds.meta.path import get_path
class Caltech101(CategoryNestedDataSetBase):
"""
The Caltech-101 dataset
"""
@property
def name(self):
return "caltech-101"
class Caltech256(CategoryNestedDataSetBase):
"""
The Caltech-256 dataset
"""
@property
def name(self):
return "caltech-256"
class CUB_200_2011(CategoryNestedDataSetBase):
"""
Birds dataset
http://www.vision.caltech.edu/visipedia/CUB-200-2011.html
"""
@property
def name(self):
return "CUB-200-2011"
class Flowers102(DataSetBase):
"""
Oxford (VGG) flowers dataset
http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html
"""
@property
def name(self):
return "flowers102"
def get_image_names_and_labels(self):
labels = loadmat(os.path.join(self.path, "imagelabels.mat"))["labels"].ravel()
file_names = np.array(["image_{:05}.jpg".format(i + 1) for i in range(len(labels))])
return file_names, labels
def get_image(self, name):
return imread(os.path.join(self.path, "jpg", name))
class Flowers17(DataSetBase):
"""
http://www.robots.ox.ac.uk/~vgg/data/bicos/
--
# TODO: common base for flat datasets with external class labels.
"""
@property
def name(self):
return "flowers17"
def get_image_names_and_labels(self):
labels = loadmat(os.path.join(self.path, "imagelabels.mat"))["labels"].ravel()
file_names = np.array(["image_{:04}.jpg".format(i + 1) for i in range(len(labels))])
return file_names, labels
def get_image(self, name):
return imread(os.path.join(self.path, "jpg", name))
class Aircraft(DataSetBase):
"""
# TODO: allow to load all the labels, not just manufacturer
"""
@property
def name(self):
return "aircraft"
def get_image_names_and_labels(self):
f = pd.concat([pd.read_csv(os.path.join(self.path, "images_manufacturer_{}.txt".format(dset)), header=None) for
dset in("train", "val", "test")])
file_names = f[0].apply(lambda v: v.split(" ")[0]).values
labels = f[0].apply(lambda v: " ".join(v.split(" ")[1:])).values
return file_names, labels
def get_image(self, name):
return imread(os.path.join(self.path, "images", "{}.jpg".format(name)))
class Pets(SeparatorAppendedDataSetBase):
@property
def name(self):
return "pets"
class Actions40(SeparatorAppendedDataSetBase):
@property
def name(self):
return "stanford-40"
class FacesWild(CategoryNestedDataSetBase):
@property
def name(self):
return "faces.lfw"
class FacesWildAligned(CategoryNestedDataSetBase):
@property
def name(self):
return "faces.lfw.aligned"
class PascalVOCBase(DataSetBase):
@abc.abstractproperty
def name(self):
pass
def get_image_names_and_labels(self):
img_lists = os.listdir(os.path.join(self.path, "ImageSets", "Main"))
img_lists = [f for f in img_lists if f.endswith("_trainval.txt")]
def load_list(l):
data = pd.read_csv(os.path.join(self.path, "ImageSets", "Main", l), header=None)
            # VOC image-set files pad positive labels with double spaces;
            # collapse them before splitting into (image_id, flag).
            data = data.apply(lambda row: pd.Series(row[0].replace("  ", " ").split(" ")), axis=1)
return data[data[1].astype(int) == 1][0].values
images_labels = {l:load_list(l) for l in img_lists}
images = []
labels = []
for label, image_list in images_labels.items():
images.extend(image_list)
labels.extend([label.split("_")[0]]*image_list.shape[0])
return np.array(images), np.array(labels)
def get_image(self, name):
return imread(os.path.join(self.path, "JPEGImages", "{}.jpg".format(name)))
class PascalVOC2007(PascalVOCBase):
@property
def name(self):
return "VOC2007"
class PascalVOC2012(PascalVOCBase):
@property
def name(self):
return "VOC2012"
class ImageNet2012(DataSetBase):
"""
--
"""
@property
def name(self):
return "ImageNet2012"
def get_image_names_and_labels(self):
file_names = sorted([f for f in os.listdir(self.path) if f.lower().endswith("jpeg")])
        labels = pd.read_csv(os.path.join(self.path, "LOC_val_solution.csv"), index_col=0).sort_index()
labels["label"] = labels.PredictionString.apply(lambda s: s.split(" ")[0])
mapping = [row.split(" ")[0] for row in open(os.path.join(self.path, "LOC_synset_mapping.txt"), "r")]
mapping_dict = {k: i + 1 for i, k in enumerate(mapping)}
labels = [mapping_dict[l] for l in labels.label.values]
return np.array(file_names), np.array(labels)
def get_image(self, name):
return imread(os.path.join(self.path, name), mode="RGB")
if __name__ == "__main__":
ds = ImageNet2012(use_classes=[1, 2], image_size=(100, 100))
print(ds.name)
print(ds.path)
imgs, lbls = ds.get_image_names_and_labels()
print(pd.Series(lbls).value_counts())
print(ds.dataset())
print(next(ds.dataset_blocks(block_size=100, num_blocks=1)))
|
Hezi-Resheff/vision-datasets
|
cvds/datasets.py
|
Python
|
gpl-3.0
| 5,330
|
from mock import *
from gp_unittest import *
from gppylib.operations.package import IsVersionCompatible
class IsVersionCompatibleTestCase(GpTestCase):
def setUp(self):
self.gppkg_mock_values = \
{'main_rpm': 'plperl-1.1-2.x86_64.rpm',
'postupdate': [],
'pkgname': 'plperl',
'description': 'some description.',
'postinstall': [{'Master': "some reason to restart database"}],
'postuninstall': [],
'abspath': 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg',
'preinstall': [],
'version': 'ossv5.12.4_pv1.2_gpdb4.3',
'pkg': 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg',
'dependencies': [],
'file_list': ['deps',
'gppkg_spec.yml',
'plperl-1.1-2.x86_64.rpm'],
'gpdbversion': Mock(),
'preuninstall': [],
'os': 'rhel5',
'architecture': 'x86_64'}
self.apply_patches([
patch('gppylib.operations.package.logger',
return_value=Mock(spec=['log', 'info', 'debug', 'error'])),
])
self.mock_logger = self.mock_objs[0]
def _is_requires_orca_logged(self, gppkg_name, log_messages):
return ('Greenplum Database requires orca version of '
'%s' % gppkg_name in log_messages)
@patch('gppylib.operations.package.GpVersion',
return_value=Mock(version=[4, 3, 10, 0]))
def test__execute_reports_incompatability(self, mock_gpversion):
logger = self.mock_logger
gppkg_mock_values = self.gppkg_mock_values
gppkg = Mock(**gppkg_mock_values)
subject = IsVersionCompatible(gppkg)
subject.execute()
gppkg_name = 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg'
        # each mock call object is a (name, args, kwargs) triple;
        # args[1][0] is the first positional argument, i.e. the log message
log_messages = [args[1][0] for args in logger.method_calls]
self.assertTrue(self._is_requires_orca_logged(gppkg_name,
log_messages))
@patch('gppylib.operations.package.GpVersion',
return_value=Mock(version=[4, 3, 3, 0]))
def test__execute_reports_compatability_with_older_version(self,
mock_gpversion):
logger = self.mock_logger
gppkg_mock_values = self.gppkg_mock_values
gppkg = Mock(**gppkg_mock_values)
subject = IsVersionCompatible(gppkg)
subject.execute()
gppkg_name = 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg'
        # each mock call object is a (name, args, kwargs) triple;
        # args[1][0] is the first positional argument, i.e. the log message
log_messages = [args[1][0] for args in logger.method_calls]
self.assertFalse(self._is_requires_orca_logged(gppkg_name,
log_messages))
def test__execute_compatible(self):
logger = self.mock_logger
gppkg_name = 'plperl-ossv5.12.4_pv1.3_gpdb4.3orca-rhel5-x86_64.gppkg'
modified_gppkg_mock_values = \
{'abspath': gppkg_name,
'version': 'ossv5.12.4_pv1.2_gpdb4.3orca',
'pkg': gppkg_name}
gppkg_mock_values = self.gppkg_mock_values
gppkg_mock_values.update(**modified_gppkg_mock_values)
gppkg = Mock(**gppkg_mock_values)
subject = IsVersionCompatible(gppkg)
subject.execute()
log_messages = [args[1][0] for args in logger.method_calls]
self.assertFalse(self._is_requires_orca_logged(gppkg_name,
log_messages))
if __name__ == '__main__':
run_tests()
|
lintzc/gpdb
|
gpMgmt/bin/gppylib/test/unit/test_unit_package.py
|
Python
|
apache-2.0
| 3,737
|