prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.job import State
from kolibri.core.tasks.storage import Storage
DEFAULT_QUEUE = "ICEQUBE_DEFAULT_QUEUE"


class Queue(object):
    """A named job queue backed by a :class:`Storage` instance.

    All state lives in the storage backend; this class is a thin facade
    that scopes storage operations to a single queue name.
    """

    def __init__(self, queue=DEFAULT_QUEUE, connection=None):
        """
        :param queue: Name of the queue this object operates on.
        :param connection: Database connection passed to the Storage backend.
        :raises ValueError: if no connection is supplied.
        """
        if connection is None:
            raise ValueError("Connection must be defined")
        self.name = queue
        self.storage = Storage(connection)

    def __len__(self):
        """Return the number of jobs currently tracked for this queue."""
        return self.storage.count_all_jobs(self.name)

    @property
    def job_ids(self):
        """Return the ids of every job known to this queue."""
        return [job.job_id for job in self.storage.get_all_jobs(self.name)]

    @property
    def jobs(self):
        """
        Return all the jobs scheduled, queued, running, failed or completed.

        Returns: A list of all jobs.
        """
        return self.storage.get_all_jobs(self.name)

    def enqueue(self, func, *args, **kwargs):
        """
        Enqueues a function func for execution.

        One special parameter is track_progress. If passed in and not None, the func will be passed in a
        keyword parameter called update_progress:

        def update_progress(progress, total_progress, stage=""):

        The running function can call the update_progress function to notify interested parties of the function's
        current progress.

        Another special parameter is the "cancellable" keyword parameter. When passed in and not None, a special
        "check_for_cancel" parameter is passed in. When called, it raises an error when the user has requested a job
        to be cancelled.

        The caller can also pass in any pickleable object into the "extra_metadata" parameter. This data is stored
        within the job and can be retrieved when the job status is queried.

        All other parameters are directly passed to the function when it starts running.

        :type func: callable or str
        :param func: A callable object that will be scheduled for running.
        :return: a string representing the job_id.
        """
        # If the func is already a Job object, schedule it directly;
        # otherwise wrap it in a Job first.
        if isinstance(func, Job):
            job = func
        else:
            job = Job(func, *args, **kwargs)
        job.state = State.QUEUED
        job_id = self.storage.enqueue_job(job, self.name)
        return job_id

    def cancel(self, job_id):
        """
        Mark a job as canceling, and let the worker pick this up to initiate
        the cancel of the job.

        :param job_id: the job_id of the Job to cancel.
        """
        self.storage.mark_job_as_canceling(job_id)

    def fetch_job(self, job_id):
        """
        Returns a Job object corresponding to the job_id. From there, you can query for the following attributes:

        - function string to run
        - its current state (see Job.State for the list of states)
        - progress (returning an int), total_progress (returning an int), and percentage_progress
          (derived from running job.progress/total_progress)
        - the job.exception and job.traceback, if the job's function returned an error

        :param job_id: the job_id to get the Job object for
        :return: the Job object corresponding to the job_id
        """
        return self.storage.get_job(job_id)

    def empty(self):
        """
        Clear all jobs.
        """
        self.storage.clear(force=True, queue=self.name)

    def clear(self):
        """
        Clear all succeeded, failed, or cancelled jobs.
        """
        self.storage.clear(force=False, queue=self.name)

    def clear_job(self, job_id):
        """
        Clear a job if it has succeeded, failed, or been cancelled.

        :type job_id: str
        :param job_id: id of job to clear.
        """
        self.storage.clear(job_id=job_id, force=False)
|
__author__ = 'PaleNeutron'
import os
from urllib.parse import urlparse, unquote
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
class MyMainWindow(QtWidgets.QMainWindow):
    """Main window that accepts drag-and-dropped book files and images.

    Dropped .txt/.epub files (and txt files found inside dropped .zip/.rar
    archives) are announced via :attr:`file_loaded`; dropped images are
    announced via :attr:`image_loaded`.
    """

    # Emitted with the filesystem path of a dropped text/epub file.
    file_loaded = QtCore.pyqtSignal(str)
    # Emitted with the image data of a dropped image.
    image_loaded = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self):
        super(MyMainWindow, self).__init__()
        self.windowList = []
        self.text_path = ''
        self.epub_path = ''
        # Windows drag/clipboard MIME type carrying a UTF-16 file name.
        self.win_file_mime = "application/x-qt-windows-mime;value=\"FileNameW\""
        self.text_uri_mime = "text/uri-list"
        self.create_content_browser()

    def create_content_browser(self):
        """Create the text browser widget used to display book content."""
        self.content_browser = QtWidgets.QTextBrowser()
        self.content_browser.setFontPointSize(12)
        self.content_browser.setGeometry(QtCore.QRect(300, 150, 600, 400))
        # Keep a reference so the widget is not garbage-collected.
        self.windowList.append(self.content_browser)

    def dragEnterEvent(self, ev):
        # Accept every drag so dropEvent gets a chance to inspect the payload.
        ev.accept()

    def load_file(self, file_path):
        """Announce a loadable text/epub file to any connected listeners."""
        self.file_loaded.emit(file_path)

    def uri_to_path(self, uri):
        """Convert a file:// URI to a local filesystem path.

        Returns None on platforms other than win32/linux.
        """
        if sys.platform == "win32":
            # Drop the leading '/' urlparse leaves before the drive letter.
            path = unquote(urlparse(uri).path)[1:]
        elif sys.platform == "linux":
            path = unquote(urlparse(uri).path)
        else:
            path = None
        return path

    def dropEvent(self, ev):
        if ev.mimeData().hasImage():
            self.image_loaded.emit(ev.mimeData().imageData())
        if ev.mimeData().hasFormat(self.text_uri_mime):
            uri = ev.mimeData().data(self.text_uri_mime).data().decode("utf8").strip()
            file_path = self.uri_to_path(uri)
            if uri.lower().endswith(".txt") or uri.lower().endswith(".epub"):
                self.load_file(file_path)
            elif uri.lower().endswith(".zip"):
                # Open the zip archive and pick the first txt inside.
                import zipfile
                zf = zipfile.ZipFile(file_path)
                for filename in zf.namelist():
                    # Extract the first txt larger than 10 KiB into the
                    # current working directory.
                    if filename.lower().endswith(".txt") and zf.getinfo(filename).file_size > 10 * 1024:
                        zf.extract(filename)
                        # Signal the location of the extracted file.
                        self.load_file(os.curdir + os.sep + filename)
                        break
            elif uri.lower().endswith(".rar"):
                import rarfile
                rf = rarfile.RarFile(file_path)
                for filename in rf.namelist():
                    # Same rule as for zip: first txt larger than 10 KiB.
                    if filename.lower().endswith(".txt") and rf.getinfo(filename).file_size > 10 * 1024:
                        rf.extract(filename)
                        self.load_file(os.curdir + os.sep + filename)
                        break
            else:
                ev.ignore()
#!/usr/bin/python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure
required: true
default: null
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot
required: false
default: no
choices: [ "yes", "no" ]
state:
description:
- Desired boolean value
required: true
default: null
choices: [ 'yes', 'no' ]
notes:
- Not tested on any debian based system
requirements: [ libselinux-python, libsemanage-python ]
author: "Stephen Fromm (@sfromm)"
'''
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import semanage
HAVE_SEMANAGE=True
except ImportError:
HAVE_SEMANAGE=False
def has_boolean_value(module, name):
    """Return True if the running SELinux policy defines a boolean *name*.

    Exits via module.fail_json() if the list of booleans cannot be read.
    """
    bools = []
    try:
        rc, bools = selinux.security_get_boolean_names()
    except OSError:
        module.fail_json(msg="Failed to get list of boolean names")
    # The bindings return byte strings, so compare against the encoded name.
    return to_bytes(name) in bools
def get_boolean_value(module, name):
    """Return the current (active) value of SELinux boolean *name* as a bool.

    Exits via module.fail_json() if the state cannot be determined.
    """
    state = 0
    try:
        state = selinux.security_get_boolean_active(name)
    except OSError:
        module.fail_json(msg="Failed to determine current state for boolean %s" % name)
    # The C binding reports 1 for "on"; anything else is treated as off.
    return state == 1
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot..
def semanage_boolean_value(module, name, state):
    """Set and persist an SELinux boolean through the semanage bindings.

    Mirrors the transaction sequence used by setsebool.c: create a handle,
    check that the policy is managed, connect, begin a transaction, build a
    boolean record, record it both locally (persistent) and active, then
    commit without a full policy reload.  Every failure path exits the
    process via module.fail_json().

    :param module: AnsibleModule instance used for error reporting.
    :param name: name of the SELinux boolean.
    :param state: desired value; truthy maps to 1, falsy to 0.
    :return: True once the change has been committed.
    """
    rc = 0
    value = 0
    if state:
        value = 1
    handle = semanage.semanage_handle_create()
    if handle is None:
        module.fail_json(msg="Failed to create semanage library handle")
    try:
        managed = semanage.semanage_is_managed(handle)
        if managed < 0:
            module.fail_json(msg="Failed to determine whether policy is manage")
        if managed == 0:
            # Unmanaged policy: persistence is impossible even for root.
            if os.getuid() == 0:
                module.fail_json(msg="Cannot set persistent booleans without managed policy")
            else:
                module.fail_json(msg="Cannot set persistent booleans; please try as root")
        if semanage.semanage_connect(handle) < 0:
            module.fail_json(msg="Failed to connect to semanage")
        if semanage.semanage_begin_transaction(handle) < 0:
            module.fail_json(msg="Failed to begin semanage transaction")
        rc, sebool = semanage.semanage_bool_create(handle)
        if rc < 0:
            module.fail_json(msg="Failed to create seboolean with semanage")
        if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
            module.fail_json(msg="Failed to set seboolean name with semanage")
        semanage.semanage_bool_set_value(sebool, value)
        rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
        if rc < 0:
            module.fail_json(msg="Failed to extract boolean key with semanage")
        # modify_local records the persistent (local policy) value...
        if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
            module.fail_json(msg="Failed to modify boolean key with semanage")
        # ...and set_active applies it to the running policy as well.
        if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
            module.fail_json(msg="Failed to set boolean key active with semanage")
        semanage.semanage_bool_key_free(boolkey)
        semanage.semanage_bool_free(sebool)
        # Suppress the full policy reload on commit; the active value has
        # already been set directly above.
        semanage.semanage_set_reload(handle, 0)
        if semanage.semanage_commit(handle) < 0:
            module.fail_json(msg="Failed to commit changes to semanage")
        semanage.semanage_disconnect(handle)
        semanage.semanage_handle_destroy(handle)
    except Exception:
        # get_exception() is the legacy ansible py2/py3 compat helper.
        e = get_exception()
        module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
    return True
def set_boolean_value(module, name, state):
    """Set the active (non-persistent) value of SELinux boolean *name*.

    :param state: truthy maps to 1, falsy to 0.
    :return: True when the library call reports success.
    """
    rc = 0
    value = 1 if state else 0
    try:
        rc = selinux.security_set_boolean(name, value)
    except OSError:
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
    # A zero return code from the binding indicates success.
    return rc == 0
def main():
    """Entry point: toggle an SELinux boolean, optionally persistently."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            persistent=dict(default='no', type='bool'),
            state=dict(required=True, type='bool')
        ),
        supports_check_mode=True
    )

    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python support")
    if not HAVE_SEMANAGE:
        module.fail_json(msg="This module requires libsemanage-python support")
    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")

    name = module.params['name']
    persistent = module.params['persistent']
    state = module.params['state']

    result = {}
    result['name'] = name

    if hasattr(selinux, 'selinux_boolean_sub'):
        # selinux_boolean_sub allows sites to rename a boolean and alias the
        # old name. Feature only available in selinux library since 2012.
        name = selinux.selinux_boolean_sub(name)

    if not has_boolean_value(module, name):
        module.fail_json(msg="SELinux boolean %s does not exist." % name)

    cur_value = get_boolean_value(module, name)
    if cur_value == state:
        # Already at the requested value: report and exit unchanged.
        result['state'] = cur_value
        result['changed'] = False
        module.exit_json(**result)

    if module.check_mode:
        module.exit_json(changed=True)
    if persistent:
        r = semanage_boolean_value(module, name, state)
    else:
        r = set_boolean_value(module, name, state)

    result['changed'] = r
    if not r:
        # Fix: the original referenced an undefined local `value` here,
        # raising NameError; report the requested target state instead.
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))

    try:
        selinux.security_commit_booleans()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        module.fail_json(msg="Failed to commit pending boolean %s value" % name)
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils._text import to_bytes
if __name__ == '__main__':
main()
|
-like list. Calling it returns the head of the stack. """
def __call__(self):
    """ Return the current default application. """
    # The stack is list-like; the most recently pushed app is the default.
    return self[-1]
def push(self, value=None):
    """ Add a new :class:`Bottle` instance to the stack """
    # Anything that is not already a Bottle (including None) is replaced
    # by a freshly constructed application.
    app = value if isinstance(value, Bottle) else Bottle()
    self.append(app)
    return app
class WSGIFileWrapper(object):
    """Iterate a file-like object in fixed-size chunks (WSGI file wrapper).

    Mirrors a subset of the wrapped object's file API (fileno, close, read,
    readlines, tell, seek) where the underlying object provides it.
    """

    def __init__(self, fp, buffer_size=1024 * 64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Re-export the wrapped object's file API where available.
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))

    def __iter__(self):
        chunk_size, reader = self.buffer_size, self.read
        while True:
            chunk = reader(chunk_size)
            if not chunk:
                return
            yield chunk
class _closeiter(object):
    """ This only exists to be able to attach a .close method to iterators that
        do not support attribute assignment (most of itertools). """

    def __init__(self, iterator, close=None):
        self.iterator = iterator
        # makelist() normalizes None/scalar/list into a list of callbacks.
        self.close_callbacks = makelist(close)

    def __iter__(self):
        return iter(self.iterator)

    def close(self):
        # Fire every registered callback (e.g. WSGI body cleanup hooks).
        for func in self.close_callbacks:
            func()
class ResourceManager(object):
    """ This class manages a list of search paths and helps to find and open
        application-bound resources (files).

        :param base: default value for :meth:`add_path` calls.
        :param opener: callable used to open resources.
        :param cachemode: controls which lookups are cached. One of 'all',
                          'found' or 'none'.
    """

    def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
        self.base = base
        self.cachemode = cachemode
        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}

    def add_path(self, path, base=None, index=None, create=False):
        """ Add a new path to the list of search paths. Return False if the
            path does not exist.

            :param path: The new search path. Relative paths are turned into
                an absolute and normalized form. If the path looks like a file
                (not ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to :attr:`base` which defaults to ``os.getcwd()``.
            :param index: Position within the list of search paths. Defaults
                to last index (appends to the list).

            The `base` parameter makes it easy to reference files installed
            along with a python module or package::

                res.add_path('./resources/', __file__)
        """
        root = os.path.abspath(os.path.dirname(base or self.base))
        new_path = os.path.abspath(os.path.join(root, os.path.dirname(path))) + os.sep
        # Re-adding an existing path moves it to the requested position.
        if new_path in self.path:
            self.path.remove(new_path)
        if create and not os.path.isdir(new_path):
            os.makedirs(new_path)
        if index is None:
            self.path.append(new_path)
        else:
            self.path.insert(index, new_path)
        # Any cached lookup may now resolve differently.
        self.cache.clear()
        return os.path.exists(new_path)

    def __iter__(self):
        """ Iterate over all existing files in all registered paths. """
        pending = self.path[:]
        while pending:
            directory = pending.pop()
            if not os.path.isdir(directory):
                continue
            for entry in os.listdir(directory):
                full = os.path.join(directory, entry)
                if os.path.isdir(full):
                    pending.append(full)
                else:
                    yield full

    def lookup(self, name):
        """ Search for a resource and return an absolute file path, or `None`.
            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. """
        if name in self.cache and not DEBUG:
            return self.cache[name]
        for directory in self.path:
            candidate = os.path.join(directory, name)
            if os.path.isfile(candidate):
                if self.cachemode in ('all', 'found'):
                    self.cache[name] = candidate
                return candidate
        if self.cachemode == 'all':
            self.cache[name] = None
        return self.cache[name]

    def open(self, name, mode='r', *args, **kwargs):
        """ Find a resource and return a file object, or raise IOError. """
        fname = self.lookup(name)
        if not fname:
            raise IOError("Resource %r not found." % name)
        return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):

    def __init__(self, fileobj, name, filename, headers=None):
        """ Wrapper for file uploads. """
        #: Open file(-like) object (BytesIO buffer or temporary file)
        self.file = fileobj
        #: Name of the upload form field
        self.name = name
        #: Raw filename as sent by the client (may contain unsafe characters)
        self.raw_filename = filename
        #: A :class:`HeaderDict` with additional headers (e.g. content-type)
        self.headers = HeaderDict(headers) if headers else HeaderDict()

    # Convenience accessors for the multipart part's MIME headers.
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int, default=-1)

    @cached_property
    def filename(self):
        """ Name of the file on the client file system, but normalized to ensure
            file system compatibility. An empty filename is returned as 'empty'.

            Only ASCII letters, digits, dashes, underscores and dots are
            allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or tailing dots
            or dashes are removed. The filename is limited to 255 characters.
        """
        fname = self.raw_filename
        # NOTE(review): `unicode` is bottle's module-level py2/py3 compat
        # alias (str on py3) — defined outside this chunk, confirm upstream.
        if not isinstance(fname, unicode):
            fname = fname.decode('utf8', 'ignore')
        # Decompose accented characters, then strip everything non-ASCII.
        fname = normalize('NFKD', fname)
        fname = fname.encode('ASCII', 'ignore').decode('ASCII')
        # Drop client-supplied directory components (Windows or POSIX style).
        fname = os.path.basename(fname.replace('\\', os.path.sep))
        # Whitelist safe characters, collapse whitespace/dashes, trim edges.
        fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
        fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
        return fname[:255] or 'empty'

    def _copy_file(self, fp, chunk_size=2 ** 16):
        # Stream from the current position, then restore it so the upload
        # can be saved more than once.
        read, write, offset = self.file.read, fp.write, self.file.tell()
        while 1:
            buf = read(chunk_size)
            if not buf: break
            write(buf)
        self.file.seek(offset)

    def save(self, destination, overwrite=False, chunk_size=2 ** 16):
        """ Save file to disk or copy its content to an open file(-like) object.
            If *destination* is a directory, :attr:`filename` is added to the
            path. Existing files are not overwritten by default (IOError).

            :param destination: File path, directory or file(-like) object.
            :param overwrite: If True, replace existing files. (default: False)
            :param chunk_size: Bytes to read at a time. (default: 64kb)
        """
        # NOTE(review): `basestring` is the py2/py3 compat alias declared at
        # module level in bottle (str on py3).
        if isinstance(destination, basestring):  # Except file-likes here
            if os.path.isdir(destination):
                destination = os.path.join(destination, self.filename)
            if not overwrite and os.path.exists(destination):
                raise IOError('File exists.')
            with open(destination, 'wb') as fp:
                self._copy_file(fp, chunk_size)
        else:
            self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################


def abort(code=500, text='Unknown Error.'):
    """ Aborts execution and causes a HTTP error. """
    # Raising (not returning) HTTPError skips the rest of the route handler;
    # the framework converts it into a response with the given status code.
    raise HTTPError(code, text)
def redirect(url, code=None):
|
import numpy as np
def zero_mean_normalize_image_data(data, axis=(0, 1, 2)):
    """Standardize *data* to zero mean and unit variance over *axis*."""
    centered = data - data.mean(axis=axis)
    return np.divide(centered, data.std(axis=axis))
def foreground_zero_mean_normalize_image_data(data, channel_dim=4, background_value=0, tolerance=1e-5):
    """Standardize only foreground voxels to zero mean / unit variance.

    Foreground is every voxel with ``abs(value) > background_value + tolerance``;
    background voxels are left untouched. Works on a copy of *data*.

    :param data: image array; either single-channel or with channels on
        axis *channel_dim* (assumed to also be the last axis — TODO confirm).
    :param channel_dim: index of the channel axis.
    :param background_value: value considered background.
    :param tolerance: slack around the background value.
    :return: normalized copy of *data*.
    """
    data = np.copy(data)
    if data.ndim == channel_dim or data.shape[channel_dim] == 1:
        # Single channel: one global foreground mean/std suffices.
        fg_mask = np.abs(data) > (background_value + tolerance)
        fg = data[fg_mask]
        data[fg_mask] = np.divide(fg - fg.mean(), fg.std())
        return data
    else:
        # Multi-channel: normalize each channel independently.
        for channel in range(data.shape[channel_dim]):
            channel_data = data[..., channel]
            fg_mask = np.abs(channel_data) > (background_value + tolerance)
            fg = channel_data[fg_mask]
            channel_data[fg_mask] = np.divide(fg - fg.mean(), fg.std())
            data[..., channel] = channel_data
        return data
def zero_floor_normalize_image_data(data, axis=(0, 1, 2), floor_percentile=1, floor=0):
    """Shift data so the floor percentile maps to zero, scale by foreground std.

    Voxels at or below the per-channel floor threshold are treated as
    background (masked out of the std computation) and filled with *floor*
    in the result.

    :param data: image array; assumed channels-last when ndim > len(axis)
        — TODO confirm with callers.
    :param axis: spatial axes over which percentiles/std are computed.
    :param floor_percentile: percentile used as the background threshold.
    :param floor: fill value for masked (background) voxels.
    """
    floor_threshold = np.percentile(data, floor_percentile, axis=axis)
    if data.ndim != len(axis):
        # floor_threshold has shape (channels,). Tuple repetition builds an
        # ndim-long shape of (C, C, ..., C), then every spatial axis is set
        # to 1, yielding e.g. (1, 1, 1, C) so it broadcasts against data.
        floor_threshold_shape = np.asarray(floor_threshold.shape * data.ndim)
        floor_threshold_shape[np.asarray(axis)] = 1
        floor_threshold = floor_threshold.reshape(floor_threshold_shape)
    background = data <= floor_threshold
    # Masked entries are excluded from the std below and filled at the end.
    data = np.ma.masked_array(data - floor_threshold, mask=background)
    std = data.std(axis=axis)
    if data.ndim != len(axis):
        std = std.reshape(floor_threshold_shape)
    return np.divide(data, std).filled(floor)
def zero_one_window(data, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1, floor=0, ceiling=1,
                    channels_axis=None):
    """
    :param data: Numpy ndarray.
    :param axis:
    :param ceiling_percentile: Percentile value of the foreground to set to the ceiling.
    :param floor_percentile: Percentile value of the image to set to the floor.
    :param floor: New minimum value.
    :param ceiling: New maximum value.
    :param channels_axis:
    :return:
    """
    data = np.copy(data)
    if len(axis) != data.ndim:
        # Multi-channel input: window each channel with its own thresholds.
        floor_threshold = np.percentile(data, floor_percentile, axis=axis)
        if channels_axis is None:
            channels_axis = find_channel_axis(data.ndim, axis=axis)
        # Move channels to the front so they can be indexed directly.
        data = np.moveaxis(data, channels_axis, 0)
        for channel in range(data.shape[0]):
            channel_data = data[channel]
            # Background is everything at or below the floor threshold;
            # the ceiling is taken from the remaining foreground voxels.
            bg_mask = channel_data <= floor_threshold[channel]
            fg = channel_data[bg_mask == False]
            ceiling_threshold = np.percentile(fg, ceiling_percentile)
            data[channel] = window_data(channel_data, floor_threshold=floor_threshold[channel],
                                        ceiling_threshold=ceiling_threshold, floor=floor, ceiling=ceiling)
        data = np.moveaxis(data, 0, channels_axis)
    else:
        # Single channel: global thresholds.
        floor_threshold = np.percentile(data, floor_percentile)
        fg = data[data > floor_threshold]
        ceiling_threshold = np.percentile(fg, ceiling_percentile)
        data = window_data(data, floor_threshold=floor_threshold, ceiling_threshold=ceiling_threshold, floor=floor,
                           ceiling=ceiling)
    return data
def find_channel_axis(ndim, axis):
    """Return the index (in range(ndim)) of the axis not listed in *axis*.

    An axis may be referenced positively (e.g. 3) or negatively (e.g. -1),
    so both forms are checked for every candidate. If several axes qualify,
    the last one wins (matching the original behavior).
    """
    for candidate in range(ndim):
        if candidate not in axis and (candidate - ndim) not in axis:
            channels_axis = candidate
    return channels_axis
def static_windows(data, windows, floor=0, ceiling=1):
    """
    Normalizes the data according to a set of predefined windows. This is helpful for CT normalization where the
    units are static and radiologists often have a set of windowing parameters that they use, allowing them to look
    at different features in the image.

    :param data: 3D numpy array.
    :param windows: iterable of (level, width) pairs.
    :param floor: defaults to 0.
    :param ceiling: defaults to 1.
    :return: Array with data windows listed in the final dimension
    """
    data = np.squeeze(data)
    # One output slice per window, pre-filled with the floor value.
    stacked = np.ones(data.shape + (len(windows),)) * floor
    for index, (level, width) in enumerate(windows):
        stacked[..., index] = radiology_style_windowing(data, level, width, floor=floor, ceiling=ceiling)
    return stacked
def radiology_style_windowing(data, l, w, floor=0, ceiling=1):
    """Window *data* using radiology level (*l*) / width (*w*) semantics.

    The window spans [l - w/2, l + w/2] and is rescaled to [floor, ceiling].
    """
    lower = l - w / 2
    upper = l + w / 2
    return window_data(data, floor_threshold=lower, ceiling_threshold=upper, floor=floor, ceiling=ceiling)
def window_data(data, floor_threshold, ceiling_threshold, floor, ceiling):
    """Linearly rescale the [floor_threshold, ceiling_threshold] range to
    [0, 1], then clamp values below *floor* / above *ceiling*."""
    scaled = (data - floor_threshold) / (ceiling_threshold - floor_threshold)
    # Clamp out-of-window values at both ends.
    scaled[scaled < floor] = floor
    scaled[scaled > ceiling] = ceiling
    return scaled
def hist_match(source, template):
    """
    Source: https://stackoverflow.com/a/33047048

    Adjust the pixel values of a grayscale image such that its histogram
    matches that of a target image

    Arguments:
    -----------
        source: np.ndarray
            Image to transform; the histogram is computed over the flattened
            array
        template: np.ndarray
            Template image; can have different dimensions to source
    Returns:
    -----------
        matched: np.ndarray
            The transformed output image
    """
    original_shape = source.shape
    src_flat = source.ravel()
    tmpl_flat = template.ravel()

    # Unique pixel values with the inverse index (to rebuild the image)
    # and per-value counts for the empirical CDFs.
    s_values, bin_idx, s_counts = np.unique(src_flat, return_inverse=True,
                                            return_counts=True)
    t_values, t_counts = np.unique(tmpl_flat, return_counts=True)

    # Empirical cumulative distribution functions (pixel value -> quantile),
    # normalized to end at 1.
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # Map each source quantile onto the template pixel value with the
    # closest quantile, then scatter back to the original layout.
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
    return interp_t_values[bin_idx].reshape(original_shape)
cified here is
# # used to dimension the transport stream STD (2.4.2 in ITU-T Rec. xxx |
# # ISO/IEC 13818-1), or the program stream STD (2.4.5 in ITU-T Rec. xxx |
# # ISO/IEC 13818-1).
# #
# # If the bitstream is not a constant rate bitstream the vbv_delay
# # field shall have the value FFFF in hexadecimal.
# #
# # Given the value encoded in the bitrate field, the bitstream shall be
# # generated so that the video encoding and the worst case multiplex
# # jitter do not cause STD buffer overflow or underflow.
# #
# #
# # ------------------------------------------------------------------------
# # Some parts in the code are based on mpgtx (mpgtx.sf.net)
def bitrate(self, file):
    """
    read the bitrate (most of the time broken)
    """
    # The 18-bit bitrate field starts 8 bytes into the sequence header.
    file.seek(self.sequence_header_offset + 8, 0)
    word, tail = struct.unpack('>HB', file.read(3))
    # 16 bits from the short plus the top 2 bits of the following byte.
    rate_units = word << 2 | tail >> 6
    # The field is expressed in units of 400 bits per second.
    return rate_units * 400
@staticmethod
def ReadSCRMpeg2(buffer):
    """
    read SCR (timestamp) for MPEG2 at the buffer beginning (6 Bytes)

    Returns the timestamp in seconds, or None if the buffer is too short.
    """
    if len(buffer) < 6:
        return None
    # The 33-bit SCR base: one high bit plus 32 bits assembled below.
    highbit = (byte2int(buffer) & 0x20) >> 5
    low4Bytes = ((int(byte2int(buffer)) & 0x18) >> 3) << 30
    low4Bytes |= (byte2int(buffer) & 0x03) << 28
    low4Bytes |= indexbytes(buffer, 1) << 20
    low4Bytes |= (indexbytes(buffer, 2) & 0xF8) << 12
    low4Bytes |= (indexbytes(buffer, 2) & 0x03) << 13
    low4Bytes |= indexbytes(buffer, 3) << 5
    low4Bytes |= (indexbytes(buffer, 4)) >> 3
    # 9-bit SCR extension (computed but unused by the return value).
    sys_clock_ref = (indexbytes(buffer, 4) & 0x3) << 7
    sys_clock_ref |= (indexbytes(buffer, 5) >> 1)
    # Fix: restore the corrupted expression — (1 << 16) * (1 << 16) == 2**32
    # carries the high bit above the 32 assembled bits; 90 kHz -> seconds.
    return (int(highbit * (1 << 16) * (1 << 16)) + low4Bytes) / 90000
@staticmethod
def ReadSCRMpeg1(buffer):
    """
    read SCR (timestamp) for MPEG1 at the buffer beginning (5 Bytes)

    Returns the timestamp in seconds, or None if the buffer is too short.
    """
    if len(buffer) < 5:
        return None
    # 33-bit timestamp: one high bit plus 32 bits assembled below.
    highbit = (byte2int(buffer) >> 3) & 0x01
    # Fix: restore the corrupted `byte2int(buffer)` call on this line.
    low4Bytes = ((int(byte2int(buffer)) >> 1) & 0x03) << 30
    low4Bytes |= indexbytes(buffer, 1) << 22
    low4Bytes |= (indexbytes(buffer, 2) >> 1) << 15
    low4Bytes |= indexbytes(buffer, 3) << 7
    low4Bytes |= indexbytes(buffer, 4) >> 1
    # (1 << 16) * (1 << 16) == 2**32 carries the high bit; 90 kHz -> seconds.
    return (int(highbit) * (1 << 16) * (1 << 16) + low4Bytes) / 90000
@staticmethod
def ReadPTS(buffer):
    """
    read PTS (PES timestamp) at the buffer beginning (5 Bytes)
    """
    # The 33-bit timestamp is split 3/15/15 across five bytes, with
    # marker bits skipped by the shifts below; 90 kHz -> seconds.
    top = (byte2int(buffer) & 0xF) >> 1
    middle = (indexbytes(buffer, 1) << 7) + (indexbytes(buffer, 2) >> 1)
    bottom = (indexbytes(buffer, 3) << 7) + (indexbytes(buffer, 4) >> 1)
    return ((int(top) << 30) + (middle << 15) + bottom) / 90000
def ReadHeader(self, buffer, offset):
    """
    Handle MPEG header in buffer on position offset

    Return None on error, new offset or 0 if the new offset can't be scanned
    """
    # NOTE(review): the slice-vs-'\x00\x00\x01' comparisons in this method
    # assume a py2-style str buffer; with py3 bytes they would never match.
    if buffer[offset:offset + 3] != '\x00\x00\x01':
        return None
    _id = indexbytes(buffer, offset + 3)
    if _id == PADDING_PKT:
        # Skip the padding packet: a 16-bit length follows the start code,
        # plus the 6 header bytes themselves.
        return offset + (indexbytes(buffer, offset + 4) << 8) + \
            indexbytes(buffer, offset + 5) + 6
    if _id == PACK_PKT:
        # The bits right after the start code distinguish MPEG-1 ('0010')
        # from MPEG-2 ('01') pack headers and select the SCR reader.
        if indexbytes(buffer, offset + 4) & 0xF0 == 0x20:
            self.type = 'MPEG-1 Video'
            self.get_time = self.ReadSCRMpeg1
            self.mpeg_version = 1
            return offset + 12
        elif (indexbytes(buffer, offset + 4) & 0xC0) == 0x40:
            self.type = 'MPEG-2 Video'
            self.get_time = self.ReadSCRMpeg2
            # MPEG-2 pack headers carry a variable stuffing length in the
            # low 3 bits of byte 13.
            return offset + (indexbytes(buffer, offset + 13) & 0x07) + 14
        else:
            # I have no idea what just happened, but for some DVB
            # recordings done with mencoder this points to a
            # PACK_PKT describing something odd. Returning 0 here
            # (let's hope there are no extensions in the header)
            # fixes it.
            return 0
    if 0xC0 <= _id <= 0xDF:
        # code for audio stream; register it once, keyed by stream id
        for a in self.audio:
            if a.id == _id:
                break
        else:
            self.audio.append(core.AudioStream())
            self.audio[-1]._set('id', _id)
        return 0
    if 0xE0 <= _id <= 0xEF:
        # code for video stream; register it once, keyed by stream id
        for v in self.video:
            if v.id == _id:
                break
        else:
            self.video.append(core.VideoStream())
            self.video[-1]._set('id', _id)
        return 0
    if _id == SEQ_HEAD:
        # sequence header, remember that position for later use
        self.sequence_header_offset = offset
        return 0
    if _id in [PRIVATE_STREAM1, PRIVATE_STREAM2]:
        # private stream. we don't know, but maybe we can guess later
        add = indexbytes(buffer, offset + 8)
        # if (indexbytes(buffer, offset+6) & 4) or 1:
        # id = indexbytes(buffer, offset+10+add)
        # The AC3 sync word 0x0b77 inside the payload marks an AC3 track.
        if buffer[offset + 11 + add:offset + 15 + add].find('\x0b\x77') != -1:
            # AC3 stream
            for a in self.audio:
                if a.id == _id:
                    break
            else:
                self.audio.append(core.AudioStream())
                self.audio[-1]._set('id', _id)
                self.audio[-1].codec = 0x2000  # AC3
        return 0
    if _id == SYS_PKT:
        return 0
    if _id == EXT_START:
        return 0
    return 0
# Normal MPEG (VCD, SVCD) ========================================
def isMPEG(self, file, force=False):
    """
    This MPEG starts with a sequence of 0x00 followed by a PACK Header
    http://dvd.sourceforge.net/dvdinfo/packhdr.html

    Returns 1 when the file parses as an MPEG program stream, else 0.
    """
    # NOTE(review): the string comparisons/finds below assume a py2-style
    # str buffer; with py3 bytes they would never match.
    file.seek(0, 0)
    buffer = file.read(10000)
    offset = 0

    # seek until the 0 byte stop
    while offset < len(buffer) - 100 and buffer[offset] == '\0':
        offset += 1
    # step back so offset points at the first of the two zero bytes of
    # the expected 0x00 0x00 0x01 start code
    offset -= 2

    # test for mpeg header 0x00 0x00 0x01
    header = '\x00\x00\x01%s' % chr(PACK_PKT)
    if offset < 0 or not buffer[offset:offset + 4] == header:
        if not force:
            return 0
        # brute force and try to find the pack header in the first
        # 10000 bytes somehow
        offset = buffer.find(header)
        if offset < 0:
            return 0

    # scan the 100000 bytes of data
    buffer += file.read(100000)

    # scan first header, to get basic info about
    # how to read a timestamp (sets self.get_time)
    self.ReadHeader(buffer, offset)

    # store first timestamp
    self.start = self.get_time(buffer[offset + 4:])
    while len(buffer) > offset + 1000 and \
            buffer[offset:offset + 3] == '\x00\x00\x01':
        # read the mpeg header
        new_offset = self.ReadHeader(buffer, offset)

        # header scanning detected error, this is no mpeg
        if new_offset is None:
            return 0

        if new_offset:
            # we have a new offset
            offset = new_offset

            # skip padding 0 before a new header
            while len(buffer) > offset + 10 and \
                    not indexbytes(buffer, offset + 2):
                offset += 1
        else:
            # seek to new header by brute force
            offset += buffer[offset + 4:].find('\x00\x00\x01') + 4

    # fill in values for support functions:
    self.__seek_size__ = 1000000
    self.__sample_size__ = 10000
    self.__search__ = self._find_timer_
    self.filename = file.name

    # get length of the file
    self.length = self.get_length()
    return 1
@staticmethod
def _find_timer_(buffer):
"""
Return position of timer in buffer or None if not found.
This function is valid for 'normal' mpeg files
"" |
# Configuration settings for Enso. Eventually this will take
# localization into account too (or we can make a separate module for
# such strings).
# The keys to start, exit, and cancel the quasimode.
# Their values are strings referring to the names of constants defined
# in the os-specific input module in use.
QUASIMODE_START_KEY = "KEYCODE_RCONTROL"
QUASIMODE_END_KEY = "KEYCODE_RETURN"
QUASIMODE_CANCEL_KEY1 = "KEYCODE_ESCAPE"
QUASIMODE_CANCEL_KEY2 = "KEYCODE_RCONTROL"
# Whether the Quasimode is actually modal ("sticky").
IS_QUASIMODE_MODAL = True
# Amount of time, in seconds (float), to wait from the time
# that the quasimode begins drawing to the time that the
# suggestion list begins to be displayed. Setting this to a
# value greater than 0 will effectively create a
# "spring-loaded suggestion list" behavior.
QUASIMODE_SUGGESTION_DELAY = 0.2
# The maximum number of suggestions to display in the quasimode.
QUASIMODE_MAX_SUGGESTIONS = 6
# The minimum number of characters the user must type before the
# auto-completion mechanism engages.
QUASIMODE_MIN_AUTOCOMPLETE_CHARS = 2
# The message displayed when the user types some text that is not a command.
BAD_COMMAND_MSG = "<p><command>%s</command> is not a command.</p>"\
"%s"
# Minimum number of characters that should have been typed into the
# quasimode for a bad command message to be shown.
BAD_COMMAND_MSG_MIN_CHARS = 2
# The captions for the above message, indicating commands that are related
# to the command the user typed.
ONE_SUGG_CAPTION = "<caption>Did you mean <command>%s</command>?</caption>"
# The string that is displayed in the quasimode window when the user
# first enters the quasimode.
QUASIMODE_DEFAULT_HELP = u"Welcome to Enso! Enter a command, " \
u"or type \u201chelp\u201d for assistance."
# The string displayed when the user has typed some characters but there
# is no matching command.
QUASIMODE_NO_COMMAND_HELP = "There is no matching command. "\
"Use backspace to delete characters."
# Message XML for the Spla | sh message shown when Enso first loads.
OPENING_MSG_XML = "<p>Welcome to <command>Enso</command>!</p>" + \
"<caption>Copyright © 2008 Humanized, Inc.</caption>"
# Message XML displayed when the mouse hovers over a mini message.
MINI_MSG_HELP_XML = "<p>The <command>hide mini messages</command>" \
" and <command>put</command> commands control" | \
" these mini-messages.</p>"
ABOUT_BOX_XML = u"<p><command>Enso</command> Community Edition</p>" \
"<caption> </caption>" \
"<p>Copyright © 2008 <command>Humanized, Inc.</command></p>" \
"<p>Copyright © 2008-2009 <command>Enso Community</command></p>" \
"<p>Version 1.0</p>"
# List of default platforms supported by Enso; platforms are specific
# types of providers that provide a suite of platform-specific
# functionality.
DEFAULT_PLATFORMS = ["enso.platform.win32"]
# List of modules/packages that support the provider interface to
# provide required platform-specific functionality to Enso.
PROVIDERS = []
PROVIDERS.extend(DEFAULT_PLATFORMS)
# List of modules/packages that support the plugin interface to
# extend Enso. The plugins are loaded in the order that they
# are specified in this list.
PLUGINS = ["enso.contrib.scriptotron",
"enso.contrib.help",
"enso.contrib.google",
"enso.contrib.evaluate"]
FONT_NAME = {"normal" : "Gentium (Humanized)", "italic" : "Gentium Italic"}
|
th.log10(config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_)
absorption_range_1200000 = config["DEFAULT"]["1200000"]["RANGE"] + ((config["DEFAULT"]["1200000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["1200000"]["RANGE"])
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]:
# Ref in and xmt watt
refBin_1200000 = config["DEFAULT"]["1200000"]["BIN"]
xmtW_1200000 = config["DEFAULT"]["1200000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + 15.0 * config["DEFAULT"]["1200000"]["BIN"])
else:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
btRange_1200000 = 0.0
if _CWPON_:
# Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["1200000"]["BIN"])
else:
wpRange_1200000 = rScale_1200000 * (absorpti | on_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
wpRange_1200000 = 0.0
else:
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
# 600khz
btRange_600000 = 0.0
wpRange_600000 = 0.0
refBin_600000 = 0.0
xmtW_600000 = 0.0
rScale_600000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["600000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_600000 = 20.0 * math.log10(math.pi * | config["DEFAULT"]["600000"]["DIAM"] / waveLength)
dB_600000 = 0.0;
if config["DEFAULT"]["600000"]["BIN"] == 0 or _CyclesPerElement_ == 0:
dB_600000 = 0.0;
else:
dB_600000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["600000"]["BIN"]) + dI - dI_600000 - 10.0 * math.log10(config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_)
absorption_range_600000 = config["DEFAULT"]["600000"]["RANGE"] + ((config["DEFAULT"]["600000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["600000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_600000 = config["DEFAULT"]["600000"]["BIN"];
xmtW_600000 = config["DEFAULT"]["600000"]["XMIT_W"];
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_600000 = 2.0 * rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000 + 15.0 * config["DEFAULT"]["600000"]["BIN"] )
else:
btRange_600000 = 2.0 * rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000)
else:
btRange_600000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_600000 = rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["600000"]["BIN"] )
else:
wpRange_600000 = rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000)
else:
wpRange_600000 = 0.0
else:
btRange_600000 = 0.0
wpRange_600000 = 0.0
# 300khz
btRange_300000 = 0.0
wpRange_300000 = 0.0
refBin_300000 = 0.0
xmtW_300000 = 0.0
rScale_300000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["300000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_300000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["300000"]["DIAM"] / waveLength)
dB_300000 = 0.0
if (config["DEFAULT"]["300000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_300000 = 0.0
else:
dB_300000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["300000"]["BIN"]) + dI - dI_300000 - 10.0 * math.log10(config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_)
absorption_range_300000 = config["DEFAULT"]["300000"]["RANGE"] + ((config["DEFAULT"]["300000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["300000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_300000 = config["DEFAULT"]["300000"]["BIN"]
xmtW_300000 = config["DEFAULT"]["300000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_300000 = 2.0 * rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000 + 15.0 * config["DEFAULT"]["300000"]["BIN"])
else:
btRange_300000 = 2.0 * rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000)
else:
btRange_300000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_300000 = rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["300000"]["BIN"])
else:
wpRange_300000 = rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000)
else:
wpRange_300000 = 0.0
else:
# Return 0 if not selected
btRange_300000 = 0.0
wpRange_300000 = 0.0
# 150khz
btRange_150000 = 0.0
wpRange_150000 = 0.0
refBin_150000 = 0.0
xmtW_150000 = 0.0
rScale_150000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["150000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_150000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["150000"]["DIAM"] / waveLength)
dB_150000 = 0.0;
if (config["DEFAULT"]["150000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_150000 = 0.0
else:
dB_150000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["150000"]["BIN"]) + dI - dI_150000 - 10.0 * math.log10(config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_)
absorption_range_150000 = config["DEFAULT"]["150000"]["RANGE"] + ((config["DEFAULT"]["150000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["150000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_150000 = config["DEFAULT"]["150000"]["BIN"]
xmtW_150000 = config["DEFAULT"]["150000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_150000 = 2.0 * rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000 + 15.0 * config["DEFAULT"]["150000"]["BIN"])
else:
btRange_150000 = 2.0 * rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000)
else:
btRange_150000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_150000 = rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["150000"]["BIN"])
|
from toee import *
from utilities import *
from Co8 import *
from py00439script_daemon import npc_set, npc_get
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
    """Route the speaker to the right dialog node based on daemon flag 1."""
    if npc_get(attachee, 1) == 0:
        triggerer.begin_dialog( attachee, 1 )
        return SKIP_DEFAULT
    if npc_get(attachee, 1) == 1:
        triggerer.begin_dialog( attachee, 100 )
    return SKIP_DEFAULT
def san_start_combat( attachee, triggerer ):
    """Abort the fight and start dialog line 4000 with the party leader."""
    party_leader = game.party[0]
    StopCombat(attachee, 0)
    party_leader.begin_dialog( attachee, 4000 )
    return RUN_DEFAULT
def give_default_starting_equipment(x = 0):
    """Give each party member a starting kit of item protos for their class.

    The first matching class (in the priority order below) decides the kit;
    anyone matching nothing falls back to the rogue outfit. Monks, sorcerers,
    warmages and wizards of small races get a size-appropriate weapon swap.
    The parameter x is unused and kept only for caller compatibility.
    """
    small_races = (race_gnome, race_halfling)
    for pc in game.party:
        if pc.stat_level_get(stat_level_barbarian) > 0:
            kit = [4074, 6059, 6011, 6216, 8014]
        elif pc.stat_level_get(stat_level_bard) > 0:
            kit = [4009, 6147, 6011, 4096, 5005, 5005, 6012, 6238, 12564, 8014]
        elif pc.stat_level_get(stat_level_druid) > 0:
            kit = [6216, 6217, 4116, 4115, 5007, 5007, 8014]
        elif pc.stat_level_get(stat_level_cleric) > 0 or pc.divine_spell_level_can_cast() > 0:
            kit = [6013, 6011, 6012, 6059, 4071, 8014]
        elif pc.stat_level_get(stat_level_fighter) > 0:
            kit = [6013, 6010, 6011, 6012, 6059, 4062, 8014]
        elif pc.stat_level_get(stat_level_monk) > 0:
            if pc.stat_level_get(stat_race) in small_races:
                kit = [6205, 6202, 4060, 8014]  # dagger (4060) instead of quarterstaff
            else:
                kit = [6205, 6202, 4110, 8014]
        elif pc.stat_level_get(stat_level_paladin) > 0:
            kit = [6013, 6012, 6011, 6032, 6059, 4036, 6124, 8014]
        elif pc.stat_level_get(stat_level_ranger) > 0:
            kit = [6013, 6012, 6011, 6059, 4049, 4201, 5004, 5004, 8014, 6269]
        elif pc.stat_level_get(stat_level_rogue) > 0:
            kit = [6042, 6045, 6046, 4049, 4060, 6233, 8014, 4096, 5005, 5005, 8014, 12012]
        elif pc.stat_level_get(stat_level_swashbuckler) > 0:
            kit = [6013, 6045, 6046, 4009, 4060, 6238, 8014]
        elif pc.stat_level_get(stat_level_sorcerer) > 0:
            if pc.stat_level_get(stat_race) in small_races:
                kit = [6211, 6045, 6046, 6124, 4060, 4115, 5007, 5007, 8014]  # dagger (4060) instead of spear
            else:
                kit = [6211, 6045, 6046, 6124, 4117, 4115, 5007, 5007, 8014]
        elif pc.stat_level_get(stat_level_warmage) > 0:
            if pc.stat_level_get(stat_race) in small_races:
                kit = [6013, 6045, 6046, 6059, 4071, 4115, 5007, 5007, 8014]  # mace (4071) instead of spear
            else:
                kit = [6013, 6045, 6046, 6059, 4117, 4115, 5007, 5007, 8014]
        elif pc.stat_level_get(stat_level_beguiler) > 0:
            kit = [6042, 6045, 6046, 4049, 4060, 6233, 8014, 4096, 5005, 5005, 8014, 12012]
        elif pc.stat_level_get(stat_level_wizard) > 0 or pc.arcane_spell_level_can_cast() > 0:
            if pc.stat_level_get(stat_race) in small_races:
                kit = [4060, 4096, 5005, 5005, 6081, 6143, 6038, 6011, 8014]
            else:
                kit = [4110, 4096, 5005, 5005, 6081, 6143, 6038, 6011, 8014]
        elif pc.stat_level_get(stat_level_scout) > 0:
            kit = [6013, 6012, 6011, 4049, 4201, 5004, 5004, 8014, 6269, 12012]
        else:  # default to rogue outfit
            kit = [6042, 6045, 6046, 4049, 4060, 6233, 8014, 4096, 5005, 5005, 8014, 12012]
        for proto in kit:
            create_item_in_inventory( proto, pc )
    return
def defalt_equipment_autoequip():
    """Have every party member auto-wield the best gear they carry."""
    for member in game.party:
        member.item_wield_best_all()
in=-inf, ymax=inf):
xs, ys = [], []
for xi, yi in zip(x, y):
if xmin<=xi<=xmax and ymin<=yi<=ymax:
xs.append(xi)
ys.append(yi)
return array(xs), array(ys)
class optimbox(object):
    """optimbox is a class used for fitting curves and linked with the fmin decorator.

    As input it takes a dictionary with the keys 'objective' and 'goal'
    (each an (x, y) pair of sequences); optionally 'xlim', 'ylim',
    'weight' and 'yscale'.

    if yscale is set to 'lin' (default), the error is weight*(objective-goal)
    if yscale is set to 'log', the error is weight*(objective-goal)/goal.
    if weight is not defined, weight defaults to mae(goal) when yscale='lin'
    and to 1.0 when yscale='log'.

    The scalar error is returned by self.error() and consumed by fmin.
    """

    def mean(self, x):
        # Collapse a vector of pointwise errors into one scalar (mean
        # absolute error, via the module-level mae helper).
        return mae(x)

    def __init__(self, kwargs):
        self._error = 0.0
        if 'objective' in kwargs and 'goal' in kwargs:
            x1, y1 = kwargs['objective']
            x2, y2 = kwargs['goal']
        else:
            raise Exception('instances for the optimbox are not correct')
        yscale = kwargs.get('yscale', 'lin')
        # Restrict both curves to the requested window before comparing.
        xmin, xmax = kwargs.get('xlim', (-inf, inf))
        ymin, ymax = kwargs.get('ylim', (-inf, inf))
        x1, y1 = box(x1, y1, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
        x2, y2 = box(x2, y2, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
        if yscale == 'lin':
            weight = kwargs.get('weight', self.mean(y2))
            if hasattr(weight, '__iter__'):
                raise Exception('weight cannot be a list of values')
            error = weight*(y1-y2)
            if hasattr(error, '__iter__'):
                self._error = self.mean(error)
            else:
                self._error = abs(error)
        elif yscale == 'log':
            weight = kwargs.get('weight', 1.0)
            if hasattr(weight, '__iter__'):
                raise Exception('weight cannot be a list of values')
            try:
                error = weight*(y1-y2)/y2
            except ZeroDivisionError:
                # BUG FIX: the exception was previously instantiated but
                # never raised, letting execution continue with `error`
                # undefined. Re-raise with a descriptive message instead.
                raise ZeroDivisionError('at least one point of the scatter data is zero')
            if hasattr(error, '__iter__'):
                self._error = self.mean(error)
            else:
                self._error = abs(error)

    def error(self):
        """Return the scalar fitting error computed at construction."""
        return self._error
class fmin(object):
    # Optimizer state shared across ALL fmin instances (class attributes):
    # variable name -> value, and variable name -> (min, max) bounds.
    x = OrderedDict() # ordered dictionary
    bounds = OrderedDict() # ordered dictionary
    def __init__(self, method='cma-es', **options):
        """fmin is a function decorator used for minimization of function.

        method -- optimizer backend name; only 'cma-es' is handled.
        options -- forwarded verbatim to the backend. For method = 'cma-es':
            variables = 'all'
            sigma0 = 0.1
            tolx = 1e-3
            tolfun = 1e-5
            seed = 1234
            maxiter = '100 + 50*(N+3)**2 // popsize**0.5'
            maxfevals = inf
            popsize = '4 + int(3*log(N))'
            verbose = -1
        Shared optimizer state (set by the caller before decorating):
            fmin.x <- dict of variable name -> initial value
            fmin.bounds <- dict of variable name -> (min, max)
        """
        self.method = method
        self.options = options
    def __call__(self, func):
        # Decorator protocol: run the selected optimizer over *func*.
        # NOTE(review): any method other than 'cma-es' silently falls
        # through and returns None -- confirm that is intended.
        if self.method == 'cma-es':
            results = self._fmin_cma_es(func=func, **dict(self.options))
            return results
def _fmin_cma_es(self, func, variables='all', sigma0=0.1, tolx=1e-3, seed=1234,
maxiter='100+50*(N+3)**2//popsize**0.5', verbose=-1,
maxfevals=float('inf'), popsize='4+int(3*log(N))', tolfun=1e-5 ):
now = time()
def tf(X, bounds):
Y = []
for x, (xmin, xmax) in zip(X, bounds):
slope = 1./(xmax-xmin)
intercept = 1.0-slope*xmax
y = slope*x + intercept
Y.append(y)
return Y
def tfinv(Y, bounds):
X = []
for y, (xmin, xmax) in zip(Y, bounds):
slope = xmax-xmin
intercept = xmax-slope
x = slope*y + intercept
X.append(x)
return X
def eval_error(output):
if isinstance(output, dict):
return optimbox(output).error()
elif isinstance(output, (float, int)):
return float(abs(output))
elif isinstance(output, tuple):
return average([ eval_error(elt) for elt in output ])
elif hasattr(output, '__iter__'):
return mae(output)
else:
raise Exception('output must be based on optimbox, float, tuple or list/array')
# init
if variables == 'all':
variables = fmin.x.keys()
x0 = [fmin.x[key] for key in variables]
bounds = [fmin.bounds[key] for key in variables]
options = { 'boundary_handling' : 'BoundTransform ',
'bounds' : [[0]*len(x0), [1]*len(x0)],
'seed' : seed,
'verb_time' : False,
'scaling_of_variables' : None,
'verb_disp' : 1,
'maxiter' : maxiter,
'maxfevals' : maxfevals,
'signals_filename' : 'cmaes_signals.par',
'tolx' : tolx,
'popsize' : popsize,
'verbose' : verbose,
'ftarget': 1e-12,
'tolfun' : 1e-5,
}
es = cma.CMAEvolutionStrategy(tf(x0, bounds), sigma0, options)
# initial error with the original set of variables values
error = eval_error( func(**fmin.x) )
best_objective = error
print 'Start CMA-ES Optimizer...'
print
print '{step:>6}{residual:>11}{x}'.format(step='step', x='{:>11}'*len(variables), residual='residual').format(*variables)
print '-'*(6+11+11*len(variables))
print '{step:>6}{residual:>11.3e}{x}'.format(step=0, x='{:>11.3e}'*len(x0), residual=error).format(*x0)
while not es.stop():
solutions = es.ask() # provide a set of variables values
objectives = [] # init
for i, x in enumerate(solutions):
xt = { k:v for k, v in zip(variables, tfinv(x, bounds)) }
# add other keyword arguments
for key in fmin.x.keys():
if not(key in variables):
xt[key] = fmin.x[key]
error = eval_error( func(**xt) )
objectives.append( error )
# if error is better then update fmin.x
if error < best_objective:
fmin.x.update(xt)
best_objective = error
es.tell(solutions, objectives)
#es.disp(1)
if es | .countiter%10==0:
print
print '{step:>6}{residual:>11}{x}'.format(step='ste | p', x='{:>11}'*len(variables), residual='residual').format(*variables)
print '-'*(6+11+11*len(variables))
indx = objectives.index(min(objectives))
x = tfinv(solutions[indx], bounds)
isbest = ''
if objectives[indx] == best_objective:
isbest = '*'
print '{step:>6}{residual:>11.3e}{x} {isbest}'.format(step=es.countiter, x='{:>11.3e}'*len(x), residual=objectives[indx], isbest=isbest).format(*x)
#es.result_pretty()
xbest, f_xbest, evaluations_xbest, evaluations, iterations, pheno_xmean, effective_stds = es.result()
stop = es.stop()
print '-----------------'
print 'termination on %s=%.2e'%(stop.keys()[0], stop.values()[0])
print 'bestever f-value: %r'%(f_xbest)
print 'incumbent solution: %r'%(list(tfinv(xbest, bounds)))
print 'std deviation: %r'%(list(effective_stds))
print 'evaluation func: %r'%(evaluations)
print 'total time:',
minutes = int((time()-now)/60)
if minutes>1:
pr |
from __future__ import print_function
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import sqlalchemy
import sys
# This value must be incremented after schema changes on replicated tables!
SCHEMA_VERSION = 1
# Module-level SQLAlchemy engine; populated lazily by init_db_engine().
engine = None
def init_db_engine(connect_str):
    """Create the module-level engine from *connect_str*.

    NullPool disables connection pooling: every connect() opens a fresh
    database connection and close() really closes it.
    """
    global engine
    engine = create_engine(connect_str, poolclass=NullPool)
def run_sql_script(sql_file_path):
    """Execute the whole contents of *sql_file_path* on the engine.

    The connection is now closed even when execute() raises (previously
    an exception leaked the open connection).
    """
    with open(sql_file_path) as sql:
        connection = engine.connect()
        try:
            connection.execute(sql.read())
        finally:
            connection.close()
def run_sql_script_without_transaction(sql_file_path):
    """Execute *sql_file_path* statement-by-statement outside a transaction.

    Returns True on success, False if any statement fails. The isolation
    level is always restored and the connection closed via finally.
    """
    with open(sql_file_path) as sql:
        connection = engine.connect()
        # Level 0 means autocommit on the underlying DBAPI connection
        # (psycopg2 semantics) -- NOTE(review): driver-specific, confirm.
        connection.connection.set_isolation_level(0)
        lines = sql.read().splitlines()
        try:
            for line in lines:
                # TODO: Not a great way of removing comments. The alternative is to catch
                # the exception sqlalchemy.exc.ProgrammingError "can't execute an empty query"
                if line and not line.startswith("--"):
                    connection.execute(line)
        except sqlalchemy.exc.ProgrammingError as e:
            print("Error: {}".format(e))
            return False
        finally:
            # Restore the default isolation level before handing back.
            connection.connection.set_isolation_level(1)
            connection.close()
        return True
|
import random
import musictheory
import fileza | rt
import math
from pydub import AudioSegment
from pydub.playback import play
class Part:
    """One section of a song: a theme type plus intensity/size/overlay levels."""

    def __init__(self, typ=None, intensity=0, size=0, gen=0, cho=0):
        self._type = typ  # one of "n1", "n2", "bg", "ch", "ge"
        # All levels are normalized fractions; reject anything outside [0, 1].
        if intensity<0 or gen<0 or cho<0 or size<0 or intensity>1 or size>1 or gen>1 or cho>1:
            raise ValueError ("Invalid Values for Structure Part")
        self._intensity = intensity  # [0-1]
        self._size = size            # [0-1]
        self._genover = gen          # [0-1] overlay of general type lines
        self._chover = cho           # [0-1] overlay of chorus type lines

    def __repr__(self):
        return "[" + self._type + "-" + str(self._intensity) + "-" + str(self._size) + "-" + str(self._genover) + "-" + str(self._chover) + "]"

    @classmethod
    def fromString(cls, string):
        """Parse "[typ-intensity-size-genoverlay-chooverlay]" into a Part.

        e.g. "[n1-0.123-1-0.321-0.2]". Surrounding spaces/newlines/NULs are
        stripped; raises ValueError on malformed input or unknown type.
        """
        import ast  # local import: keeps this fix self-contained
        while string[0] == " ":
            string = string[1:]
        while string[0] == "\n":
            string = string[1:]
        while string[-1] == " ":
            string = string[:-1]
        while string[-1] == "\0":
            string = string[:-1]
        while string[-1] == "\n":
            string = string[:-1]
        if len(string)<8:
            raise ValueError("Invalid Part string: "+string)
        if string[0] == "[" and string[-1] == "]":
            string = string[1:-1]
        else:
            raise ValueError("Invalid Part string: "+string)
        typ = string[:2]
        string = string[3:]
        if not typ in ("n1", "n2", "bg", "ch", "ge"):
            raise ValueError("Invalid Part Type string: "+typ)
        valstrings = str.split(string, "-")
        # SECURITY: ast.literal_eval replaces eval() -- it parses numeric
        # literals only, so hostile text in a structure string cannot run
        # arbitrary code. Int/float distinction is preserved exactly.
        inten = ast.literal_eval(valstrings[0])
        size = ast.literal_eval(valstrings[1])
        gen = ast.literal_eval(valstrings[2])
        cho = ast.literal_eval(valstrings[3])
        return cls(typ, inten, size, gen, cho)

    def getTheme(self, pal):
        """Return the theme of palette *pal* matching this part's type."""
        if self._type == "n1":
            return pal._n1
        if self._type == "n2":
            return pal._n2
        if self._type == "bg":
            return pal._bg
        if self._type == "ch":
            return pal._ch
        if self._type == "ge":
            return pal._ge

    def getAudio(self, pal, bpm):
        """Render this part: overlay theme voices plus general/chorus overlays."""
        base = self.baseDur(pal, bpm)
        total = base + 3000  # extra time for last note to play
        # Intensity scales how many of the theme's voices are mixed in.
        nvoic = math.ceil(self._intensity * self.getTheme(pal).countVoices())
        # A palette may lack a general/chorus theme; treat that as no overlay
        # (narrowed from a bare except so Ctrl-C etc. still propagate).
        try:
            ngeno = math.ceil(self._genover * pal._ge.countVoices())
        except Exception:
            ngeno = 0
        try:
            nchoo = math.ceil(self._chover * pal._ch.countVoices())
        except Exception:
            nchoo = 0
        sound = AudioSegment.silent(total)
        them = self.getTheme(pal)
        for i in range(nvoic):
            voic = them._sorting[i].getVoice(them)
            print(them._sorting[i].indicationStr(them)) #DEBUG !!
            vsound = voic.partialAudio(self._size, bpm)
            sound = sound.overlay(vsound)
        them = pal._ge
        for i in range(ngeno):
            voic = them._sorting[i].getVoice(them)
            print(them._sorting[i].indicationStr(them)) #DEBUG !!
            vsound = voic.partialAudio(self._size, bpm)
            sound = sound.overlay(vsound)
        them = pal._ch
        for i in range(nchoo):
            voic = them._sorting[i].getVoice(them)
            print(them._sorting[i].indicationStr(them)) #DEBUG !!
            vsound = voic.partialAudio(self._size, bpm)
            sound = sound.overlay(vsound)
        return sound

    def baseDur(self, pal, bpm):
        """Base duration of this part of the song, as defined by its theme."""
        return self.getTheme(pal).baseDurForStruct(self._size, bpm)
class Structure:
    """Ordered collection of Parts that makes up a whole song."""

    def __init__(self):
        self._parts = ()

    def add(self, part):
        """Append *part* at the end of the structure."""
        self._parts = self._parts + (part,)

    def __repr__(self):
        return "@STRUCTURE:" + str(self._parts)

    def baseDur(self, pal, bpm=None):
        """Total base duration of all parts at the given tempo."""
        if bpm is None:
            bpm = pal._bpm
        return sum(part.baseDur(pal, bpm) for part in self._parts)

    def songAudio(self, pal, bpm=None):
        """Render the whole song by overlaying each part at its start time."""
        if bpm is None:
            bpm = pal._bpm
        song = AudioSegment.silent(self.baseDur(pal, bpm) + 3000)  # 3 seconds for last note to play
        position = 0
        for part in self._parts:
            song = song.overlay(part.getAudio(pal, bpm), position)
            position = position + part.baseDur(pal, bpm)
            print("curTime:", position)
        return song
# wselect WeightedSelect returns element of dictionary based on dict weights {element:weight}
def wselect(dicti):
    """Weighted random select: return a key of *dicti* with probability
    proportional to its weight value (element:weight mapping)."""
    threshold = sum(dicti.values()) * random.random()
    for element, weight in dicti.items():
        if weight >= threshold:
            return element
        threshold -= weight
    raise ValueError ("something went wrong")
# rselect RandomSelect returns random element of list
def rselect(lista):
    """Uniformly random element of *lista* (thin wrapper over random.choice)."""
    return random.choice(lista)
def lenweights():
    # Weighted distribution for the number of parts in a song (3-11).
    return {3:1, 4:1, 5:2, 6:3, 7:4, 8:3, 9:2, 10:1, 11:1}
def stweights():
    # Weights for the FIRST part type of a song.
    return {"n1":5, "n2":4, "ch":2, "bg":1}
def n1weights():
    # Transition weights for the part following an "n1" part.
    return {"n1":4, "n2":2, "ch":3, "bg":1}
def n2weights():
    # Transition weights for the part following an "n2" part.
    return {"n1":2, "n2":3, "ch":4, "bg":2}
def chweights():
    # Transition weights for the part following a chorus ("ch") part.
    return {"n1":2, "n2":1, "ch":4, "bg":1}
def bgweights():
    # Transition weights after a bridge ("bg"): strongly favors a chorus.
    return {"n1":1, "n2":1, "ch":20, "bg":8}
def typeSequence(size):
    """Build a tuple of *size* part types via a weighted Markov chain."""
    transition = {"n1": n1weights, "n2": n2weights,
                  "ch": chweights, "bg": bgweights}
    current = wselect(stweights())
    sequence = (current,)
    while len(sequence) < size:
        current = wselect(transition[current]())
        sequence = sequence + (current,)
    return sequence
def siweights():
    # Starting intensity distribution (bell-shaped around 0.4-0.5).
    return {0.1:1, 0.2:2, 0.3:4, 0.4:5, 0.5:5, 0.6:4, 0.7:3, 0.8:2, 0.9:1}
def deltaweights():
    # Per-step intensity change; biased toward "no change" and slight rises.
    return {-0.3:1, -0.2:1, -0.1:1, 0:5, 0.1:3, 0.2:2, 0.3:2}
def intensitySequence(size):
    """Random-walk a tuple of *size* intensity values clamped to [0.1, 1]."""
    level = wselect(siweights())
    sequence = (level,)
    while len(sequence) < size:
        level = min(1, max(0.1, level + wselect(deltaweights())))
        sequence = sequence + (level,)
    return sequence
def soweights():
    # Starting overlay distribution (most parts begin with no overlay).
    return {0:6, 0.1:2, 0.2:1}
def deltoweights():
    # Per-step overlay change; heavily biased toward "no change".
    return {-0.2:1, -0.1:1, 0:8, 0.1:2, 0.2:2}
def overlaySequence(size):
    """Random-walk a tuple of *size* overlay amounts clamped to [0.1, 1]
    after the first step (the starting value itself may be 0)."""
    amount = wselect(soweights())
    sequence = (amount,)
    while len(sequence) < size:
        amount = min(1, max(0.1, amount + wselect(deltoweights())))
        sequence = sequence + (amount,)
    return sequence
def ssweights():
    # Part size distribution: overwhelmingly full-length parts.
    return {0.2:1, 0.4:1, 0.6:1, 0.8:1, 1:16}
def sizeSequence(size):
    """Draw *size* independent part sizes from the size distribution."""
    values = ()
    for _ in range(size):
        values = values + (wselect(ssweights()),)
    return values
def makeStruct(size = None):
    """Generate a random song Structure.

    size -- number of parts; when None a weighted random length is drawn.
    """
    if size is None:  # idiom fix: identity test for None, not ==
        size = wselect(lenweights())
    types = typeSequence(size)
    inten = intensitySequence(size)
    sizes = sizeSequence(size)
    overl = overlaySequence(size)
    return joinSeqs(types, inten, sizes, overl)
def joinSeqs(types, inten, sizes, overl):
    """Zip the four parallel sequences into a Structure of Parts."""
    struct = Structure()
    for idx in range(len(types)):
        if types[idx] == "bg":
            # A bridge carries a chorus overlay; everything else a general one.
            gen_s, cho_s = "0", str(overl[idx])
        else:
            gen_s, cho_s = str(overl[idx]), "0"
        spec = "[" + types[idx] + "-" + str(inten[idx]) + "-" + str(sizes[idx]) + "-" + gen_s + "-" + cho_s + "]"
        struct.add(Part.fromString(spec))
    return struct
def pooptest():
    """Debug helper: print a batch of 30 random structures."""
    for _ in range(30):
        print(makeStruct())
|
# -*- coding: utf-8 -*-
import sqlite3
from flask import g, c | urrent_app
def connect_db():
    """Open a new SQLite connection configured to return Row objects."""
    connection = sqlite3.connect(current_app.config['DATABASE_URI'])
    connection.row_factory = sqlite3.Row
    return connection
# http://flask.pocoo.org/docs/0.10/appcontext/
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
db = getattr(g, '_database', None)
if db is None:
db = g._ | database = connect_db()
return db
|
import threading
import time
class Status:
    """Thread-safe integer counter shared between worker threads."""

    def __init__(self):
        # Per-instance lock guarding every read and write of the counter.
        # (Previously the counter lived as a class attribute default; it is
        # now explicit instance state initialised here.)
        self.lock = threading.Lock()
        self.statusno = 0

    def update(self, add):
        """Atomically add *add* to the counter."""
        with self.lock:  # context manager releases even if addition raises
            self.statusno = self.statusno + add

    def get(self):
        """Atomically read the current counter value."""
        with self.lock:
            return self.statusno
def md5calc(status, args):
    """Simulated work: spend one second per entry in *args*, bumping *status*
    by one after each."""
    for _item in args:
        time.sleep(1)
        status.update(1)
def show_status(status):
    # Progress monitor: once a second print the shared counter, until only
    # the main thread and this monitor thread remain (all workers done).
    while threading.active_count() > 2:
        time.sleep(1)
        print status.get()
# Shared counter updated by all worker threads.
status = Status()
slaves = []
# Spawn five workers; each "processes" the three entries of its list.
for i in range(5):
    t = threading.Thread(target=md5calc, args=(status, [1,2,5]))
    t.start()
    slaves.append(t)
# Monitor thread prints progress until the workers finish, then exits.
m = threading.Thread(target=show_status, args=(status,))
m.start()
m.join()
# Reap the workers after the monitor has observed their completion.
for t in slaves:
    t.join()
|
import os.path
import platform
from nose2.compat import unittest
from nose2.tests._common import FunctionalTestCase
class TestCoverage(FunctionalTestCase):
    """Functional check that the coverage plugin reports per-file stats."""

    @unittest.skipIf(
        platform.python_version_tuple()[:2] == ('3', '2'),
        'coverage package does not support python 3.2')
    def test_run(self):
        # Run the scenario suite with coverage enabled, restricted to lib/.
        proc = self.runIn(
            'scenario/test_with_module',
            '-v',
            '--with-coverage',
            '--coverage=lib/'
        )
        # Raw strings so the \s escapes reach the regex engine intact
        # (non-raw '\s' is an invalid escape sequence in Python source).
        STATS = r'\s+8\s+5\s+38%'
        expected = os.path.join('lib', 'mod1(.py)?')
        expected = expected.replace('\\', r'\\')
        expected = expected + STATS
        stdout, stderr = proc.communicate()
        self.assertTestRunOutputMatches(
            proc,
            stderr=expected)
        self.assertTestRunOutputMatches(
            proc,
            stderr=r'TOTAL\s+' + STATS)
|
__author__ = 'Alex Breshears'
__license__ = '''
Copyright (C) 2012 Alex Breshears
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (t | he "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "A | S IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.contrib.admin import site
from django.contrib import admin
from shorturls.models import *
class LinkClickInline(admin.TabularInline):
    """Inline table of LinkClick rows shown on the Link admin page."""
    model = LinkClick
    # BUG FIX: Django's inline option is `extra` (number of blank forms);
    # the original `extras = 0` was ignored, leaving the default 3 blanks.
    extra = 0
class LinkAdmin(admin.ModelAdmin):
    """Admin for Link with its click records editable inline."""
    inlines = [LinkClickInline]
    def save_model(self, request, obj, form, change):
        # NOTE(review): this mirrors ModelAdmin's default behavior (just
        # saves the object); kept as an explicit hook point.
        obj.save()
# Register Link on the default admin site with the customized admin class.
site.register(Link, LinkAdmin)
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from collections import defaultdict
from ..anagonda.context import guru
from commands.base import Command
class PackageSymbols(Command):
    """Run guru to get a detailed list of the package symbols."""

    def __init__(self, callback, uid, vid, scope, code, path, buf, go_env):
        self.vid = vid
        self.scope = scope
        self.code = code
        self.path = path
        self.buf = buf
        self.go_env = go_env
        super(PackageSymbols, self).__init__(callback, uid)

    def run(self):
        """Run guru `describe` and report the sorted symbol list.

        Invokes self.callback with either a success payload (one dict per
        symbol: filename/line/col/ident/full/keyword) or a failure payload
        carrying the error string.
        """
        try:
            offset = getattr(self, 'offset', None)
            if offset is None:
                # No explicit offset given: point just past `package `.
                # NOTE(review): if 'package ' is absent, find() returns -1
                # and this yields 8 -- confirm inputs always contain it.
                offset = self.code.find('package ') + len('package ') + 1
            with guru.Guru(
                    self.scope, 'describe', self.path,
                    offset, self.buf, self.go_env) as desc:
                symbols = []
                for symbol in self._sort(desc):
                    # guru positions are "path:line:col" strings.
                    path, line, col = symbol['pos'].split(':')
                    symbols.append({
                        'filename': path,
                        'line': int(line),
                        'col': int(col),
                        'ident': symbol['name'],
                        'full': symbol['type'],
                        'keyword': symbol['kind'],
                        'show_filename': True
                    })
                self.callback({
                    'success': True,
                    'result': symbols,
                    'uid': self.uid,
                    'vid': self.vid
                })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc())
            self.callback({
                'success': False,
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })

    def _sort(self, desc):
        """Sort the output by File -> Vars -> Type -> Funcs."""
        symbols = []
        # idiom: defaultdict(list) instead of defaultdict(lambda: [])
        aggregated_data = defaultdict(list)  # filename -> member elements
        for elem in desc.get('package', {}).get('members', []):
            filename = elem['pos'].split(':')[0]
            aggregated_data[filename].append(elem)
        for filename, elems in aggregated_data.items():
            symbols += sorted(
                [e for e in elems if e['kind'] in ['var', 'const']],
                key=lambda x: x['pos']
            )
            symbols += sorted(
                [e for e in elems if e['kind'] == 'type'],
                key=lambda x: x['pos']
            )
            symbols += sorted(
                [e for e in elems if e['kind'] == 'func'],
                key=lambda x: x['pos']
            )
            # Methods of each type come last, re-labelled as functions.
            for e in elems:
                if e['kind'] == 'type':
                    methods = []
                    for method in e.get('methods', []):
                        new_elem = method
                        new_elem['kind'] = 'func'
                        new_elem['type'] = method['name']
                        methods.append(new_elem)
                    symbols += sorted(methods, key=lambda x: x['pos'])
        return symbols
class PackageSymbolsCursor(PackageSymbols):
    """Run guru to get detailed information about the symbol under cursor
    """
    def __init__(self, cb, uid, vid, scope, code, path, buf, off, go_env):
        # `off` pins guru to the cursor position instead of the package clause.
        self.offset = off
        super(PackageSymbolsCursor, self).__init__(
            cb, uid, vid, scope, code, path, buf, go_env
        )
    def _sort(self, desc):
        """Sort the output by File -> Vars -> Type -> Funcs
        """
        # Whole-package answers are handled by the parent implementation.
        if desc.get('package') is not None:
            return super(PackageSymbolsCursor, self)._sort(desc)
        symbols = []
        aggregated_data = defaultdict(lambda: [])
        # guru's "detail" field names the key under which the payload lives.
        detail_field = desc.get('detail')
        if detail_field is None:
            return symbols
        details = desc.get(detail_field)
        if details is None:
            return symbols
        if detail_field == 'type':
            # Prefer the name position when present; fall back to desc['pos'].
            filename = details.get('namepos', desc['pos']).split(':')[0]
            details['pos'] = details.get('namepos', desc['pos'])
            details['name'] = desc['desc']
            details['kind'] = details['type']
            aggregated_data[filename].append(details)
            for elem in details.get('methods', []):
                filename = elem['pos'].split(':')[0]
                elem['type'] = elem['name']
                # NOTE(review): this sets the method's kind to its own name
                # (elem['type'] was just assigned elem['name']); the parent
                # class uses the literal 'func' here -- verify intent.
                elem['kind'] = elem['type']
                aggregated_data[filename].append(elem)
        else:
            filename = details['objpos'].split(':')[0]
            details['pos'] = details['objpos']
            details['name'] = details['type']
            details['kind'] = details['type']
            aggregated_data[filename].append(details)
        # Flatten the per-file groups, ordered by position.
        for filename, elems in aggregated_data.items():
            symbols += sorted(elems, key=lambda x: x['pos'])
        return symbols
|
import time
import numpy as np
import keras
import tensorflow as tf
import keras.backend as K
from keras import optimizers
from keras.models import load_model
from keras.callbacks import Callback
from functions import calculate_top_k_new_only
"""
PeriodicValidation - Keras callback - checks val_loss periodically instead of using Model.fit() every epoch
"""
class PeriodicValidation(Callback):
    """Keras callback that validates periodically instead of every epoch.

    On epochs where ``epoch % 5`` is 2 or 4 it optionally checkpoints the
    model, evaluates on ``val_data``, reports MAP@K for new-only products,
    and saves the model whenever validation loss improves.
    """

    def __init__(self, val_data, batch_size, filepath):
        """
        :param val_data: tuple ``(inputs, targets)``, or None to skip evaluation
        :param batch_size: batch size used for ``model.evaluate``
        :param filepath: base path for saved models; falsy disables saving
        """
        super(PeriodicValidation, self).__init__()
        self.val_data = val_data
        self.batch_size = batch_size
        self.filepath = filepath
        # Best validation loss seen so far. Use np.inf: the `np.Inf`
        # alias was removed in NumPy 2.0.
        self.min_val_loss = np.inf

    def on_epoch_end(self, epoch, logs=None):
        # logs=None avoids the mutable-default-argument pitfall; the dict
        # itself is never read here.
        # Only validate on epochs 2 and 4 of each 5-epoch cycle.
        if epoch % 5 == 4 or epoch % 5 == 2:
            if self.filepath:
                self.model.save(self.filepath + ".ep_" + str(epoch) + ".h5", overwrite=True)
            if self.val_data is None:
                return
            h = self.model.evaluate(self.val_data[0], self.val_data[1], batch_size=self.batch_size, verbose=0)
            print("validating on " + str(self.val_data[1].shape[0]) + " samples on epoch " + str(epoch) + ": ", h)
            y_top_k_new_only = calculate_top_k_new_only(self.model,
                                                        self.val_data[0][0], self.val_data[0][1], self.val_data[1], self.batch_size,
                                                        (not self.val_data[0][1].shape[2] == self.val_data[1].shape[1]))
            print("testing MAP@K for NEW products: ", y_top_k_new_only)
            # h[0] is the loss; assumes evaluate() returns a list, i.e. the
            # model was compiled with at least one metric -- TODO confirm.
            if h[0] < self.min_val_loss:
                if self.filepath:
                    self.model.save(self.filepath, overwrite=True)
                    print("val_loss improved from " + str(self.min_val_loss) + " to " + str(h[0]) + ", saving model to " + self.filepath)
                else:
                    print("val_loss improved from " + str(self.min_val_loss) + " to " + str(h[0]))
                self.min_val_loss = h[0]

    def on_train_end(self, logs=None):
        """Log every epoch's training metrics with full float precision."""
        print("epoch", [m for m in self.model.history.params['metrics']])
        for epoch in self.model.history.epoch:
            print(epoch, [self.model.history.history[m][epoch] for m in self.model.history.params['metrics']])
#
|
# -*- coding: utf8 -*-
from .task import TaskID
from .core import Handler
from .queue import EventQueue
__all__ = [
'TaskID',
'Handler',
    'EventQueue',
]
from .PBXResolver import *
from .PBX_Constants import *
class PBX_Base(object):
    """Base class for every PBX object parsed out of an Xcode project file."""

    def __init__(self, lookup_func, dictionary, project, identifier):
        """
        :param lookup_func: resolver mapping a PBX dictionary to its concrete
            class (unused by the base class; consumed by subclasses)
        :param dictionary: raw plist dictionary for this object (unused here)
        :param project: owning project object (unused here)
        :param identifier: unique hash string identifying this object
        """
        # default 'name' property of a PBX object is the type
        self.name = self.__class__.__name__
        # this is the identifier for this object
        self.identifier = str(identifier)
        # set of any referenced identifiers on this object
        self.referencedIdentifiers = set()

    def __attrs(self):
        # Identity of a PBX object is its identifier alone.
        return (self.identifier)

    def __repr__(self):
        return '(%s : %s : %s)' % (type(self), self.name, self.identifier)

    def __eq__(self, other):
        # Equal when the same concrete type carries the same identifier.
        return isinstance(other, type(self)) and self.identifier == other.identifier

    def __hash__(self):
        # Consistent with __eq__: both key off the identifier.
        return hash(self.__attrs())

    def resolve(self, type, item_list):
        """Return the items of `item_list` that are instances of `type`.

        NOTE(review): on Python 3 this returns a lazy `filter` object;
        callers needing a list must wrap it.
        """
        return filter(lambda item: isinstance(item, type), item_list)

    def fetchObjectFromProject(self, lookup_func, identifier, project):
        """Return the project's object for `identifier`, instantiating and
        registering it via `lookup_func` when not yet created."""
        find_object = project.objectForIdentifier(identifier)
        if find_object is None:
            result = lookup_func(project.contents[kPBX_objects][identifier])
            if result[0] == True:  # noqa: E712 -- preserve original comparison semantics
                find_object = result[1](lookup_func, project.contents[kPBX_objects][identifier], project, identifier)
                project.objects.add(find_object)
        return find_object

    def parseProperty(self, prop_name, lookup_func, dictionary, project, is_array):
        """Resolve property `prop_name` of `dictionary` into PBX object(s),
        recording every referenced identifier along the way.

        :param is_array: True when the property holds a list of identifiers
        :return: a list of objects when `is_array`, else a single object
        """
        dict_item = dictionary[prop_name]
        if is_array == True:  # noqa: E712 -- preserve original comparison semantics
            property_list = []
            for item in dict_item:
                self.referencedIdentifiers.add(item)
                find_object = self.fetchObjectFromProject(lookup_func, item, project)
                property_list.append(find_object)
            return property_list
        else:
            self.referencedIdentifiers.add(dict_item)
            return self.fetchObjectFromProject(lookup_func, dict_item, project)
|
##########################################################################
#
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONT | RACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
############################################# | #############################
import unittest

import IECore

import Gaffer
import GafferScene
import GafferSceneTest
class PrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :
	def test( self ) :
		"""PrimitiveVariables with no members is a perfect pass-through;
		adding one member changes only the object at "/sphere"."""
		s = GafferScene.Sphere()
		p = GafferScene.PrimitiveVariables()
		p["in"].setInput( s["out"] )
		# No members added yet: scenes and hashes must be identical.
		self.assertScenesEqual( s["out"], p["out"] )
		self.assertSceneHashesEqual( s["out"], p["out"] )
		# Adding a member must affect only the "object" child plug.
		p["primitiveVariables"].addMember( "a", IECore.IntData( 10 ) )
		self.assertScenesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
		self.assertSceneHashesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
		self.assertNotEqual( s["out"].objectHash( "/sphere" ), p["out"].objectHash( "/sphere" ) )
		self.assertNotEqual( s["out"].object( "/sphere" ), p["out"].object( "/sphere" ) )
		o1 = s["out"].object( "/sphere" )
		o2 = p["out"].object( "/sphere" )
		# NOTE(review): `o1.keys() + [ "a" ]` relies on Python 2 list keys();
		# on Python 3 dict views do not support `+` -- verify target version.
		self.assertEqual( set( o1.keys() + [ "a" ] ), set( o2.keys() ) )
		self.assertEqual( o2["a"], IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.IntData( 10 ) ) )
		# Removing the added variable must leave the original object.
		del o2["a"]
		self.assertEqual( o1, o2 )
# Allow running this test file directly as a script.
if __name__ == "__main__":
	unittest.main()
|
import logging
logger = logging.getLogger(__name__)
def get(isdsAppliance, check_mode=False, force=False):
    """Retrieve the list of available updates from the appliance.

    `check_mode` and `force` are accepted for interface uniformity with
    the other update operations; they are not used here.
    """
    description = "Retrieving available updates"
    uri = "/updates/available.json"
    return isdsAppliance.invoke_get(description, uri)
def discover(isdsAppliance, check_mode=False, force=False):
    """Trigger discovery of available updates on the appliance.

    `check_mode` and `force` are accepted for interface uniformity and unused.
    """
    endpoint = "/updates/available/discover"
    return isdsAppliance.invoke_get("Discover available updates", endpoint)
def upload(isdsAppliance, file, check_mode=False, force=False):
    """Upload an available-update .pkg file to the appliance.

    The upload is skipped when the package already appears to be present,
    unless `force` is True. In check mode, only reports that a change
    would be made.
    """
    needed = force is True or _check_file(isdsAppliance, file) is False
    if not needed:
        return isdsAppliance.create_return_object()
    if check_mode is True:
        return isdsAppliance.create_return_object(changed=True)
    file_spec = {
        'file_formfield': 'uploadedfile',
        'filename': file,
        'mimetype': 'application/octet-stream'
    }
    return isdsAppliance.invoke_post_files(
        "Upload Available Update",
        "/updates/available",
        [file_spec],
        {}, json_response=False)
def _check_file(isdsAppliance, file):
    """
    Parse the file name to see if the update is already uploaded - uses the
    version and release date embedded in the .pkg file name.
    Also checks whether the active firmware partition is already at (or past)
    the package's version.
    Note: a lot depends on the naming convention of the file.
    Sample filename - 8.0.1.9-ISS-ISDS_20181207-0045.pkg
    :param isdsAppliance: appliance to query for firmware and uploaded updates
    :param file: path of the .pkg file being considered for upload
    :return: True when already uploaded/installed (skip upload), else False
    """
    import os.path
    # If there is an exception then simply return False
    # Sample filename - 8.0.1.9-ISS-ISDS_20181207-0045.pkg
    logger.debug("Checking provided file is ready to upload: {0}".format(file))
    try:
        # Extract file name from path
        f = os.path.basename(file)
        fn = os.path.splitext(f)
        logger.debug("File name without path: {0}".format(fn[0]))
        # Split the file name on '-' hyphen and '_' underscore
        import re
        fp = re.split('-|_', fn[0])
        firm_file_version = fp[0]
        firm_file_product = fp[2]
        firm_file_date = fp[3]
        logger.debug("PKG file details: {0}: version: {1} date: {2}".format(firm_file_product, firm_file_version, firm_file_date))
        # Check if firmware level already contains the update to be uploaded or greater, check Active partition
        # firmware "name" of format - 8.0.1.9-ISS-ISDS_20181207-0045
        import ibmsecurity.isds.firmware
        ret_obj = ibmsecurity.isds.firmware.get(isdsAppliance)
        for firm in ret_obj['data']:
            # Split the partition name on '-' hyphen and '_' underscore
            fp = re.split('-|_', firm['name'])
            firm_appl_version = fp[0]
            firm_appl_product = fp[2]
            firm_appl_date = fp[3]
            logger.debug("Partition details ({0}): {1}: version: {2} date: {3}".format(firm['partition'], firm_appl_product, firm_appl_version, firm_appl_date))
            if firm['active'] is True:
                from ibmsecurity.utilities import tools
                if tools.version_compare(firm_appl_version, firm_file_version) >= 0:
                    logger.info(
                        "Active partition has version {0} which is greater or equals than install package at version {1}.".format(
                            firm_appl_version, firm_file_version))
                    return True
                else:
                    logger.info(
                        "Active partition has version {0} which is smaller than install package at version {1}.".format(
                            firm_appl_version, firm_file_version))
        # Check if update uploaded - will not show up if installed though
        # NOTE(review): at this point `fp` holds the split of the *last
        # firmware partition name* when the loop above ran, not the .pkg file
        # name; firm_file_version/firm_file_date were likely intended -- verify.
        ret_obj = get(isdsAppliance)
        for upd in ret_obj['data']:
            rd = upd['release_date']
            rd = rd.replace('-', '') # turn release date into 20161102 format from 2016-11-02
            if upd['version'] == fp[0] and rd == fp[3]: # Version of format 8.0.1.9
                return True
    except Exception as e:
        # Deliberate best-effort: any parsing or lookup failure means
        # "not found", so the caller proceeds with the upload.
        logger.debug("Exception occured: {0}".format(e))
        pass
    return False
def install(isdsAppliance, type, version, release_date, name, check_mode=False, force=False):
    """Install an available update identified by type/version/release_date/name.

    Proceeds when `force` is True or the update is confirmed ready to
    install; in check mode, only reports that a change would occur. On a
    real install request, caches the new version in the appliance facts.
    """
    proceed = force is True or _check(isdsAppliance, type, version, release_date, name) is True
    if not proceed:
        return isdsAppliance.create_return_object()
    if check_mode is True:
        return isdsAppliance.create_return_object(changed=True)
    update_spec = {
        "type": type,
        "version": version,
        "release_date": release_date,
        "name": name
    }
    ret_obj = isdsAppliance.invoke_post("Install Available Update",
                                        "/updates/available/install",
                                        {"updates": [update_spec]})
    # Remember the new firmware version in the appliance facts cache.
    isdsAppliance.facts['version'] = version
    return ret_obj
def _check(isdsAppliance, type, version, release_date, name):
    """Return True when the requested update is available for install.

    Returns False immediately if any update is mid-install, or when no
    available update matches all of type/version/release_date/name.
    """
    wanted = (type, version, release_date, name)
    for upd in get(isdsAppliance)['data']:
        # An in-progress installation blocks any further install request.
        if upd['state'] == 'Installing':
            logger.debug("Detecting a state of installing...")
            return False
        candidate = (upd['type'], upd['version'], upd['release_date'], upd['name'])
        if candidate == wanted:
            logger.debug("Requested firmware ready for install...")
            return True
    logger.debug("Requested firmware not available for install...")
    return False
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import task_classes
from task_classes import QsubAnalysisTask
class DemoQsubAnalysisTask(QsubAnalysisTask):
    """
    Demo task that will submit a single qsub job for the analysis
    """
    def __init__(self, analysis, taskname = 'DemoQsubAnalysisTask', config_file = 'DemoQsubAnalysisTask.yml', extra_handlers = None):
        """
        Parameters
        ----------
        analysis: SnsWESAnalysisOutput
            the `sns` pipeline output object to run the task on. If ``None`` is passed, ``self.analysis`` is retrieved instead.
        taskname: str
            name used for logging and as the prefix of the submitted qsub job's name
        config_file: str
            YAML file holding this task's configuration
        extra_handlers: list
            a list of extra Filehandlers to use for logging
        """
        QsubAnalysisTask.__init__(self, taskname = taskname, config_file = config_file, analysis = analysis, extra_handlers = extra_handlers)
    def main(self, analysis):
        """
        Main function for performing the analysis task on the entire analysis
        Put your code for performing the analysis task on the entire analysis here
        Parameters
        ----------
        analysis: SnsWESAnalysisOutput
            the `sns` pipeline output object to run the task on. If ``None`` is passed, ``self.analysis`` is retrieved instead.
        Returns
        -------
        qsub.Job
            a single qsub job object
        """
        self.logger.debug('Put your code for doing the analysis task in this function')
        self.logger.debug('The global configs for all tasks will be in this dict: {0}'.format(self.main_configs))
        self.logger.debug('The configs loaded from the task YAML file will be in this dict: {0}'.format(self.task_configs))
        self.logger.debug('Analysis is: {0}'.format(analysis.id))
        # output file
        output_foo = self.get_analysis_file_outpath(file_basename = 'foo.txt')
        output_bar = self.get_analysis_file_outpath(file_basename = 'bar.txt')
        self.logger.debug('output_foo is: {0}'.format(output_foo))
        self.logger.debug('output_bar is: {0}'.format(output_bar))
        # get the dir for the qsub logs
        qsub_log_dir = analysis.list_none(analysis.get_dirs('logs-qsub'))
        self.logger.debug('qsub_log_dir is {0}:'.format(qsub_log_dir))
        # make the shell command to run
        command = 'touch "{0}"; touch "{1}"; sleep 10'.format(output_foo, output_bar)
        self.logger.debug('command will be:\n{0}'.format(command))
        # submit the command as a qsub job on the HPC
        job = self.qsub.submit(command = command, name = self.taskname + '.' + analysis.id, stdout_log_dir = qsub_log_dir, stderr_log_dir = qsub_log_dir, verbose = True, sleeps = 1)
        return(job)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------#
# Security - Linux Authentication Tester with /etc/shadow #
# ============================================================================ #
# Note: To be used for test purpose only #
# Developer: Chavaillaz Johan #
# Filename: LinuxAuthenticationTesterShadow.py #
# Version: 1.0 #
# #
# Licensed to the Apache Software Foundation (ASF) under one #
# or more contributor license agreements. See the NOTICE file #
# distributed with this work for additional information #
# regarding copyright ownership. The ASF licenses this file #
# to you under the Apache License, Version 2.0 (the #
# "License"); you may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, #
# software distributed under the License is distributed on an #
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, either express or implied. See the License for the #
# specific language governing permissions and limitations #
# under the License. #
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# #
# LIBRARIES IMPORT #
# #
#------------------------------------------------------------------------------#
import sys
import crypt
import spwd
import argparse
#------------------------------------------------------------------------------#
# #
# UTILITIES FUNCTIONS #
# #
#------------------------------------------------------------------------------#
def checkAuthentication(shadowPwdDb, password):
    """
    Test a clear-text password against a stored shadow password hash.
    :param shadowPwdDb: Shadow password hash for the user (the full crypt
        string; it doubles as the salt when re-hashing)
    :type shadowPwdDb: str
    :param password: Account password to test
    :type password: str
    :return: True when the password matches the hash, False otherwise
    :rtype: bool
    """
    # crypt() re-hashes the candidate with the salt embedded in the stored
    # hash; equality means the password is correct. Return the comparison
    # directly instead of the redundant if/else True/False.
    return crypt.crypt(password, shadowPwdDb) == shadowPwdDb
def bruteForce(username, dictionary):
    """
    Try every password in the dictionary file against the given user's
    shadow entry on the current computer.
    :param username: Username whose shadow hash is attacked
    :type username: str
    :param dictionary: Path of a file containing one candidate password per line
    :type dictionary: str
    :return: the matching password, or False when no candidate matches
    """
    # The shadow hash is field 1 of the spwd entry for this user.
    stored_hash = spwd.getspnam(username)[1]
    with open(dictionary) as wordlist:
        # One candidate per line; strip only the trailing newline so
        # passwords containing spaces survive intact.
        for candidate_line in wordlist:
            candidate = candidate_line.rstrip('\n')
            if checkAuthentication(stored_hash, candidate):
                return candidate
    return False
#------------------------------------------------------------------------------#
# #
# "MAIN" FUNCTION #
# #
#------------------------------------------------------------------------------#
# If this is the main module, run this
if __name__ == '__main__':
    argsCount = len(sys.argv)
    # Create argument parser to help user
    parser = argparse.ArgumentParser(
        description='Test user authentication with a given dictionary.'
    )
    parser.add_argument(
        'username',
        type=str,
        help='Username used to test each password in given dictionary file.'
    )
    parser.add_argument(
        'dictionary',
        type=str,
        help='Dictionary file path that contains all password to test.'
    )
    # Show help if one of the arguments is missing
    if argsCount != 3:
        parser.print_help()
        sys.exit()
    # User and dictionary file in scripts arguments
    username = sys.argv[1]
    dictionary = sys.argv[2]
    # Launch script
    try:
        password = bruteForce(username, dictionary)
        if not password:
            print("Password not found in dictionary")
        else:
            print("Password found : " + password)
    except (OSError, IOError) as e:
        # open() on a missing/unreadable dictionary file lands here.
        print("Dictionary not found")
    except KeyError:
        # spwd.getspnam raises KeyError for an unknown user name.
        print("User '%s' not found" % username)
lient_mock = action_mocks.ActionMock(
searching.Find,
searching.Grep,
)
session_id = flow_test_lib.TestFlowHelper(
registry.RegistryFinder.__name__,
client_mock,
client_id=client_id,
keys_paths=keys_paths,
conditions=conditions,
creator=self.test_username)
return session_id
def testFindsNothingIfNothingMatchesTheGlob(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/NonMatch*"
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeysWithSingleGlobWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeysWithTwoGlobsWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Side*",
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Mct*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
  def testFindsKeyWithInterpolatedGlobWithoutConditions(self):
    # %%users.sid%% must be interpolated from the client's knowledge-base
    # users before glob expansion.
    user = rdf_client.User(sid="S-1-5-20")
    client_id = self.SetupClient(0, users=[user])
    session_id = self.RunFlow(client_id, [
        "HKEY_USERS/%%users.sid%%/Software/Microsoft/Windows/"
        "CurrentVersion/*"
    ])
    results = flow_test_lib.GetFlowResults(client_id, session_id)
    self.assertLen(results, 1)
    key = ("/HKEY_USERS/S-1-5-20/"
           "Software/Microsoft/Windows/CurrentVersion/Run")
    self.assertEqual(results[0].stat_entry.pathspec.CollapsePath(), key)
    self.assertEqual(results[0].stat_entry.pathspec.path, key)
    self.assertEqual(results[0].stat_entry.pathspec.pathtype,
                     rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfNothingMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10, bytes_after=10, literal=b"CanNotFindMe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
  def testFindsKeyIfItMatchesLiteralMatchCondition(self):
    # presumably the Sidebar value data contains this literal -- see the
    # client fixture referenced by the sibling tests.
    vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
        bytes_before=10,
        bytes_after=10,
        literal=b"Windows Sidebar\\Sidebar.exe")
    client_id = self.SetupClient(0)
    session_id = self.RunFlow(client_id, [self.runkey], [
        registry.RegistryFinderCondition(
            condition_type=registry.RegistryFinderCondition.Type
            .VALUE_LITERAL_MATCH,
            value_literal_match=vlm)
    ])
    results = flow_test_lib.GetFlowResults(client_id, session_id)
    self.assertLen(results, 1)
    # Exactly one match, with 10 bytes of context either side of the literal.
    self.assertLen(results[0].matches, 1)
    self.assertEqual(results[0].matches[0].offset, 15)
    self.assertEqual(results[0].matches[0].data,
                     b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
    self.assertEqual(
        results[0].stat_entry.pathspec.CollapsePath(),
        "/HKEY_USERS/S-1-5-20/Software/Microsoft/"
        "Windows/CurrentVersion/Run/Sidebar")
    self.assertEqual(results[0].stat_entry.pathspec.pathtype,
                     rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfRegexMatchesNothing(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b".*CanNotFindMe.*")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
  def testFindsKeyIfItMatchesRegexMatchCondition(self):
    # presumably the Sidebar value data matches this regex -- see the
    # client fixture referenced by the sibling tests.
    value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
        bytes_before=10, bytes_after=10, regex=b"Windows.+\\.exe")
    client_id = self.SetupClient(0)
    session_id = self.RunFlow(client_id, [self.runkey], [
        registry.RegistryFinderCondition(
            condition_type=registry.RegistryFinderCondition.Type
            .VALUE_REGEX_MATCH,
            value_regex_match=value_regex_match)
    ])
    results = flow_test_lib.GetFlowResults(client_id, session_id)
    self.assertLen(results, 1)
    # Exactly one match, with 10 bytes of context either side of the match.
    self.assertLen(results[0].matches, 1)
    self.assertEqual(results[0].matches[0].offset, 15)
    self.assertEqual(results[0].matches[0].data,
                     b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
    self.assertEqual(
        results[0].stat_entry.pathspec.CollapsePath(),
        "/HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/"
        "CurrentVersion/Run/Sidebar")
    self.assertEqual(results[0].stat_entry.pathspec.pathtype,
                     rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfModiciationTimeConditionMatchesNothing(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
max_last_modified_time=rdfvalue.RDFDa | tetime.FromSecondsSinceEpoch(1))
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
| condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
  def testFindsKeysIfModificationTimeConditionMatches(self):
    # A +/-1 second window around epoch 1247546054 -- presumably the
    # fixture keys' modification time; both Run values should match.
    modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
        min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
            1247546054 - 1),
        max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
            1247546054 + 1))
    client_id = self.SetupClient(0)
    session_id = self.RunFlow(client_id, [self.runkey], [
        registry.RegistryFinderCondition(
            condition_type=registry.RegistryFinderCondition.Type
            .MODIFICATION_TIME,
            modification_time=modification_time)
    ])
    results = flow_test_lib.GetFlowResults(client_id, session_id)
    self.assertLen(results, 2)
    # We expect Sidebar and MctAdmin keys here (see
    # test_data/client_fixture.py).
    basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
    self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeyWithLiteralAndModificationTimeConditions(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 - 1),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 + 1))
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10,
bytes_after=10,
literal=b"Windows Sidebar\\Sidebar.exe")
client_id = self.SetupClient(0)
session_id |
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from .encoding_output import EncodingOutput
class Sprite(AbstractNameDescriptionResource, AbstractModel, Serializable):
    """Sprite (thumbnail tile sheet + WebVTT index) configuration resource."""

    def __init__(self, height, width, sprite_name, vtt_name, outputs, distance=None, id_=None, custom_data=None,
                 name=None, description=None):
        """
        :param height: height in pixels of a single sprite tile
        :param width: width in pixels of a single sprite tile
        :param sprite_name: filename of the generated sprite image
        :param vtt_name: filename of the generated WebVTT file
        :param outputs: list of EncodingOutput objects or their JSON dicts
        :param distance: optional spacing value between thumbnails
        :raises InvalidTypeError: when outputs is neither None nor a list
        """
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        self._outputs = None
        self.height = height
        self.width = width
        self.distance = distance
        self.spriteName = sprite_name
        self.vttName = vtt_name
        if outputs is not None and not isinstance(outputs, list):
            raise InvalidTypeError('outputs must be a list')
        self.outputs = outputs

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build a Sprite (or subclass) from its API JSON representation."""
        id_ = json_object['id']
        custom_data = json_object.get('customData')
        width = json_object.get('width')
        height = json_object.get('height')
        distance = json_object.get('distance')
        sprite_name = json_object.get('spriteName')
        vtt_name = json_object.get('vttName')
        outputs = json_object.get('outputs')
        name = json_object.get('name')
        description = json_object.get('description')
        # Use cls(...) rather than the hard-coded Sprite(...) so subclasses
        # deserialize to their own type through this classmethod.
        sprite = cls(id_=id_, custom_data=custom_data, outputs=outputs, name=name, description=description,
                     height=height, width=width, sprite_name=sprite_name, vtt_name=vtt_name, distance=distance)
        return sprite

    @property
    def outputs(self):
        # List of EncodingOutput objects (None until first assigned).
        return self._outputs

    @outputs.setter
    def outputs(self, new_outputs):
        """Accept a list of EncodingOutput objects or of their JSON dicts.

        Assigning None is a no-op (the previous value is kept).
        """
        if new_outputs is None:
            return
        if not isinstance(new_outputs, list):
            raise InvalidTypeError('new_outputs has to be a list of EncodingOutput objects')
        if all(isinstance(output, EncodingOutput) for output in new_outputs):
            self._outputs = new_outputs
        else:
            # Raw/mixed input: parse each entry from its JSON form.
            outputs = []
            for json_object in new_outputs:
                output = EncodingOutput.parse_from_json_object(json_object)
                outputs.append(output)
            self._outputs = outputs

    def serialize(self):
        """Serialize to a plain dict, including the outputs list."""
        serialized = super().serialize()
        serialized['outputs'] = self.outputs
        return serialized
|
ry_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 0)
    def test_employment(self):
        # A job's institution becomes searchable only after the user is saved
        # with the new jobs entry.
        user = factories.UserFactory(fullname='Helga Finn')
        user.save()
        institution = 'Finn\'s Fine Filers'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.jobs.append({
            'institution': institution,
            'title': 'The Big Finn',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)
    def test_education(self):
        # A school's institution becomes searchable only after the user is
        # saved with the new schools entry.
        user = factories.UserFactory(fullname='Henry Johnson')
        user.save()
        institution = 'Henry\'s Amazing School!!!'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.schools.append({
            'institution': institution,
            'degree': 'failed all classes',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)
    def test_name_fields(self):
        # Each name component (full, given, middle, family, suffix) must
        # find exactly the same single user document.
        names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
        user = factories.UserFactory(fullname=names[0])
        user.given_name = names[1]
        user.middle_names = names[2]
        user.family_name = names[3]
        user.suffix = names[4]
        user.save()
        docs = [query_user(name)['results'] for name in names]
        assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
        assert_true(all([user._id == doc[0]['id'] for doc in docs]))
class TestProject(OsfTestCase):
    """Tests that project privacy controls search visibility."""
    def setUp(self):
        super(TestProject, self).setUp()
        # Start every test from a freshly rebuilt Elasticsearch index.
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = factories.UserFactory(fullname='John Deacon')
        self.project = factories.ProjectFactory(title='Red Special', creator=self.user)
    def test_new_project_private(self):
        # Verify that a private project is not present in Elastic Search.
        docs = query(self.project.title)['results']
        assert_equal(len(docs), 0)
    def test_make_public(self):
        # Make project public, and verify that it is present in Elastic
        # Search.
        with run_celery_tasks():
            self.project.set_privacy('public')
        docs = query(self.project.title)['results']
        assert_equal(len(docs), 1)
class TestNodeSearch(OsfTestCase):
    """Tests that node licenses are indexed and kept current in search."""
    def setUp(self):
        super(TestNodeSearch, self).setUp()
        # Build a small tree: public root with a public child, a private
        # child, and a public grandchild under the private child.
        with run_celery_tasks():
            self.node = factories.ProjectFactory(is_public=True, title='node')
            self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child')
            self.private_child = factories.ProjectFactory(parent=self.node, title='private_child')
            self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True)
            self.node.node_license = factories.NodeLicenseRecordFactory()
            self.node.save()
        self.query = 'category:project & category:component'
    @retry_assertion()
    def test_node_license_added_to_search(self):
        # The root node's license id must appear on its search document.
        docs = query(self.query)['results']
        node = [d for d in docs if d['title'] == self.node.title][0]
        assert_in('license', node)
        assert_equal(node['license']['id'], self.node.node_license.license_id)
    @unittest.skip("Elasticsearch latency seems to be causing theses tests to fail randomly.")
    @retry_assertion(retries=10)
    def test_node_license_propogates_to_children(self):
        # Children without their own license show the parent's in search.
        docs = query(self.query)['results']
        child = [d for d in docs if d['title'] == self.public_child.title][0]
        assert_in('license', child)
        assert_equal(child['license'].get('id'), self.node.node_license.license_id)
        child = [d for d in docs if d['title'] == self.public_subchild.title][0]
        assert_in('license', child)
        assert_equal(child['license'].get('id'), self.node.node_license.license_id)
    @unittest.skip("Elasticsearch latency seems to be causing theses tests to fail randomly.")
    @retry_assertion(retries=10)
    def test_node_license_updates_correctly(self):
        # Changing the node's license must be reflected on all documents.
        other_license = NodeLicense.objects.get(name='MIT License')
        new_license = factories.NodeLicenseRecordFactory(node_license=other_license)
        self.node.node_license = new_license
        self.node.save()
        docs = query(self.query)['results']
        for doc in docs:
            assert_equal(doc['license'].get('id'), new_license.license_id)
class TestRegistrationRetractions(OsfTestCase):
def setUp(self):
super(TestRegistrationRetractions, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
creator=self.user,
is_public=True,
)
self.registration = factories.RegistrationFactory(project=self.project, is_public=True)
@mock.patch('website.project.tasks.update_node_share')
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_is_searchable(self, mock_registration_updated):
self.registration.retract_registration(self.user)
self.registration.retraction.state = Retraction.APPROVED
self.registration.retraction.save()
self.registration.save()
self.registration.retraction._on_complete(self.user)
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
    @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
    def test_pending_retraction_wiki_content_is_searchable(self):
        """Wiki content stays searchable while a retraction is only pending.

        NOTE(review): the retraction below is never approved, so both the
        wiki text and the registration are expected to remain indexed.
        """
        # Add unique string to wiki
        wiki_content = {'home': 'public retraction test'}
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            with run_celery_tasks():
                self.registration.create_or_update_node_wiki(name=key, content=value, auth=self.consolidate_auth)
            # Query and ensure unique string shows up
            docs = query(value)['results']
            assert_equal(len(docs), 1)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        # Retract registration (pending approval only — never approved here)
        self.registration.retract_registration(self.user, '')
        with run_celery_tasks():
            self.registration.save()
            self.registration.reload()
        # While the retraction is still pending the wiki text is STILL indexed;
        # the earlier comment claimed it "doesn't show up", but the assertion
        # (and the test name) expect exactly one hit.
        docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
        assert_equal(len(docs), 1)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_wiki_content_is_not_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.registration.create_or_update_node_wiki(name=key, content=value, auth=self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
self.registration.retraction.state = Retraction.APPROVED
with run_celery_ta |
#!/usr/bin/env jython
import sys
# Make the bundled iText jar importable by Jython; the commented entries are
# alternative system-wide jar locations kept for reference.
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
    """Fill the AcroForm fields of orig_pdf and write a flattened copy.

    orig_pdf -- path of the template PDF containing form fields
    new_pdf  -- path the filled, flattened PDF is written to
    vals     -- dict mapping field name -> UTF-8 encoded byte string value
    Returns True on success; raises Exception naming the offending field
    on any per-field failure.
    """
    #print "pdf_fill",orig_pdf,new_pdf,vals
    t0=time.time()
    #print orig_pdf
    rd=PdfReader(orig_pdf)
    #print new_pdf
    #print t0
    st=PdfStamper(rd,FileOutputStream(new_pdf))
    # Embedded Garuda font with IDENTITY_H encoding so Thai (and other
    # non-Latin) text renders in the output.
    font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
    form=st.getAcroFields()
    for k,v in vals.items():
        try:
            form.setFieldProperty(k,"textfont",font,None)
            form.setField(k,v.decode('utf-8'))
        except Exception,e:
            # Re-raise with the field name so callers can see which field failed.
            raise Exception("Field %s: %s"%(k,str(e)))
    # Flattening bakes the values into the page content (form becomes read-only).
    st.setFormFlattening(True)
    st.close()
    t1=time.time()
    #print "finished in %.2fs"%(t1-t0)
    return True
def pdf_merge(pdf1,pdf2):
    """Merge two PDFs.

    NOTE(review): this looks like an unimplemented stub — pdf2 is never
    used and pdf1 is returned unchanged. Confirm before relying on it.
    """
    #print "pdf_merge",orig_pdf,vals
    t0=time.time()
    pdf=pdf1
    t1=time.time()
    #print "finished in %.2fs"%(t1-t0)
    return pdf
# Expose both helpers over XML-RPC on localhost:9999 and block serving requests.
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
|
from __future__ import print_function
import math, nltk
from termcolor import colored
from analyze import generate_stopwords, sanitize
from vector import Vector
class NaiveBayesClassifier():
    """Binary (+/-) naive Bayes sentiment classifier for tweets.

    Training happens eagerly at construction time from
    data/train_pos.txt and data/train_neg.txt, using the word list in
    data/features.txt as the feature set.
    """
    def __init__(self):
        """Load features and stopwords, then train both class models."""
        # Per-class word-frequency vectors; unseen words default to 1
        # until train() rescales everything by the tweet count.
        self.c = {"+" : Vector(), "-" : Vector()}
        for vector in self.c.values():
            vector.default = 1
        self.classes = ["+", "-"]
        # Class priors — slightly biased toward positive.
        self.prior = {"+" : 0.55, "-" : 0.45}
        self.stopwords = generate_stopwords()
        self.features = set()
        f = open("data/features.txt", "r")
        for line in f:
            self.features.add(line.strip().lower())
        f.close()
        # Begin training
        f_pos = open("data/train_pos.txt", "r")
        f_neg = open("data/train_neg.txt", "r")
        self.train("+", f_pos)
        self.train("-", f_neg)
        f_pos.close()
        f_neg.close()
    def train(self, sentiment, tweets):
        """Accumulate word statistics for one class.

        @param {string} sentiment = "+" || "-"
               {iterable} tweets = file_with_tagged_tweets
        @return None
        """
        freq = self.c[sentiment]
        total = 0.0
        for tweet in tweets:
            total += 1
            words = sanitize(tweet, self.stopwords)
            for word in words:
                if word in self.features: # word in our pre-made features list
                    # NOTE(review): the weight 100 appears empirically tuned;
                    # confirm before changing.
                    freq[word] += 100
        # Normalise accumulated weights by the number of training tweets.
        for word in freq:
            freq[word] = freq[word] / total
        # Unseen words get a uniform pseudo-frequency.
        freq.default = 1/total
    def posterior(self, sentiment, sanitized_tweet):
        """
        Computes the posterior (Bayesian Probability term) of a sanitized tweet
        Probability model for a classifier is a conditional model
        p(C, F1,...,Fn) = ( p(c)p(F1,...,Fn|C) ) / p(F1,...,Fn)
        ...
        In English, using Bayesian Probability terminology, the equation can be written as
                         prior * likelihood
            posterior = --------------------
                             evidence
        in our case, we have:
            p(sentiment, sanitized_tweet)
        @param {string} sentiment = "+" or "-"
               {set} sanitized_tweet = set of sanitized words in tweet
        @return {float}
        """
        #print "sanitized tweet = %s" % sanitized_tweet
        #print math.log(self.prior[sentiment])
        #print "self.prior[sentiment] = %s" % self.prior[sentiment]
        # Work in log space to avoid underflow from multiplying small probs.
        p = math.log(self.prior[sentiment])
        values = self.c[sentiment]
        #print "%s : original p: %f" % (sentiment, p)
        for word in sanitized_tweet:
            if word in self.features: # word is in the features list, so apply the score for the feature based on the sentiment
                p += math.log(values[word])
                # print "%s : %f" % (word, math.log(values[word]))
            else:
                # NOTE(review): if values[word] >= 0.1 this raises a math
                # domain error (log of a non-positive number) — verify the
                # intended smoothing term.
                p += math.log(.1 - values[word])
                # print "%s : %f" % (word, math.log(.1 - values[word]))
        #print p
        return p
        '''
        for feature in self.features:
            #print "c[%s] = %s" % (feature, c[feature])
            if feature in sanitized_tweet:
                p += math.log(1 - c[feature]) # add feature's score per the sentiment
            else:
                p += math.log(1 - c[feature])
        return p
        '''
    def classify(self, tweet, verbose=False, eval=False):
        """
        Classifies a text's sentiment given the posterior of of its class
        Picks the largest posterior between that of "+" and "-"
        However, if there is not enough confidence (i.e. if mpt posterior(c1|tweet) < 2*posterior(c2|tweet),
        then we classify as neutral ("~") because we don't have conclusive evidence
        @param {string} tweet
        @return {string} sentiment = "+" || "-" || "~"

        NOTE(review): the `eval` parameter shadows the builtin; when eval is
        False nothing is returned (implicitly None) — the result is only
        printed.
        """
        sanitized = sanitize(tweet, self.stopwords)
        # print sanitized
        sentiment = {}
        bigrams = nltk.bigrams(sanitized)
        trigrams = nltk.trigrams(sanitized)
        # NOTE(review): in the bigram/trigram branches each iteration
        # overwrites sentiment[s], so only the LAST n-gram's posterior is
        # kept — confirm whether a sum/average was intended.
        if len(sanitized) <= 22:
            for s in self.classes:
                sentiment[s] = self.posterior(s, sanitized) # Calculate posterior for positive and negative sentiment
                if verbose: print(s, sanitized, self.posterior(s, sanitized))
        elif len(sanitized) == 23:
            for s in self.classes:
                for pair in bigrams:
                    sentiment[s] = self.posterior(s, pair)
                    if verbose: print (s, pair, self.posterior(s, pair))
        else:
            # use trigram model
            for s in self.classes:
                for tri in trigrams:
                    sentiment[s] = self.posterior(s, tri)
                    if verbose: print (s, tri, self.posterior(s, tri))
        positive = sentiment["+"] # Get calculated posterior of positive sentiment
        negative = sentiment["-"] # Get calculated posterior fo negative sentiment
        #print "positive: %s negative: %s" % (positive, negative)
        # Crude negation handling: flip the losing score's sign when a
        # negation word is present. NOTE(review): abs() on a log-probability
        # is unusual — verify this heuristic.
        if "not" in sanitized or "despite" in sanitized:
            if positive > + math.log(1.3) + negative:
                negative = abs(negative)
            elif negative > math.log(9) + positive:
                positive = abs(positive)
        if verbose: print("positive: %f negative: %f" % (positive, negative))
        # Decision thresholds (log-space margins); asymmetric on purpose?
        # NOTE(review): 1.3 vs .9 — confirm the intended margins.
        if positive > + math.log(1.3) + negative:
            if eval: return "+"
            else: print(colored('+', 'green'))
        elif negative > math.log(.9)+positive:
            if eval: return "-"
            else: print(colored('-', 'red'))
        else:
            if eval: return "~"
            else: print(colored('~', 'white'))
    def evaluate(self):
        """Score the classifier on the verify_* files and print
        precision / recall / accuracy / F-measure to stdout."""
        totalp = totaln = 0
        t = w = 0 # total = 0, wrong = 0
        fp = fn = 0 # false positive = 0, false negative = 0
        for tweet in open("data/verify_pos.txt"):
            t += 1.0
            totalp += 1.0
            e = self.classify(tweet, False, eval=True)
            if e != "+":
                if e == "-": fn += 1
                w += 1.0
        tp = t - w # true positive
        print(colored('Positive', 'green'), end="")
        print(" - accuracy: %.2f%%" % self.accuracy(w, t)) # make function that displays values correctly
        t = w = 0
        for tweet in open("data/verify_neg.txt"):
            t += 1.0
            totaln += 1.0
            e = self.classify(tweet, False, eval=True)
            if e != "-":
                if e == "+": fp += 1
                w += 1.0
        tn = t - w # true negative
        print(colored('Negative', 'red'), end="")
        print(" - accuracy: %.2f%%" % self.accuracy(w, t))
        w = t = 0
        for tweet in open("data/verify_neutral.txt"):
            t += 1.0
            if "~" != self.classify(tweet, verbose=False, eval=True):
                w += 1.0
        # print "Neutral - accuracy: %s" % self.accuracy(w, t)
        # Precision
        # = TP / (TP + FP)
        precision = (tp / (tp + fp))
        print(colored("\nPrecision: ", "magenta") + "%.2f" % precision)
        # Recall
        # = TP / (TP + FN)
        recall = (tp / (tp + fn))
        print(colored("Recall: ", "magenta") + "%.2f" % recall)
        # Accuracy
        # = (TP + TN) / (P + N)
        accuracy = (tp + tn) / (totalp + totaln) * 100
        print(colored("Accuracy: ", "magenta") + "%.2f%%" % accuracy)
        # F-score
        # measure of test's accuracy - considers both the precision and recall
        f_score = 2 * (precision*recall) / (precision+recall)
        print(colored("\nF-Measure: ", "cyan") + "%.2f" % f_score)
    def accuracy(self, w, t):
        """Percentage of correct classifications given w wrong out of t total."""
        return (1 - (w/t)) * 100
    def __repr__(self):
        # NOTE(review): returns None — Python will raise if repr() is called.
        pass
# Instantiate (and thereby train) the classifier at import time.
c = NaiveBayesClassifier()
|
# -*- codin | g: utf-8 -*-
# © 2015 Eficent Business and IT Consulting Services S.L. -
# Jordi Ballester Alomar
# License AGPL-3.0 or later (https://www.gnu.org/licens | es/agpl.html).
from . import models
|
'''
compile_test.py - check pyximport functionality with pysam
==========================================================
test script for checking if compilation against
pysam and tabix works.
'''
# clean up previous compilation
import os
import unittest
import pysam
from TestUtils import make_data_files, BAM_DATADIR, TABIX_DATADIR
def setUpModule():
    """Build the fixture files needed by both the BAM and tabix checks."""
    for datadir in (BAM_DATADIR, TABIX_DATADIR):
        make_data_files(datadir)
# Remove artefacts from a previous build so pyximport recompiles from scratch;
# a missing file is fine.
try:
    os.unlink('tests/_compile_test.c')
    os.unlink('tests/_compile_test.pyxbldc')
except OSError:
    pass
# Compile and import the Cython test module in place (not in a temp dir).
import pyximport
pyximport.install(build_in_temp=False)
import _compile_test
class BAMTest(unittest.TestCase):
    """Verify the compiled Cython module can iterate a BAM file."""
    input_filename = os.path.join(BAM_DATADIR, "ex1.bam")
    def testCount(self):
        samfile = pysam.Samfile(self.input_filename)
        observed = _compile_test.testCountBAM(samfile)
        self.assertEqual(observed, 3270)
class GTFTest(unittest.TestCase):
    """Verify the compiled Cython module can iterate a tabix-indexed GTF."""
    input_filename = os.path.join(TABIX_DATADIR, "example.gtf.gz")
    def testCount(self):
        tabixfile = pysam.Tabixfile(self.input_filename)
        observed = _compile_test.testCountGTF(tabixfile)
        self.assertEqual(observed, 237)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
""" Title: Ch3LpfPlotResponse - Chapter 3: Plot filter response
Author: Ricardo Alejos
Date: 2016-09-20
Description: Plots the micro-strip filter response against the specifications
Version: 1.0.0
Comments: -
"""
# Import Python's built-in | modules
import csv as _csv
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import math as _math
# Add project root directory to sys.path so other modules can be imported
# NOTE(review): Windows-style "\\.." path concatenation — non-portable.
_projectRoot = _os.path.abspath(__file__ + "\\..\\..\\..")
if _projectRoot not in _sys.path:
    _sys.path.insert(0, _projectRoot)
# Base name of this script, reused for the logger name and output files.
_strThisFileName = _os.path.splitext(_os.path.basename(__file__))[0]
import pkg.Algorithm.SimAnnMin as _sam
import pkg.ObjectiveFunctions.MsLpf as _lpf
import pkg.Helpers.MatlabFunctions as _mf
def _initLogger():
    """(Re)initialise the module-level `logger`.

    Creates a logger named after this file with an INFO console handler and
    a DEBUG file handler, first removing any handlers left over from a
    previous call so repeated initialisation does not duplicate output.
    """
    global logger
    logger = _logging.getLogger(_strThisFileName)
    logger.setLevel(_logging.DEBUG)
    # Bug fix: map() is lazy on Python 3, so the original
    # `map(logger.removeHandler, logger.handlers[:])` never executed its
    # side effect there. An explicit loop works on both Python 2 and 3.
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    ch = _logging.StreamHandler(_sys.stdout)
    ch.setLevel(_logging.INFO)
    fh = _logging.FileHandler(_strThisFileName + ".log")
    fh.setLevel(_logging.DEBUG)
    formatter = _logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addHandler(fh)
    logger.debug("A new logger session has started.")
# Configure logging as soon as the module loads.
_initLogger()
# Filter test cases: n = number of filter sections; w1, l1, w2 = micro-strip
# geometry (units presumably mm — TODO confirm) for the initial point x0 and
# the optimised point x*. `filename` is the plot output name.
cases = (
    dict(
        title = ("Filter response using","\\it{n}\\rm=2 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
        n = 2,
        w1 = 1.222,
        l1 = 5.4050,
        w2 = 2.5944,
        filename = "ch3_fresp_n2_x0"
    ),
    dict(
        title = ("Filter response using","\\it{n}\\rm=4 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
        n = 4,
        w1 = 1.222,
        l1 = 5.4050,
        w2 = 2.5944,
        filename = "ch3_fresp_n4_x0"
    ),
    dict(
        title = ("Filter response using","\\it{n}\\rm=8 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
        n = 8,
        w1 = 1.222,
        l1 = 5.4050,
        w2 = 2.5944,
        filename = "ch3_fresp_n8_x0"
    ),
    dict(
        title = ("Filter response using","\\it{n}\\rm=2 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
        n = 2,
        w1 = 1.5242,
        l1 = 4.9000,
        w2 = 2.4500,
        filename = "ch3_fresp_n2_xopt"
    ),
    dict(
        title = ("Filter response using","\\it{n}\\rm=4 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
        n = 4,
        w1 = 1.4564,
        l1 = 4.9000,
        w2 = 2.4500,
        filename = "ch3_fresp_n4_xopt"
    ),
    dict(
        title = ("Filter response using","\\it{n}\\rm=8 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
        n = 8,
        w1 = 1.3750,
        l1 = 4.9000,
        w2 = 3.0625,
        filename = "ch3_fresp_n8_xopt"
    ),
)
def PlotResponse(w1, l1, w2, n, title, filename = None):
    """Plot |S21| of the micro-strip low-pass filter against the two
    specification masks (pass-band up to 6 GHz, stop-band from 8 GHz).

    w1, l1, w2 -- geometry parameters passed through to the LPF model
    n          -- number of filter sections
    title      -- plot title (passed to the Matlab-style plot helper)
    filename   -- optional output file name for the saved figure
    """
    raw = _lpf.getRawResponseData([w1, l1, w2], n)
    freqPoints = raw["freq"]
    # |S21| from the real and imaginary parts, point by point.
    s21Magnitude = []
    for idx in range(len(freqPoints)):
        s21Magnitude.append(_math.sqrt(raw["s21r"][idx]**2 + raw["s21i"][idx]**2))
    specMasks = (([0,0,6e9,6e9],[0.85,0.9,0.9,0.85]), ([8e9,8e9,10e9,10e9],[0.15,0.1,0.1,0.15]))
    _mf.PlotVsSpecs(freqPoints, s21Magnitude, specMasks, title, "Frequency (Hz)", "|S_{21}|", filename)
def main():
    """Run the simulated-annealing optimisation for each configuration,
    plotting the evolution of the objective value and writing a CSV report.
    """
    # "wb" mode for csv indicates Python 2 — csv.DictWriter needs "w" with
    # newline='' on Python 3.
    with open(_strThisFileName + "_" + _time.strftime('%Y%m%d%H%M%S') + ".csv", "wb") as fhReport:
        lRptFld = [
            "k",
            "iter",
            "ui",
            "uo"
        ]
        cwReport = _csv.DictWriter(fhReport, lRptFld)
        cwReport.writeheader()
        # Each letter selects the Typical/Fast/Slow variant of the
        # temperature, step-size and acceptance functions, in that order.
        lstSaCfg = ["TTT"]
        numItn = 50
        dicTmeFun = dict(
            T = _sam.TmeFns.typical(numItn),
            F = _sam.TmeFns.fast(numItn),
            S = _sam.TmeFns.slow(numItn)
        )
        dicSseFun = dict(
            T = _sam.SseFns.typical,
            F = _sam.SseFns.fast,
            S = _sam.SseFns.slow
        )
        dicAceFun = dict(
            T = _sam.AceFns.typical,
            F = _sam.AceFns.fast,
            S = _sam.AceFns.slow
        )
        lstK = [8] #[2,4,8]
        for strSaCfg in lstSaCfg:
            for k in lstK:
                # Objective function u_k for this problem size.
                uk = _lpf.getInterfaceFunction(k)
                logger.info("Running SAM using the %s configuration."%strSaCfg)
                dReportRow = dict((key, None) for key in lRptFld)
                dReportRow["k"] = k
                SamObj = _sam.SimAnnMin()
                SamObj.setObeFun(uk)
                SamObj.setTmeLst(dicTmeFun[strSaCfg[0]])
                SamObj.setSseFun(dicSseFun[strSaCfg[1]])
                SamObj.setAceFun(dicAceFun[strSaCfg[2]])
                SamObj.setX0([-0.7,0.5,0.1])
                SamObj.runAll()
                # Per-iteration objective values (current and best-so-far).
                lstUi = SamObj._lUi
                lstUo = SamObj._lUo
                lstIter = range(len(lstUi))
                _mf.Plot(lstIter,
                    lstUi,
                    "Evolution of \\it{u}\\rm_{%d} during SA optimiziation."%(k),
                    "Iteration",
                    "\\it{u}\\rm_{%d}"%(k),
                    "sa-evol_u%dall"%(k))
                _mf.Plot(lstIter,
                    lstUo,
                    "Evolution of \\it{u}\\rm_{%d}* during SA optimization"%(k),
                    "Iteration",
                    "\\it{u}\\rm_{%d}"%(k),
                    "sa-evol_u%dopt"%(k))
                for iter in lstIter:
                    dReportRow["iter"] = iter
                    dReportRow["ui"] = "%0.4f"%lstUi[iter]
                    dReportRow["uo"] = "%0.4f"%lstUo[iter]
                logger.info("Finished processing of u%d"%k)
                # NOTE(review): writerow sits outside the iteration loop, so
                # only the final iteration's row reaches the CSV — confirm
                # whether that is intended.
                cwReport.writerow(dReportRow)
# Run immediately on import/execution (no __main__ guard in this script).
main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
# Support for fake joystick/gamepad during devlopment
# if no 'real' joystick/gamepad is available use keyboard emulation
# 'ctrl' + 'alt' + numberKey
from __future__ import absolute_import, division, print_function
from psychopy import event
class Joystick(object):
    """Fake joystick that maps ctrl+alt+<digit> key chords and mouse
    buttons onto joystick buttons, and the mouse position onto the axes."""

    def __init__(self, device_number):
        self.device_number = device_number
        self.numberKeys = ['0','1','2','3','4','5','6','7','8','9']
        self.modifierKeys = ['ctrl','alt']
        self.mouse = event.Mouse()

    def getNumButtons(self):
        """Number of emulated buttons (one per digit key)."""
        return len(self.numberKeys)

    def getAllButtons(self):
        """Return a bool per button; True when its ctrl+alt+digit chord was
        pressed, OR-ed with the real mouse buttons for the first few slots."""
        chorded = []
        for key, modifiers in event.getKeys(keyList=self.numberKeys, modifiers=True):
            if all(modifiers[modKey] for modKey in self.modifierKeys):
                chorded.append(key)
        state = [key in chorded for key in self.numberKeys]
        # Merge in the physical mouse buttons (non-zero means pressed).
        for idx, pressed in enumerate(self.mouse.getPressed()):
            if idx < len(state):
                state[idx] = state[idx] or pressed != 0
        self.state = state
        return self.state

    def getX(self):
        """Horizontal axis, emulated by the mouse x position."""
        return self.mouse.getPos()[0]

    def getY(self):
        """Vertical axis, emulated by the mouse y position."""
        return self.mouse.getPos()[1]
|
# -*- coding: L | atin-1 -*-
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See License.txt for complete terms.
# file object -> CybOX File Object mappings
# Key: source file-object property name; value: the corresponding CybOX
# FileObject attribute name. Most map 1:1; 'size' and 'entropy' are renamed.
file_object_mappings = {'file_format': 'file_format',
                        'type': 'type',
                        'file_name': 'file_name',
                        'file_path': 'file_path',
                        'size': 'size_in_bytes',
                        'magic_number': 'magic_number',
                        'file_extension': 'file_extension',
                        'entropy': 'peak_entropy'}
|
, sideB = self.sqGetPointLists()
toDeleteList = []
p = 2
for | k in range((sideA+2), (sideB-1)):
if p%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(k)+"]")
toDeleteList.append(self.baseCurve+".cv["+str(k+len(pointListA)-1)+"]")
p = p+1
q = 2
m = sideA-2
if | m >= 0:
while m >= 0:
if not m == sideA and not m == sideB:
if q%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(m)+"]")
m = m-1
q = q+1
cmds.delete(toDeleteList)
cmds.insertKnotCurve([self.baseCurve+".u["+str(len(pointListA)-1)+"]", self.baseCurve+".ep["+str(len(pointListA)-1)+"]"], constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
posListA, posListB = [], []
for i in range(0, len(pointListA)-1):
posListA.append(cmds.xform(pointListA[i], query=True, worldSpace=True, translation=True))
posListB.append(cmds.xform(pointListB[i], query=True, worldSpace=True, translation=True))
self.mainCurveA = cmds.curve(name="StickyLips_Main_A_Crv", degree=1, point=posListA)
self.mainCurveB = cmds.curve(name="StickyLips_Main_B_Crv", degree=1, point=posListB)
cmds.rename(cmds.listRelatives(self.mainCurveA, children=True, shapes=True)[0], self.mainCurveA+"Shape")
cmds.rename(cmds.listRelatives(self.mainCurveB, children=True, shapes=True)[0], self.mainCurveB+"Shape")
cmds.select(self.mainCurveA+".cv[*]")
self.curveLenght = len(cmds.ls(selection=True, flatten=True))
cmds.select(clear=True)
self.sqCheckCurveDirection(self.mainCurveA)
self.sqCheckCurveDirection(self.mainCurveB)
self.baseCurveA = cmds.duplicate(self.mainCurveA, name=self.mainCurveA.replace("_Main_", "_Base_"))[0]
self.baseCurveB = cmds.duplicate(self.mainCurveB, name=self.mainCurveB.replace("_Main_", "_Base_"))[0]
cmds.delete(self.baseCurve)
self.maxIter = len(posListA)
cmds.group(self.mainCurveA, self.mainCurveB, self.baseCurveA, self.baseCurveB, name="StickyLips_StaticData_Grp")
else:
mel.eval("warning \"Please, select an closed edgeLoop.\";")
def sqCheckCurveDirection(self, thisCurve, *args):
posMinX = cmds.xform(thisCurve+".cv[0]", query=True, worldSpace=True, translation=True)[0]
posMaxX = cmds.xform(thisCurve+".cv["+str(self.curveLenght-1)+"]", query=True, worldSpace=True, translation=True)[0]
if posMinX > posMaxX:
cmds.reverseCurve(thisCurve, constructionHistory=False, replaceOriginal=True)
def sqGetPointLists(self, *args):
cmds.select(self.baseCurve+".cv[*]")
pointList = cmds.ls(selection=True, flatten=True)
minX = 0
maxX = 0
sideA = 0
sideB = 0
for i in range(0, len(pointList)):
pointPosX = cmds.xform(pointList[i], query=True, worldSpace=True, translation=True)[0]
if pointPosX < minX:
minX = pointPosX
sideA = i
elif pointPosX > maxX:
maxX = pointPosX
sideB = i
if sideA > sideB:
sideC = sideA
sideA = sideB
sideB = sideC
pointListA = pointList[sideA:(sideB+1)]
pointListB = pointList[sideB:]
for j in range(0, (sideA+1)):
pointListB.append(pointList[j])
return pointListA, pointListB, sideA, sideB
def sqCreateClusters(self, curveA, curveB, *args):
self.clusterList = []
for i in range(1, self.curveLenght-1):
self.clusterList.append(cmds.cluster([curveA+".cv["+str(i)+"]", curveB+".cv["+str(i)+"]"], name="StickyLips_"+str(`i-1`)+"_Cls")[1])
return self.clusterList
def sqGenerateMuscleLocators(self, *args):
muscleLoaded = True
if not cmds.pluginInfo('MayaMuscle.mll', query=True, loaded=True):
muscleLoaded = False
try:
cmds.loadPlugin('MayaMuscle.mll')
muscleLoaded = True
except:
print "Error: Can not load the Maya Muscle plugin!"
pass
if muscleLoaded:
minIndex = 0
minPosX = 1000000000000000 # just to avoid non centered characters
minPosId = 0
vertexPairList = []
muscleLocatorList = []
for e, edgeName in enumerate(self.edgeList):
tempCompList = cmds.polyListComponentConversion(edgeName, fromEdge=True, toVertex=True)
tempExpList = cmds.filterExpand(tempCompList, selectionMask=31, expand=True)
vertexPairList.append(tempExpList)
edgePosA = cmds.xform(tempExpList[0], query=True, worldSpace=True, translation=True)[0]
edgePosB = cmds.xform(tempExpList[1], query=True, worldSpace=True, translation=True)[0]
if edgePosA < minPosX:
minIndex = e
minPosX = edgePosA
minPosId = 0
if edgePosB < minPosX:
minIndex = e
minPosX = edgePosB
minPosId = 1
usedIndexList = []
usedIndexList.append(minIndex)
lastIndexUp = minIndex
lastIndexDown = 0
upEdgeList = []
upEdgeList.append(self.edgeList[minIndex])
downEdgeList = []
for i in range(0, len(vertexPairList)-1):
if not i == minIndex:
if vertexPairList[i][0] in vertexPairList[minIndex][minPosId] or vertexPairList[i][1] in vertexPairList[minIndex][minPosId]:
downEdgeList.append(self.edgeList[i])
usedIndexList.append(i)
lastIndexDown = i
for i in range(0, self.maxIter-2):
for j in range(0, len(vertexPairList)):
if not j in usedIndexList:
if vertexPairList[j][0] in vertexPairList[lastIndexUp] or vertexPairList[j][1] in vertexPairList[lastIndexUp]:
upEdgeList.append(self.edgeList[j])
usedIndexList.append(j)
lastIndexUp = j
break
for j in range(0, len(vertexPairList)):
if not j in usedIndexList:
if vertexPairList[j][0] in vertexPairList[lastIndexDown] or vertexPairList[j][1] in vertexPairList[lastIndexDown]:
downEdgeList.append(self.edgeList[j])
usedIndexList.append(j)
lastIndexDown = j
break
upMinusDown = len(upEdgeList) - len(downEdgeList)
downMinusUp = len(downEdgeList) - len(upEdgeList)
if upMinusDown > 1:
for i in range(0, upMinusDown):
if not len(upEdgeList) == (self.maxIter-3):
downEdgeList.append(upEdgeList[len(upEdgeList)-1])
upEdgeList = upEdgeList[:-1]
if downMinusUp > 1:
for i in range(0, downMinusUp):
if not len(upEdgeList) == (self.maxIter-3):
upEdgeList.append(downEdgeList[len(downEdgeList)-1])
downEdgeList = downEdgeList[:-1]
upEdgeList = upEdgeList[:self.maxIter-1]
dow |
logging.getLogger(__name__), energieopwek_nl=True):
if target_datetime is None:
target_datetime = arrow.utcnow()
else:
target_datetime = arrow.get(target_datetime)
r = session or requests.session()
consumptions = ENTSOE.fetch_consumption(zone_key=zone_key,
session=r,
target_datetime=target_datetime,
logger=logger)
if not consumptions:
return
for c in consumptions:
del c['source']
df_consumptions = pd.DataFrame.from_dict(consumptions).set_index(
'datetime')
# NL has exchanges with BE, DE, NO, GB, DK-DK1
exchanges = []
for exchange_key in ['BE', 'DE', 'GB']:
zone_1, zone_2 = sorted([exchange_key, zone_key])
exchange = ENTSOE.fetch_exchange(zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger)
if not exchange:
return
exchanges.extend(exchange or [])
# add NO data, fetch once for every hour
# This introduces an error, because it doesn't use the average power flow
# during the hour, but rather only the value during the first minute of the
# hour!
zone_1, zone_2 = sorted(['NO', zone_key])
exchange_NO = [statnett.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=dt.datetime,
logger=logger)
for dt in arrow.Arrow.range(
'hour',
arrow.get(min([e['datetime']
for e in exchanges])).replace(minute=0),
arrow.get(max([e['datetime']
for e in exchanges])).replace(minute=0))]
exchanges.extend(exchange_NO)
# add DK1 data (only for dates after operation)
if target_datetime > arrow.get('2019-08-24', 'YYYY-MM-DD') :
zone_1, zone_2 = sorted(['DK-DK1', zone_key])
df_dk = pd.DataFrame(DK.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=target_datetime,
logger=logger))
# Because other exchanges and consumption data is only available per hour
# we floor the timpstamp to hour and group by hour with averaging of netFlow
df_dk['datetime'] = df_dk['datetime'].dt.floor('H')
exchange_DK = df_dk.groupby(['datetime']).aggregate({'netFlow' : 'mean',
'sortedZoneKeys': 'max', 'source' : 'max'}).reset_index()
# because averaging with high precision numbers leads to rounding errors
exchange_DK = exchange_DK.round({'netFlow': 3})
exchanges.extend(exchange_DK.to_dict(orient='records'))
# We want to know the net-imports into NL, so if NL is in zone_1 we need
# to flip the direction of the flow. E.g. 100MW for NL->DE means 100MW
# export to DE and needs to become -100MW for import to NL.
for e in exchanges:
if(e['sortedZoneKeys'].startswith('NL->')):
e['NL_import'] = -1 * e['netFlow']
else:
e['NL_import'] = e['netFlow']
del e['source']
del e['netFlow']
df_exchanges = pd.DataFrame.from_dict(exchanges).set_index('datetime')
# Sum all exchanges to NL imports
df_exchanges = df_exchanges.groupby('datetime').sum()
# Fill missing values by propagating the value forward
df_consumptions_with_exchanges = df_consumptions.join(df_exchanges).fillna(
method='ffill', limit=3) # Limit to 3 x 15min
# Load = Generation + netImports
# => Generation = Load - netImports
df_total_generations = (df_consumptions_with_exchanges['consumption']
- df_consumptions_with_exchanges['NL_import'])
# Fetch all production
# The energieopwek_nl parser is backwards compatible with ENTSOE parser.
# Because of data quality issues we switch to using energieopwek, but if
# data quality of ENTSOE improves we can switch back to using a single
# source.
productions_ENTSOE = ENTSOE.fetch_production(zone_key=zone_key, session=r,
target_datetime=target_datetime, logger=logger)
if energieopwek_nl:
productions_eopwek = fetch_production_energieopwek_nl(session=r,
target_datetime=target_datetime, logger=logger)
# For every production | value we look up the corresponding ENTSOE
# valu | es and copy the nuclear, gas, coal, biomass and unknown production.
productions = []
for p in productions_eopwek:
entsoe_value = next((pe for pe in productions_ENTSOE
if pe["datetime"] == p["datetime"]), None)
if entsoe_value:
p["production"]["nuclear"] = entsoe_value["production"]["nuclear"]
p["production"]["gas"] = entsoe_value["production"]["gas"]
p["production"]["coal"] = entsoe_value["production"]["coal"]
p["production"]["biomass"] = entsoe_value["production"]["biomass"]
p["production"]["unknown"] = entsoe_value["production"]["unknown"]
productions.append(p)
else:
productions = productions_ENTSOE
if not productions:
return
# Flatten production dictionaries (we ignore storage)
for p in productions:
# if for some reason therรฉ's no unknown value
if not 'unknown' in p['production'] or p['production']['unknown'] == None:
p['production']['unknown'] = 0
Z = sum([x or 0 for x in p['production'].values()])
# Only calculate the difference if the datetime exists
# If total ENTSOE reported production (Z) is less than total generation
# (calculated from consumption and imports), then there must be some
# unknown production missing, so we add the difference.
# The difference can actually be negative, because consumption is based
# on TSO network load, but locally generated electricity may never leave
# the DSO network and be substantial (e.g. Solar).
if p['datetime'] in df_total_generations and Z < df_total_generations[p['datetime']]:
p['production']['unknown'] = round((
df_total_generations[p['datetime']] - Z + p['production']['unknown']), 3)
# Filter invalid
# We should probably add logging to this
return [p for p in productions if p['production']['unknown'] > 0]
def fetch_production_energieopwek_nl(session=None, target_datetime=None,
                                     logger=logging.getLogger(__name__)):
    """Return quarter-hourly NL production dicts built from energieopwek.nl
    data covering the target day and the day before it."""
    if target_datetime is None:
        target_datetime = arrow.utcnow()
    # Fetch both days, oldest first, so the concatenated frame stays in
    # chronological order.
    day_frames = [
        get_production_data_energieopwek(
            target_datetime.shift(days=-1), session=session),
        get_production_data_energieopwek(
            target_datetime, session=session),
    ]
    df = pd.concat(day_frames)
    # Timestamps start at local midnight of the previous day, in UTC.
    base_time = arrow.get(target_datetime.date(), 'Europe/Paris').shift(days=-1).to('utc')
    output = []
    for offset, prod in enumerate(df.to_dict(orient='records')):
        output.append(
            {
                'zoneKey': 'NL',
                'datetime': base_time.shift(minutes=offset*15).datetime,
                'production': prod,
                'source': 'energieopwek.nl, entsoe.eu'
            }
        )
    return output
def get_production_data_energieopwek(date, session=None):
r = session or requests.session()
# The API returns values per day from local time midnight until the last
# round 10 minutes if the requested date is today or for the entire day if
# it's in the past. 'sid' can be anything.
url = 'http://energieopwek.nl/jsonData.php?s |
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from sklearn.preprocessing import StandardScaler, normalize
import sys
import common
# Experiment configuration defaults (overridable via the argparse args that
# the load_* functions receive).
FACT = 'pmi' # nmf/pmi_wl/pmi_wp/pmi_wlp
DIM = 200
DATASET = 'MSDmm'
WINDOW = 1
NSAMPLES = 'all' #all
# Cap on how many patches are used to fit the StandardScaler.
MAX_N_SCALER = 300000
N_PATCHES = 3
def scale(X, scaler=None, max_N=MAX_N_SCALER):
    """Standard-scale a 4-D patch tensor feature-wise.

    X is viewed as 2-D (n_patches, height*width), transformed, and restored
    to its original shape in place. When no scaler is supplied, a fresh
    StandardScaler is fitted on at most max_N patches.

    Returns (X_scaled, scaler) so the fitted scaler can be persisted/reused.
    """
    shape = X.shape
    X.shape = (shape[0], shape[2] * shape[3])
    if not scaler:
        scaler = StandardScaler()
        # Fix: the original used pd.np.min([...]) — the pandas.np alias is
        # deprecated/removed in modern pandas; builtin min of two ints is
        # the correct, dependency-free idiom.
        N = min(len(X), max_N)  # Limit the number of patches to fit
        scaler.fit(X[:N])
    X = scaler.transform(X)
    X.shape = shape
    return X, scaler
def load_X(args):
    """Load spectrogram patches for the training items, scale them, and
    persist X, the fitted scaler, and the song-id index to disk.

    Only .npy patch files whose item id appears in
    items_index_train_<DATASET>.tsv and whose patch number is below
    args.npatches are used.
    """
    data_path = '../data/patches_%s_%s/' % (DATASET, args.window)
    progress_update = 1
    data_files = glob.glob(os.path.join(data_path, "*.npy"))
    #songs_in = set(open(common.DATASETS_DIR+'/trainset_%s.tsv' %
    #               (args.dataset)).read().splitlines())
    if len(data_files) == 0:
        raise ValueError("Error: Empty directory %s" % data_path)
    # Item ids that belong to the training split.
    index_factors = set(open(common.DATASETS_DIR+'/items_index_train_'+DATASET+'.tsv').read().splitlines())
    data_files_in = []
    # Filenames look like <item_id>_<patch_number>.npy.
    for file in data_files:
        filename = file[file.rfind('/')+1:-4]
        item_id, npatch = filename.split('_')
        if int(npatch) < args.npatches and item_id in index_factors:
            data_files_in.append(file)
    all_X = []
    songs_dataset = []
    # Load the first patch to discover the patch shape, then pre-allocate.
    X_mbatch = np.load(data_files_in[0])
    X = np.zeros((len(data_files_in),1,X_mbatch.shape[0],X_mbatch.shape[1]))
    for i, data_file in enumerate(data_files_in):
        song_id = data_file[data_file.rfind('/')+1:data_file.rfind('_')]
        X_mbatch = np.load(data_file)
        X[i,0,:,:] = X_mbatch
        #if len(all_Y) == 0:
        #    plt.imshow(X_mbatch,interpolation='nearest',aspect='equal')
        #    plt.show()
        #all_X.append(X_mbatch.reshape(-1,X_mbatch.shape[0],X_mbatch.shape[1]))
        songs_dataset.append(song_id)
        if i % progress_update == 0:
            sys.stdout.write("\rLoading Data: %.2f%%" % (100 * i / float(len(data_files_in))))
            sys.stdout.flush()
    sys.stdout.write("\rLoading Data: 100%")
    sys.stdout.flush()
    print "X data loaded"
    output_suffix_X = '%s_%sx%s' % (args.dataset,args.npatches,args.window)
    scaler_file=common.DATASETS_DIR+'/train_data/scaler_%s.pk' % output_suffix_X
    X,scaler = scale(X)
    # Persist the fitted scaler so evaluation can reuse it.
    pickle.dump(scaler,open(scaler_file,'wb'))
    X_file = common.DATASETS_DIR+'/train_data/X_train_'+output_suffix_X
    np.save(X_file,X)
    # Row order of X corresponds line-by-line to this song-id index.
    fw=open(common.DATASETS_DIR+'/train_data/index_train_'+output_suffix_X+'.tsv','w')
    fw.write("\n".join(songs_dataset))
def load_Y(args):
    """Build the Y target matrix of item factors aligned with the X index.

    Reads the song order produced by load_X, looks each song up in the
    factorization output, L2-normalizes the rows in place, and saves Y.
    (Python 2 source: print statements.)
    """
    progress_update = 1
    output_suffix_X = '%s_%sx%s' % (args.dataset,args.npatches,args.window)
    index_X=open(common.DATASETS_DIR+'/train_data/index_train_'+output_suffix_X+'.tsv').read().splitlines()
    song_factors=np.load(common.DATASETS_DIR+'/item_factors_%s_%s_%s.npy' % (args.fact,args.dim,args.dataset))
    song_index=open(common.DATASETS_DIR+'/items_index_%s.tsv' % (args.dataset)).read().splitlines()
    #print common.DATASETS_DIR+'/song_factors_%s_%s_%s.npy' % (args.fact,args.dim,args.dataset)
    print len(song_index)
    # Invert the index: song_id -> row number in song_factors.
    inv_song_index = dict()
    for i,song_id in enumerate(song_index):
        inv_song_index[song_id] = i
    # Read all data into memory (this might need to change if data too large)
    all_Y = []
    songs_dataset = []
    Y = np.zeros((len(index_X), int(args.dim)))
    for i, song_id in enumerate(index_X):
        # all_Y.append(song_factors[inv_song_index[song_id]])
        Y[i, :] = song_factors[inv_song_index[song_id]]
        if i % progress_update == 0:
            sys.stdout.write("\rLoading Data: %.2f%%" %
                             (100 * i / float(len(index_X))))
            sys.stdout.flush()
    sys.stdout.write("\rLoading Data: 100%")
    sys.stdout.flush()
    print "Y data loaded"
    output_suffix_Y = '%s_%s_%s_%sx%s' % (args.fact, args.dim, args.dataset,
                                          args.npatches, args.window)
    # In-place row normalization; the return value is deliberately ignored.
    normalize(Y, copy=False)
    Y_file = common.DATASETS_DIR+'/train_data/Y_train_'+output_suffix_Y
    np.save(Y_file, Y)
if __name__ == '__main__':
    # CLI entry point: choose which of X (patches) and Y (factors) to build.
    parser = argparse.ArgumentParser(
        description='Trains the model',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-d',
                        '--dataset',
                        dest="dataset",
                        type=str,
                        help='Dataset name',
                        default=DATASET)
    parser.add_argument('-f',
                        '--fact',
                        dest="fact",
                        type=str,
                        help='Factorization method',
                        default=FACT)
    parser.add_argument('-dim',
                        '--dim',
                        dest="dim",
                        type=str,
                        help='Factors dimensions',
                        default=DIM)
    parser.add_argument('-w',
                        '--window',
                        dest="window",
                        type=str,
                        help='Patches window size in seconds',
                        default=WINDOW)
    parser.add_argument('-np',
                        '--npatches',
                        dest="npatches",
                        type=str,
                        help='Number of patches',
                        default=N_PATCHES)
    parser.add_argument('-x',
                        '--loadx',
                        dest="loadX",
                        help='Load X',
                        action='store_true',
                        default=False)
    parser.add_argument('-y',
                        '--loady',
                        dest="loadY",
                        help='Load Y',
                        action='store_true',
                        default=False)
    parser.add_argument('-all',
                        '--all',
                        dest="all_data",
                        help='All data, test and train set together',
                        action='store_true',
                        default=False)
    args = parser.parse_args()
    # NOTE(review): args.npatches/args.window are declared type=str but are
    # compared/formatted as numbers elsewhere — confirm intended.
    if args.loadX:
        load_X(args)
    if args.loadY:
        load_Y(args)
|
import sys

# Emit the classic greeting; the explicit newline matches print()'s output.
sys.stdout.write("Hello, World!\n")
from cantilever_divingboard import *
# We need to scale the parameters before applying the optimization algorithm
# Normally there are about 20 orders of magnitude between the dimensions and
# the doping concentration, so this is a critical step |
# Run the script
# Optimization targets: frequency band of interest and minimum resonant
# frequency (numeric units are defined by cantilever_divingboard — TODO
# confirm they are Hz / rad/s as the names suggest).
freq_min = 1e3
freq_max = 1e5
omega_min = 100e3
# Nine design variables; their order/meaning is fixed by optimize_cantilever
# (presumably geometry dims, a ratio, and a doping concentration — verify).
initial_guess = (50e-6, 1e-6, 1e-6,
                 30e-6, 1e-6, 1e-6, 500e-9, 5., 1e15)
# Per-variable (lower, upper) bounds, parallel to initial_guess.
constraints = ((30e-6, 100e-6), (500e-9, 20e-6), (1e-6, 10e-6),
               (2e-6, 100e-6), (500e-9, 5e-6), (500e-9, 20e-6), (30e-9, 10e-6),
               (1., 10.), (1e15, 4e19))
x = optimize_cantilever(initial_guess, constraints, freq_min, freq_max, omega_min)
c = cantilever_divingboard(freq_min, freq_max, x)
c.print_performance()
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows the complex DAG structure.
"""
from datetime import datetime
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
with models.DAG(
    dag_id="example_complex",
    schedule_interval=None,
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=['example', 'example2', 'example3'],
) as dag:
    # Create
    create_entry_group = BashOperator(task_id="create_entry_group", bash_command="echo create_entry_group")
    create_entry_group_result = BashOperator(
        task_id="create_entry_group_result", bash_command="echo create_entry_group_result"
    )
    create_entry_group_result2 = BashOperator(
        task_id="create_entry_group_result2", bash_command="echo create_entry_group_result2"
    )
    create_entry_gcs = BashOperator(task_id="create_entry_gcs", bash_command="echo create_entry_gcs")
    create_entry_gcs_result = BashOperator(
        task_id="create_entry_gcs_result", bash_command="echo create_entry_gcs_result"
    )
    create_entry_gcs_result2 = BashOperator(
        task_id="create_entry_gcs_result2", bash_command="echo create_entry_gcs_result2"
    )
    create_tag = BashOperator(task_id="create_tag", bash_command="echo create_tag")
    create_tag_result = BashOperator(task_id="create_tag_result", bash_command="echo create_tag_result")
    create_tag_result2 = BashOperator(task_id="create_tag_result2", bash_command="echo create_tag_result2")
    create_tag_template = BashOperator(task_id="create_tag_template", bash_command="echo create_tag_template")
    create_tag_template_result = BashOperator(
        task_id="create_tag_template_result", bash_command="echo create_tag_template_result"
    )
    create_tag_template_result2 = BashOperator(
        task_id="create_tag_template_result2", bash_command="echo create_tag_template_result2"
    )
    create_tag_template_field = BashOperator(
        task_id="create_tag_template_field", bash_command="echo create_tag_template_field"
    )
    create_tag_template_field_result = BashOperator(
        task_id="create_tag_template_field_result", bash_command="echo create_tag_template_field_result"
    )
    # FIX: this task previously echoed "create_tag_template_field_result"
    # (missing the trailing "2"), inconsistent with every other *_result2 task.
    create_tag_template_field_result2 = BashOperator(
        task_id="create_tag_template_field_result2", bash_command="echo create_tag_template_field_result2"
    )

    # Delete
    delete_entry = BashOperator(task_id="delete_entry", bash_command="echo delete_entry")
    create_entry_gcs >> delete_entry
    delete_entry_group = BashOperator(task_id="delete_entry_group", bash_command="echo delete_entry_group")
    create_entry_group >> delete_entry_group
    delete_tag = BashOperator(task_id="delete_tag", bash_command="echo delete_tag")
    create_tag >> delete_tag
    delete_tag_template_field = BashOperator(
        task_id="delete_tag_template_field", bash_command="echo delete_tag_template_field"
    )
    delete_tag_template = BashOperator(task_id="delete_tag_template", bash_command="echo delete_tag_template")

    # Get
    get_entry_group = BashOperator(task_id="get_entry_group", bash_command="echo get_entry_group")
    get_entry_group_result = BashOperator(
        task_id="get_entry_group_result", bash_command="echo get_entry_group_result"
    )
    get_entry = BashOperator(task_id="get_entry", bash_command="echo get_entry")
    get_entry_result = BashOperator(task_id="get_entry_result", bash_command="echo get_entry_result")
    get_tag_template = BashOperator(task_id="get_tag_template", bash_command="echo get_tag_template")
    get_tag_template_result = BashOperator(
        task_id="get_tag_template_result", bash_command="echo get_tag_template_result"
    )

    # List
    list_tags = BashOperator(task_id="list_tags", bash_command="echo list_tags")
    list_tags_result = BashOperator(task_id="list_tags_result", bash_command="echo list_tags_result")

    # Lookup
    lookup_entry = BashOperator(task_id="lookup_entry", bash_command="echo lookup_entry")
    lookup_entry_result = BashOperator(task_id="lookup_entry_result", bash_command="echo lookup_entry_result")

    # Rename
    rename_tag_template_field = BashOperator(
        task_id="rename_tag_template_field", bash_command="echo rename_tag_template_field"
    )

    # Search
    search_catalog = BashOperator(task_id="search_catalog", bash_command="echo search_catalog")
    search_catalog_result = BashOperator(
        task_id="search_catalog_result", bash_command="echo search_catalog_result"
    )

    # Update
    update_entry = BashOperator(task_id="update_entry", bash_command="echo update_entry")
    update_tag = BashOperator(task_id="update_tag", bash_command="echo update_tag")
    update_tag_template = BashOperator(task_id="update_tag_template", bash_command="echo update_tag_template")
    update_tag_template_field = BashOperator(
        task_id="update_tag_template_field", bash_command="echo update_tag_template_field"
    )

    # Create
    create_tasks = [
        create_entry_group,
        create_entry_gcs,
        create_tag_template,
        create_tag_template_field,
        create_tag,
    ]
    chain(*create_tasks)

    # NOTE(review): a few edges below repeat dependencies already declared next
    # to the delete tasks above (e.g. create_entry_group >> delete_entry_group);
    # redundant but harmless.
    create_entry_group >> delete_entry_group
    create_entry_group >> create_entry_group_result
    create_entry_group >> create_entry_group_result2
    create_entry_gcs >> delete_entry
    create_entry_gcs >> create_entry_gcs_result
    create_entry_gcs >> create_entry_gcs_result2
    create_tag_template >> delete_tag_template_field
    create_tag_template >> create_tag_template_result
    create_tag_template >> create_tag_template_result2
    create_tag_template_field >> delete_tag_template_field
    create_tag_template_field >> create_tag_template_field_result
    create_tag_template_field >> create_tag_template_field_result2
    create_tag >> delete_tag
    create_tag >> create_tag_result
    create_tag >> create_tag_result2

    # Delete
    delete_tasks = [
        delete_tag,
        delete_tag_template_field,
        delete_tag_template,
        delete_entry_group,
        delete_entry,
    ]
    chain(*delete_tasks)

    # Get
    create_tag_template >> get_tag_template >> delete_tag_template
    get_tag_template >> get_tag_template_result
    create_entry_gcs >> get_entry >> delete_entry
    get_entry >> get_entry_result
    create_entry_group >> get_entry_group >> delete_entry_group
    get_entry_group >> get_entry_group_result

    # List
    create_tag >> list_tags >> delete_tag
    list_tags >> list_tags_result

    # Lookup
    create_entry_gcs >> lookup_entry >> delete_entry
    lookup_entry >> lookup_entry_result

    # Rename
    create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field

    # Search
    chain(create_tasks, search_catalog, delete_tasks)
    search_catalog >> search_catalog_result

    # Update
    create_entry_gcs >> update_entry >> delete_entry
    create_tag >> update_tag >> delete_tag
    create_tag_template >> update_tag_template >> delete_tag_template
    create_tag_template_field >> update_tag_template_field >> rename_tag_template_field
|
def pytest_addoption(parser):
    """Register the --integration flag that opts in to integration tests."""
    parser.addoption('--integration', action='store_true',
                     help='run integration tests')
def pytest_ignore_collect(path, config):
    """Skip paths containing 'integration' unless --integration was given."""
    if config.getoption('integration'):
        return None
    if 'integration' not in str(path):
        return None
    return True
|
def read_logfile_by_line(logfile):
    """Yield each line of *logfile* (newlines kept), then a final ``None``.

    The trailing ``None`` is a sentinel consumed by parse_commands to mark
    end of input.
    """
    with open(logfile, 'r') as handle:
        while True:
            raw = handle.readline()
            if not raw:
                break
            yield raw
    yield None
def parse_commands(log_content):
    """Extract cwl docker command lines from a line generator.

    *log_content* must yield log lines and terminate with a falsy sentinel
    (see read_logfile_by_line).  Returns a list of commands, each a list of
    backslash-continuation segments with trailing '\\' and spaces removed.
    """
    commands = []
    current = []
    collecting = False
    line = next(log_content)
    while line:
        text = line.strip('\n')
        # A '[job ...] ... docker \' line starts a new command; drop the
        # job prefix and keep only the 'docker \' part.
        if '[job' in text and text.endswith('docker \\'):
            text = 'docker \\'
            collecting = True
        if collecting:
            current.append(text.strip('\\').rstrip(' '))
            if not text.endswith('\\'):
                # No continuation: the command is complete.
                collecting = False
                commands.append(current)
                current = []
        line = next(log_content)
    return commands
|
import copy
import json
import logging
import threading
import uuid
from flask import Flask, abort, jsonify, request
import kubernetes
# Flask application instance.
app = Flask(__name__)
# NOTE(review): hard-coded session secret — fine for a demo, not for production.
app.secret_key = "mega secret key"
# In-memory job registry (job_id -> job dict); not persisted across restarts.
JOB_DB = {}
def get_config(experiment):
    """Return the configuration entry for *experiment* from config_template.json."""
    with open('config_template.json', 'r') as config_file:
        all_configs = json.load(config_file)
    return all_configs[experiment]
def filter_jobs(job_db):
    """Return a deep copy of *job_db* with internal bookkeeping removed.

    Strips the 'obj' and 'deleted' fields (always present) and 'pod' when it
    holds a truthy value; the input dict is left untouched.
    """
    sanitized = copy.deepcopy(job_db)
    for entry in sanitized.values():
        del entry['obj']
        del entry['deleted']
        if entry.get('pod'):
            del entry['pod']
    return sanitized
@app.route('/api/v1.0/jobs', methods=['GET'])
def get_jobs():
    """Return all locally tracked jobs, minus internal bookkeeping fields."""
    return jsonify({"jobs": filter_jobs(JOB_DB)}), 200
@app.route('/api/v1.0/k8sjobs', methods=['GET'])
def get_k8sjobs():
    """Return the job list as reported live by the kubernetes helper module."""
    return jsonify({"jobs": kubernetes.get_jobs()}), 200
@app.route('/api/v1.0/jobs', methods=['POST'])
def create_job():
    """Create a Kubernetes job from the posted JSON description.

    Required fields: 'experiment' and 'docker-img'.  Optional: 'cmd',
    'env-vars'.  Returns 201 with the new job id, 400 on a bad payload,
    or 500 when the job could not be allocated.
    """
    # Reject payloads missing required fields.  (The original condition used
    # confusing parentheses: `not ('experiment') in request.json`.)
    if (not request.json
            or 'experiment' not in request.json
            or 'docker-img' not in request.json):
        print(request.json)
        abort(400)
    cmd = request.json.get('cmd')
    env_vars = request.json.get('env-vars', {})
    experiment_config = get_config(request.json['experiment'])
    k8s_volume = experiment_config['k8s_volume']
    job_id = str(uuid.uuid4())
    job_obj = kubernetes.create_job(job_id,
                                    request.json['docker-img'],
                                    cmd,
                                    [(k8s_volume, '/data')],
                                    env_vars,
                                    request.json['experiment'])
    if job_obj:
        # Track the job locally, augmenting the request payload with
        # bookkeeping fields used by the watcher threads.
        job = copy.deepcopy(request.json)
        job['job-id'] = job_id
        job['status'] = 'started'
        job['restart_count'] = 0
        job['max_restart_count'] = 3
        job['obj'] = job_obj
        job['deleted'] = False
        JOB_DB[job_id] = job
        return jsonify({'job-id': job_id}), 201
    else:
        return jsonify({'job': 'Could not be allocated'}), 500
@app.route('/api/v1.0/jobs/<job_id>', methods=['GET'])
def get_job(job_id):
    """Return one job by id without internal fields; 404 if unknown."""
    if job_id in JOB_DB:
        job_copy = copy.deepcopy(JOB_DB[job_id])
        # Same sanitization as filter_jobs(), applied to a single entry.
        del(job_copy['obj'])
        del(job_copy['deleted'])
        if job_copy.get('pod'):
            del(job_copy['pod'])
        return jsonify({'job': job_copy}), 200
    else:
        abort(404)
if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(threadName)s - %(levelname)s: %(message)s'
    )
    # Background watchers keep JOB_DB in sync with cluster job/pod events.
    # NOTE(review): the threads are not daemonized or joined, so the process
    # will linger if the Flask server exits — confirm intended.
    job_event_reader_thread = threading.Thread(target=kubernetes.watch_jobs,
                                               args=(JOB_DB,))
    job_event_reader_thread.start()
    pod_event_reader_thread = threading.Thread(target=kubernetes.watch_pods,
                                               args=(JOB_DB,))
    pod_event_reader_thread.start()
    app.run(debug=True, port=5000,
            host='0.0.0.0')
|
# encoding: utf-8
# This file is part of Guacamole.
#
# Copyright 2012-2015 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Guacamole is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3,
# as published by the Free Software Foundation.
#
# Guacamole is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Guacamole. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the cmdtree module."""
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from guacamole.core import Bowl
from guacamole.ingredients.cmdtree import CommandTreeBuilder
from guacamole.recipes.cmd import Command
class _sub(Command):
    # Fixture sub-command: declares a spice the parent does not, so tests can
    # verify spices are collected from the top-level command only.
    spices = ('mustard',)
class _cmd(Command):
    # Fixture top-level command with one registered sub-command.
    spices = ('salt', 'pepper')
    sub_commands = (('sub', _sub),)
class CommandTreeBuilderTests(unittest.TestCase):
    """Tests for the CommandTreeBuilder class."""

    def setUp(self):
        """Common initialization method."""
        # Build and "eat" a bowl containing only the command tree ingredient.
        self.bowl = Bowl([CommandTreeBuilder(_cmd())])
        self.bowl.eat()

    def test_build_command_tree(self):
        """check if a correct command tree is built."""
        # cmd_tree layout: (name, command_obj, children) — root name is None.
        cmd_obj = self.bowl.context.cmd_tree[1]
        sub_obj = self.bowl.context.cmd_tree[2][0][1]
        self.assertIsInstance(cmd_obj, _cmd)
        self.assertIsInstance(sub_obj, _sub)
        self.assertEqual(
            self.bowl.context.cmd_tree,
            (None, cmd_obj, (('sub', sub_obj, ()),)))

    def test_collect_spices(self):
        """check if spices are collected from top-level command only."""
        self.assertTrue(self.bowl.has_spice('salt'))
        self.assertTrue(self.bowl.has_spice('pepper'))
        self.assertFalse(self.bowl.has_spice('mustard'))
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import current_app, redirect, request
from werkzeug.exceptions import NotFound
from indico.modules.attachments.controllers.util import SpecificAttachmentMixin
from indico.modules.attachments.models.legacy_mapping import LegacyAttachmentFolderMapping, LegacyAttachmentMapping
from indico.modules.events import LegacyEventMapping
from indico.util.string import is_legacy_id
from indico.web.flask.util import url_for
from MaKaC.webinterface.rh.base import RHSimple, RH
def _clean_args(kwargs):
    """Normalize legacy URL kwargs in place.

    Maps legacy event ids to the new ids, renames contrib/subcontrib keys,
    drops the cosmetic 'ext' key, and discards 'session_id' when the target
    is a contribution.  Raises NotFound when no event id is present.
    """
    if 'event_id' not in kwargs:
        raise NotFound
    event_id = kwargs['event_id']
    if is_legacy_id(event_id):
        mapping = LegacyEventMapping.find(legacy_event_id=event_id).first_or_404()
        kwargs['event_id'] = mapping.event_id
    for legacy_key, new_key in (('contrib_id', 'contribution_id'),
                                ('subcontrib_id', 'subcontribution_id')):
        if legacy_key in kwargs:
            kwargs[new_key] = kwargs.pop(legacy_key)
    # extension is just to make the links prettier
    kwargs.pop('ext', None)
    # session id is only used for actual sessions, not for stuff inside them
    if 'contribution_id' in kwargs:
        kwargs.pop('session_id', None)
@RHSimple.wrap_function
def compat_folder(**kwargs):
    """Redirect a legacy folder URL to the new attachment folder listing."""
    _clean_args(kwargs)
    target_folder = LegacyAttachmentFolderMapping.find(**kwargs).first_or_404().folder
    if target_folder.is_deleted:
        raise NotFound
    # Permanent redirect in production; temporary in debug so it isn't cached.
    status = 302 if current_app.debug else 301
    return redirect(url_for('attachments.list_folder', target_folder), status)
def compat_folder_old():
    """Handle the oldest folder URLs, which passed ids as query arguments."""
    arg_names = {'confId': 'event_id',
                 'sessionId': 'session_id',
                 'contribId': 'contrib_id',
                 'subContId': 'subcontrib_id',
                 'materialId': 'material_id'}
    kwargs = {}
    # Python 2 codebase: request.args exposes iteritems().
    for key, value in request.args.iteritems():
        if key in arg_names:
            kwargs[arg_names[key]] = value
    return compat_folder(**kwargs)
def _redirect_to_note(**kwargs):
    """Redirect a legacy minutes-material URL to the event notes view."""
    # Raises KeyError if the expected legacy keys are absent, matching the
    # original `del` behavior.
    kwargs.pop('material_id')
    kwargs.pop('resource_id')
    kwargs['confId'] = kwargs.pop('event_id')
    status = 302 if current_app.debug else 301
    return redirect(url_for('event_notes.view', **kwargs), status)
@RHSimple.wrap_function
def compat_attachment(**kwargs):
    """Redirect a legacy material/resource URL to the attachment download URL.

    Special-cases the 'minutes' material by redirecting to the event notes
    view; 404s for unknown or deleted attachments.
    """
    _clean_args(kwargs)
    mapping = LegacyAttachmentMapping.find_first(**kwargs)
    if mapping is None:
        if kwargs['material_id'] == 'minutes' and kwargs['resource_id'] == 'minutes':
            return _redirect_to_note(**kwargs)
        raise NotFound
    attachment = mapping.attachment
    if attachment.is_deleted or attachment.folder.is_deleted:
        raise NotFound
    # Permanent redirect in production; temporary in debug so it isn't cached.
    return redirect(attachment.download_url, 302 if current_app.debug else 301)
class RHCompatAttachmentNew(SpecificAttachmentMixin, RH):
    """RH that exists purely so the mixin's URL normalization redirects
    legacy attachment URLs to the 'attachments.download' endpoint."""

    normalize_url_spec = dict(SpecificAttachmentMixin.normalize_url_spec,
                              endpoint='attachments.download')

    def _process(self):
        # Never reached in practice: normalization always redirects first.
        raise Exception('This RH should only perform URL normalization!')
|
import | sys
[_, ms, _, ns] = list(sys.stdin)
ms = set(int(m) for m in ms.split(' '))
ns = set(int(n) for n in ns.split(' '))
print(sep='\n', *sorted(ms.difference(ns).uni | on(ns.difference(ms))))
|
import os
from whylog.log_reader.exceptions impor | t EmptyFile, OffsetBiggerThanFileSize
class ReadUtils(object):
    """Helpers for extracting a single line from a file by byte offset."""

    STANDARD_BUFFER_SIZE = 512

    @classmethod
    def size_of_opened_file(cls, fh):
        """Return the size of *fh*, restoring its current position afterwards."""
        prev_position = fh.tell()
        fh.seek(0, os.SEEK_END)
        size = fh.tell()
        fh.seek(prev_position)
        return size

    @classmethod
    def _read_content(cls, fd, position, buf_size):
        # Read up to buf_size units starting at the given offset.
        fd.seek(position)
        return fd.read(buf_size)

    @classmethod
    def _read_split_lines(cls, fd, position, buf_size):
        # Split a chunk on newlines; the first/last elements may be partial
        # lines cut at the chunk boundaries.
        content = cls._read_content(fd, position, buf_size)
        return content.split('\n')

    @classmethod
    def _join_results(cls, first_part, second_part):
        """Merge adjacent chunks' line lists, gluing the boundary line
        (last of first_part + first of second_part) back together."""
        if not first_part:
            if not second_part:
                return []
            return second_part
        if not second_part:
            return first_part
        return first_part[:-1] + ["".join((first_part[-1], second_part[0]))] + second_part[1:]

    @classmethod
    def _expand_after(cls, fd, position):
        """Return the text from *position* to the end of that line."""
        fd.seek(position)
        line = fd.readline()
        if not line:
            # Nothing to read: the offset is at or past end of file.
            raise OffsetBiggerThanFileSize(position)
        return line.rstrip('\n')

    @classmethod
    def _expand_before(cls, fd, position, buf_size):
        """Return the text preceding *position* on the same line, scanning
        backwards in buf_size chunks."""
        before = []
        # Two accumulated entries guarantee a full newline was crossed, so
        # before[-1] is the complete head of the target line.
        while len(before) < 2:
            position -= buf_size
            if position <= 0:
                lines = cls._read_split_lines(fd, 0, position + buf_size)
                before = cls._join_results(lines, before)
                break
            lines = cls._read_split_lines(fd, position, buf_size)
            before = cls._join_results(lines, before)
        if not before:
            raise EmptyFile()
        return before[-1]

    @classmethod
    def _read_entire_line(cls, fd, offset, buf_size):
        # Line text plus the offsets of its first and one-past-last character.
        after = cls._expand_after(fd, offset)
        before = cls._expand_before(fd, offset, buf_size)
        return before + after, offset - len(before), offset + len(after)

    @classmethod
    def get_line_containing_offset(cls, fd, offset, buf_size):
        """
        returns line which contains the specified offset
        and returns also offsets of the first and the last sign of this line.
        if there is '\n' on specified offset, the previous line is returned
        """
        return cls._read_entire_line(fd, offset, buf_size)
|
#!python
# -*- coding: utf-8 -*-
from os import path
import shutil
def install():
    """Render the ilmaruuvi systemd unit template and write it to
    /etc/systemd/system (requires root)."""
    filename = 'ilmaruuvi.service'
    install_path = path.join('/etc/systemd/system', filename)
    here = path.abspath(path.dirname(__file__))
    with open(path.join(here, filename), 'r') as template:
        service = template.read()
    # Fill in the unit's WorkingDirectory/ExecStart placeholders.
    service = service.format(working_dir=here, exec_start=shutil.which('ilmaruuvi'))
    with open(install_path, 'w') as unit_file:
        unit_file.write(service)
all PTX special objects.
"""
from __future__ import print_function, absolute_import, division
import operator
import numpy
import llvmlite.llvmpy.core as lc
from numba import types, ir, typing, macro
from .cudadrv import nvvm
class Stub(object):
    '''A stub object to represent special objects which is meaningless
    outside the context of CUDA-python.
    '''
    _description_ = '<ptx special value>'
    __slots__ = () # don't allocate __dict__

    def __new__(cls):
        # Stubs are compile-time markers only; they must never be instantiated.
        raise NotImplementedError("%s is not instantiable" % cls)

    def __repr__(self):
        return self._description_
#-------------------------------------------------------------------------------
# SREG
SREG_SIGNATURE = typing.signature(types.int32)
class threadIdx(Stub):
    '''
    The thread indices in the current thread block, accessed through the
    attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
    range from 0 inclusive to the corresponding value of the attribute in
    :attr:`numba.cuda.blockDim` exclusive.
    '''
    _description_ = '<threadIdx.{x,y,z}>'
    # Each axis maps to the PTX special register tid.{x,y,z}.
    x = macro.Macro('tid.x', SREG_SIGNATURE)
    y = macro.Macro('tid.y', SREG_SIGNATURE)
    z = macro.Macro('tid.z', SREG_SIGNATURE)
class blockIdx(Stub):
    '''
    The block indices in the grid of thread blocks, accessed through the
    attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
    range from 0 inclusive to the corresponding value of the attribute in
    :attr:`numba.cuda.gridDim` exclusive.
    '''
    _description_ = '<blockIdx.{x,y,z}>'
    # Each axis maps to the PTX special register ctaid.{x,y,z}.
    x = macro.Macro('ctaid.x', SREG_SIGNATURE)
    y = macro.Macro('ctaid.y', SREG_SIGNATURE)
    z = macro.Macro('ctaid.z', SREG_SIGNATURE)
class blockDim(Stub):
    '''
    The shape of a block of threads, as declared when instantiating the
    kernel. This value is the same for all threads in a given kernel, even
    if they belong to different blocks (i.e. each block is "full").
    '''
    # Consistency fix: the sibling SREG stubs (threadIdx, blockIdx, gridDim)
    # all define _description_; without it, Stub.__repr__ showed the generic
    # '<ptx special value>' for blockDim.
    _description_ = '<blockDim.{x,y,z}>'
    # Each axis maps to the PTX special register ntid.{x,y,z}.
    x = macro.Macro('ntid.x', SREG_SIGNATURE)
    y = macro.Macro('ntid.y', SREG_SIGNATURE)
    z = macro.Macro('ntid.z', SREG_SIGNATURE)
class gridDim(Stub):
    '''
    The shape of the grid of blocks, accessed through the attributes ``x``,
    ``y``, and ``z``.
    '''
    _description_ = '<gridDim.{x,y,z}>'
    # Each axis maps to the PTX special register nctaid.{x,y,z}.
    x = macro.Macro('nctaid.x', SREG_SIGNATURE)
    y = macro.Macro('nctaid.y', SREG_SIGNATURE)
    z = macro.Macro('nctaid.z', SREG_SIGNATURE)
#-------------------------------------------------------------------------------
# Grid Macro
# Placeholder functions: their bodies are never executed; they only provide
# distinct callables for the grid macro machinery to reference.
def _ptx_grid1d(): pass
def _ptx_grid2d(): pass
def grid_expand(ndim):
    """grid(ndim)

    Return the absolute position of the current thread in the entire
    grid of blocks. *ndim* should correspond to the number of dimensions
    declared when instantiating the kernel. If *ndim* is 1, a single integer
    is returned. If *ndim* is 2 or 3, a tuple of the given number of
    integers is returned.

    Computation of the first integer is as follows::

        cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x

    and is similar for the other two indices, but using the ``y`` and ``z``
    attributes.
    """
    if ndim == 1:
        fname, restype = "ptx.grid.1d", types.int32
    elif ndim == 2:
        fname, restype = "ptx.grid.2d", types.UniTuple(types.int32, 2)
    elif ndim == 3:
        fname, restype = "ptx.grid.3d", types.UniTuple(types.int32, 3)
    else:
        raise ValueError('argument can only be 1, 2, 3')
    sig = typing.signature(restype, types.intp)
    return ir.Intrinsic(fname, sig, args=[ndim])

grid = macro.Macro('ptx.grid', grid_expand, callable=True)
#-------------------------------------------------------------------------------
# Gridsize Macro
def gridsize_expand(ndim):
    """
    Return the absolute size (or shape) in threads of the entire grid of
    blocks. *ndim* should correspond to the number of dimensions declared when
    instantiating the kernel.

    Computation of the first integer is as follows::

        cuda.blockDim.x * cuda.gridDim.x

    and is similar for the other two indices, but using the ``y`` and ``z``
    attributes.
    """
    if ndim == 1:
        fname, restype = "ptx.gridsize.1d", types.int32
    elif ndim == 2:
        fname, restype = "ptx.gridsize.2d", types.UniTuple(types.int32, 2)
    elif ndim == 3:
        fname, restype = "ptx.gridsize.3d", types.UniTuple(types.int32, 3)
    else:
        raise ValueError('argument can only be 1, 2 or 3')
    sig = typing.signature(restype, types.intp)
    return ir.Intrinsic(fname, sig, args=[ndim])

gridsize = macro.Macro('ptx.gridsize', gridsize_expand, callable=True)
#-------------------------------------------------------------------------------
# synthreads
class syncthreads(Stub):
    '''
    Synchronize all threads in the same thread block. This function implements
    the same pattern as barriers in traditional multi-threaded programming: this
    function waits until all threads in the block call it, at which point it
    returns control to all its callers.
    '''
    # NOTE: the description string says '<syncthread()>' (singular) — kept as-is
    # since it is a runtime value.
    _description_ = '<syncthread()>'
# -------------------------------------------------------------------------------
# memory fences
# Memory-fence stubs, ordered from narrowest to widest scope.
class threadfence_block(Stub):
    '''
    A memory fence at thread block level
    '''
    _description_ = '<threadfence_block()>'

class threadfence_system(Stub):
    '''
    A memory fence at system level: across devices
    '''
    _description_ = '<threadfence_system()>'

class threadfence(Stub):
    '''
    A memory fence at device level
    '''
    _description_ = '<threadfence()>'
# -------------------------------------------------------------------------------
# shared
def _legalize_shape(shape):
if isinstance(shape, tuple):
return shape
elif isinstance(shape, int):
return (shape,)
else:
raise TypeError("invalid type for shape; got {0}".format(type(shape)))
def shared_array(shape, dtype):
    """Expand cuda.shared.array(shape, dtype) into the smem-alloc intrinsic."""
    legal_shape = _legalize_shape(shape)
    rank = len(legal_shape)
    arytype = types.Array(dtype, rank, 'C')
    sig = typing.signature(arytype, types.UniTuple(types.intp, rank), types.Any)
    return ir.Intrinsic("ptx.smem.alloc", sig, args=(legal_shape, dtype))
class shared(Stub):
    """
    Shared memory namespace.
    """
    _description_ = '<shared>'
    array = macro.Macro('shared.array', shared_array, callable=True,
                        argnames=['shape', 'dtype'])

# NOTE(review): the string below is a free-standing expression statement —
# it is NOT attached to `shared.array` (or anything else) as a docstring at
# runtime; it only documents the macro for readers of this file.
'''
Allocate a shared array of the given *shape* and *type*. *shape* is either
an integer or a tuple of integers representing the array's dimensions.
*type* is a :ref:`Numba type <numba-types>` of the elements needing to be
stored in the array.
The returned array-like object can be read and written to like any normal
device array (e.g. through indexing).
'''
#-------------------------------------------------------------------------------
# local array
def local_array(shape, dtype):
    """Expand cuda.local.array(shape, dtype) into the lmem-alloc intrinsic."""
    legal_shape = _legalize_shape(shape)
    rank = len(legal_shape)
    arytype = types.Array(dtype, rank, 'C')
    sig = typing.signature(arytype, types.UniTuple(types.intp, rank), types.Any)
    return ir.Intrinsic("ptx.lmem.alloc", sig, args=(legal_shape, dtype))
class local(Stub):
    '''
    Local memory namespace.
    '''
    _description_ = '<local>'
    array = macro.Macro('local.array', local_array, callable=True,
                        argnames=['shape', 'dtype'])

# NOTE(review): as with `shared` above, this string is a free-standing
# expression and is not attached to `local.array` as a runtime docstring.
'''
Allocate a local array of the given *shape* and *type*. The array is private
to the current thread, and resides in global memory. An array-like object is
returned which can be read and written to like any standard array (e.g.
through indexing).
'''
#-------------------------------------------------------------------------------
# const array
def const_array_like(ndarray):
    """Expand cuda.const.array_like(ndarray) into the cmem intrinsic."""
    fname = "ptx.cmem.arylike"

    # Imported here rather than at module level — presumably to avoid a
    # circular import at load time (TODO confirm).
    from .descriptor import CUDATargetDesc
    aryty = CUDATargetDesc.typingctx.resolve_argument_type(ndarray)
    sig = typing.signature(aryty, aryty)
    return ir.Intrinsic(fname, sig, args=[ndarray])
class const(Stu |
"""
sampyl.samplers.NUTS
~~~~~~~~~~~~~~~~~~~~
This module implements No-U-Turn Sampler (NUTS).
:copyright: (c) 2015 by Mat Leonard.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division
import collections
from ..core import np
from .base import Sampler
from .hamiltonian import energy, leapfrog, initial_momentum
class NUTS(Sampler):
    """ No-U-Turn sampler (Hoffman & Gelman, 2014) for sampling from a
        probability distribution defined by a log P(theta) function.

        For technical details, see the paper:
        http://www.stat.columbia.edu/~gelman/research/published/nuts.pdf

        :param logp: log P(X) function for sampling distribution
        :param start:
            Dictionary of starting state for the sampler. Should have one
            element for each argument of logp.
        :param grad_logp: (optional)
            Function or list of functions that calculate grad log P(theta).
            Pass functions here if you don't want to use autograd for the
            gradients. If logp has multiple parameters, grad_logp must be
            a list of gradient functions w.r.t. each parameter in logp.
            If you wish to use a logp function that returns both the logp
            value and the gradient, set grad_logp = True.
        :param scale: (optional)
            Dictionary with same format as start. Scaling for initial
            momentum in Hamiltonian step.
        :param step_size: (optional) *float.*
            Initial step size for the deterministic proposals.
        :param adapt_steps: (optional) *int.*
            Integer number of steps used for adapting the step size to
            achieve a target acceptance rate.
        :param Emax: (optional) *float.* Maximum energy.
        :param target_accept: (optional) *float.* Target acceptance rate.
        :param gamma: (optional) *float.*
        :param k: (optional) *float.* Scales the speed of step size
            adaptation.
        :param t0: (optional) *float.* Slows initial step size adaptation.

        Example ::

            def logp(x, y):
                ...

            start = {'x': x_start, 'y': y_start}
            nuts = sampyl.NUTS(logp, start)
            chain = nuts.sample(1000)
    """

    def __init__(self, logp, start,
                 step_size=0.25,
                 adapt_steps=100,
                 Emax=1000.,
                 target_accept=0.65,
                 gamma=0.05,
                 k=0.75,
                 t0=10.,
                 **kwargs):
        super(NUTS, self).__init__(logp, start, **kwargs)
        # Scale the user step size by D^(-1/4), D = dimension of the state.
        self.step_size = step_size / len(self.state.tovector())**(1/4.)
        self.adapt_steps = adapt_steps
        self.Emax = Emax
        self.target_accept = target_accept
        self.gamma = gamma
        self.k = k
        self.t0 = t0
        # Dual-averaging state for step-size adaptation.
        self.Hbar = 0.
        self.ebar = 1.
        self.mu = np.log(self.step_size*10)

    def step(self):
        """ Perform one NUTS step."""
        H = self.model.logp
        dH = self.model.grad
        x = self.state
        # self.scale and self._sampled are provided by the Sampler base class
        # (not visible in this module).
        r0 = initial_momentum(x, self.scale)
        u = np.random.uniform()
        e = self.step_size

        xn, xp, rn, rp, y = x, x, r0, r0, x
        j, n, s = 0, 1, 1
        # Trajectory-doubling loop: grow the tree left or right (chosen at
        # random) until the U-turn / energy-error criterion stops it.
        while s == 1:
            v = bern(0.5)*2 - 1

            if v == -1:
                xn, rn, _, _, x1, n1, s1, a, na = buildtree(xn, rn, u, v, j, e, x, r0,
                                                            H, dH, self.Emax)
            else:
                _, _, xp, rp, x1, n1, s1, a, na = buildtree(xp, rp, u, v, j, e, x, r0,
                                                            H, dH, self.Emax)
            # Accept the subtree's proposal with probability n1/n.
            if s1 == 1 and bern(np.min(np.array([1, n1/n]))):
                y = x1

            dx = (xp - xn).tovector()
            # U-turn check against the momenta at both trajectory ends.
            s = s1 * (np.dot(dx, rn.tovector()) >= 0) * \
                (np.dot(dx, rp.tovector()) >= 0)
            n = n + n1
            j = j + 1

        if self._sampled >= self.adapt_steps:
            # Adaptation finished: freeze the averaged step size.
            self.step_size = self.ebar
        else:
            # Adapt step size (dual averaging; Hoffman & Gelman 2014, Alg. 6).
            m = self._sampled + 1
            w = 1./(m + self.t0)
            self.Hbar = (1 - w)*self.Hbar + w*(self.target_accept - a/na)
            log_e = self.mu - (m**.5/self.gamma)*self.Hbar
            self.step_size = np.exp(log_e)
            z = m**(-self.k)
            self.ebar = np.exp(z*log_e + (1 - z)*np.log(self.ebar))

        self.state = y
        self._sampled += 1
        return y
def bern(p):
    """Draw one Bernoulli sample: True with probability ``p``."""
    draw = np.random.uniform()
    return draw < p
def buildtree(x, r, u, v, j, e, x0, r0, H, dH, Emax):
    """Recursively build one side of the NUTS trajectory tree of depth j.

    Returns (xn, rn, xp, rp, x1, n1, s1, a1, na1): the backward/forward
    ends of the subtree, a proposal point x1, the count n1 of valid
    points, the continue flag s1, and acceptance statistics a1/na1.
    """
    if j == 0:
        # Base case: a single leapfrog step of size e in direction v.
        x1, r1 = leapfrog(x, r, v*e, dH)
        E = energy(H, x1, r1)
        E0 = energy(H, x0, r0)
        dE = E - E0
        # n1: is the new point inside the slice defined by u?
        n1 = (np.log(u) - dE <= 0)
        # s1: stop if the energy error exceeds Emax (divergent step).
        s1 = (np.log(u) - dE < Emax)
        return x1, r1, x1, r1, x1, n1, s1, np.min(np.array([1, np.exp(dE)])), 1
    else:
        # Recursion: build the left subtree, then (if still valid)
        # extend it with a right subtree of the same depth.
        xn, rn, xp, rp, x1, n1, s1, a1, na1 = \
            buildtree(x, r, u, v, j-1, e, x0, r0, H, dH, Emax)
        if s1 == 1:
            if v == -1:
                xn, rn, _, _, x2, n2, s2, a2, na2 = \
                    buildtree(xn, rn, u, v, j-1, e, x0, r0, H, dH, Emax)
            else:
                _, _, xp, rp, x2, n2, s2, a2, na2 = \
                    buildtree(xp, rp, u, v, j-1, e, x0, r0, H, dH, Emax)
            # Keep the new subtree's proposal with probability
            # n2 / (n1 + n2); max(..., 1.) guards division by zero.
            if bern(n2/max(n1 + n2, 1.)):
                x1 = x2
            a1 = a1 + a2
            na1 = na1 + na2
            # U-turn check across the merged subtree.
            dx = (xp - xn).tovector()
            s1 = s2 * (np.dot(dx, rn.tovector()) >= 0) * \
                (np.dot(dx, rp.tovector()) >= 0)
            n1 = n1 + n2
        return xn, rn, xp, rp, x1, n1, s1, a1, na1
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2017 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License | .
"""MIME-Type Parser.
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
| - parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared
against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a
list of candidates.
"""
from functools import reduce
__version__ = "0.1.2"
__author__ = "Joe Gregorio"
__email__ = "joe@bitworking.org"
__credits__ = ""
# TODO: Can probably delete this module.
def parse_mime_type(mime_type):
    """Carves up a mime-type and returns a tuple of the (type, subtype, params) where
    'params' is a dictionary of all the parameters for the media range. For example, the
    media range 'application/xhtml;q=0.5' would get parsed into:

    ('application', 'xhtml', {'q': '0.5'})
    """
    pieces = mime_type.split(";")
    params = {}
    for piece in pieces[1:]:
        key, value = [part.strip() for part in piece.split("=")]
        params[key] = value
    full_type = pieces[0].strip()
    # Java URLConnection class sends an Accept header that includes a single "*"
    # Turn it into a legal wildcard.
    if full_type == "*":
        full_type = "*/*"
    main_type, subtype = full_type.split("/")
    return (main_type.strip(), subtype.strip(), params)
def parse_media_range(range):
    """Carves up a media range and returns a tuple of the (type, subtype, params) where
    'params' is a dictionary of all the parameters for the media range. For example, the
    media range 'application/\*;q=0.5' would get parsed into:

    ('application', '\*', {'q': '0.5'})

    In addition this function also guarantees that there
    is a value for 'q' in the params dictionary, filling it
    in with a proper default if necessary.
    """
    (type, subtype, params) = parse_mime_type(range)
    # Normalize 'q': a missing, empty, zero, or out-of-range [0, 1] value
    # falls back to the default quality of 1.  The empty-string guard
    # (`not params["q"]`) must run before float() so inputs like
    # 'text/*;q=' do not raise ValueError.  (Bug fix: the second clause
    # was a duplicate of the membership test.)
    if (
        "q" not in params
        or not params["q"]
        or not float(params["q"])
        or float(params["q"]) > 1
        or float(params["q"]) < 0
    ):
        params["q"] = "1"
    return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a given mime-type against a list of media_ranges that
    have already been parsed by parse_media_range().

    Returns a tuple of the fitness value and the value of the 'q' quality parameter of
    the best match, or (-1, 0) if no match was found. Just as for quality_parsed(),
    'parsed_ranges' must be a list of parsed media ranges.
    """
    best_fitness = -1
    best_fit_q = 0
    (target_type, target_subtype, target_params) = parse_media_range(mime_type)
    for (range_type, range_subtype, range_params) in parsed_ranges:
        # A range is a candidate when both type and subtype match,
        # with '*' acting as a wildcard on either side.
        if (range_type == target_type or range_type == "*" or target_type == "*") and (
            range_subtype == target_subtype or range_subtype == "*" or target_subtype == "*"
        ):
            # One fitness point per matching (non-q) parameter.
            # (Was a reduce() over a list comprehension; sum() is the idiom.)
            param_matches = sum(
                1
                for (key, value) in target_params.items()
                if key != "q" and key in range_params and value == range_params[key]
            )
            # Exact type is worth 100, exact subtype 10, plus param matches.
            fitness = 100 if range_type == target_type else 0
            fitness += 10 if range_subtype == target_subtype else 0
            fitness += param_matches
            if fitness > best_fitness:
                best_fitness = fitness
                best_fit_q = range_params["q"]
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a given mime-type against a list of media_ranges that
    have already been parsed by parse_media_range().

    Returns the 'q' quality parameter of the best match, 0 if no match was found. This
    function behaves the same as quality() except that 'parsed_ranges' must be a list of
    parsed media ranges.
    """
    _fitness, q = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Returns the quality 'q' of a mime-type when compared against the media- ranges in
    ranges. For example:

    >>> quality('text/html', 'text/*;q=0.3, text/html;q=0.7, text/html;level=1,
    text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed = [parse_media_range(chunk) for chunk in ranges.split(",")]
    return quality_parsed(mime_type, parsed)
def best_match(supported, header):
    """Takes a list of supported mime-types and finds the best match for all the media-
    ranges listed in header. The value of header must be a string that conforms to the
    format of the HTTP Accept: header. The value of 'supported' is a list of mime-types.

    >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    parsed_header = [parse_media_range(part) for part in header.split(",")]
    # Rank each supported type by its (fitness, quality) against the header;
    # sorting puts the best candidate last.
    ranked = sorted(
        (fitness_and_quality_parsed(candidate, parsed_header), candidate)
        for candidate in supported
    )
    (best_fitness_q, best_type) = ranked[-1]
    # A best quality of 0 means nothing acceptable was found.
    return best_type if best_fitness_q[1] else ""
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import re
import os
from typing import (
NamedTuple, Optional
)
# project
from kiwi.command import Command
from kiwi.exceptions import KiwiKernelLookupError
class kernel_type(NamedTuple):
    """Result of a kernel lookup: image name, file path and version."""
    name: str
    filename: str
    version: str


class xen_hypervisor_type(NamedTuple):
    """Result of a Xen hypervisor lookup: file path and image name."""
    filename: str
    name: str
class Kernel:
    """
    **Implements kernel lookup and extraction from given root tree**

    :param str root_dir: root directory path name
    :param list kernel_names: list of kernel names to search for
        functions.sh::suseStripKernel() provides a normalized
        file so that we do not have to search for many different
        names in this code
    """
    def __init__(self, root_dir: str):
        # Root of the tree searched for kernel images (under <root>/boot).
        self.root_dir = root_dir
        # Candidate image file names derived from /lib/modules entries.
        self.kernel_names = self._setup_kernel_names_for_lookup()

    def get_kernel(
        self, raise_on_not_found: bool = False
    ) -> Optional[kernel_type]:
        """
        Lookup kernel files and provide filename and version

        :param bool raise_on_not_found: sets the method to raise an exception
            if the kernel is not found

        :raises KiwiKernelLookupError: if raise_on_not_found flag is active
            and kernel is not found
        :return: tuple with filename, kernelname and version

        :rtype: tuple|None
        """
        for kernel_name in self.kernel_names:
            kernel_file = os.sep.join(
                [self.root_dir, 'boot', kernel_name]
            )
            if os.path.exists(kernel_file):
                # Version is everything after the first dash of the file
                # name, e.g. 'vmlinuz-5.3.18-default' -> '5.3.18-default'.
                version_match = re.match(
                    '.*?-(.*)', os.path.basename(kernel_file)
                )
                if version_match:
                    version = version_match.group(1)
                    return kernel_type(
                        # realpath resolves symlinked kernel images
                        name=os.path.basename(os.path.realpath(kernel_file)),
                        filename=kernel_file,
                        version=version
                    )
        if raise_on_not_found:
            raise KiwiKernelLookupError(
                'No kernel found in {0}, searched for {1}'.format(
                    os.sep.join([self.root_dir, 'boot']),
                    ','.join(self.kernel_names)
                )
            )
        return None

    def get_xen_hypervisor(self) -> Optional[xen_hypervisor_type]:
        """
        Lookup xen hypervisor and provide filename and hypervisor name

        :return: tuple with filename and hypervisor name

        :rtype: tuple|None
        """
        xen_hypervisor = self.root_dir + '/boot/xen.gz'
        if os.path.exists(xen_hypervisor):
            return xen_hypervisor_type(
                filename=xen_hypervisor,
                name='xen.gz'
            )
        return None

    def copy_kernel(self, target_dir: str, file_name: Optional[str] = None) -> None:
        """
        Copy kernel to specified target

        If no file_name is given the target filename is set
        as kernel-<kernel.version>.kernel

        :param str target_dir: target path name
        :param str file_name: base filename in target
        """
        kernel = self.get_kernel()
        if kernel:
            if not file_name:
                file_name = 'kernel-' + kernel.version + '.kernel'
            target_file = ''.join(
                [target_dir, '/', file_name]
            )
            Command.run(['cp', kernel.filename, target_file])

    def copy_xen_hypervisor(
        self, target_dir: str, file_name: Optional[str] = None
    ) -> None:
        """
        Copy xen hypervisor to specified target

        If no file_name is given the target filename is set
        as hypervisor-<xen.name>

        :param str target_dir: target path name
        :param str file_name: base filename in target
        """
        xen = self.get_xen_hypervisor()
        if xen:
            if not file_name:
                file_name = 'hypervisor-' + xen.name
            target_file = ''.join(
                [target_dir, '/', file_name]
            )
            Command.run(['cp', xen.filename, target_file])

    def _setup_kernel_names_for_lookup(self):
        """
        The kernel image name is different per arch and distribution

        This method returns a list of possible kernel image names in
        order to search and find one of them

        :return: list of kernel image names

        :rtype: list
        """
        kernel_names = []
        # One directory per installed kernel version lives in /lib/modules.
        kernel_dirs = sorted(
            os.listdir(''.join([self.root_dir, '/lib/modules']))
        )
        if kernel_dirs:
            # append lookup for the real kernel image names
            # depending on the arch and os they are different
            # in their prefix
            kernel_prefixes = [
                'uImage', 'Image', 'zImage', 'vmlinuz', 'image', 'vmlinux'
            ]
            kernel_name_pattern = '{prefix}-{name}'
            for kernel_prefix in kernel_prefixes:
                for kernel_dir in kernel_dirs:
                    kernel_names.append(
                        kernel_name_pattern.format(
                            prefix=kernel_prefix, name=kernel_dir
                        )
                    )
        return kernel_names
|
import medic
from maya import OpenMaya
class FaceAssigned(medic.PyTester):
    """Medic tester that reports meshes/nurbs surfaces whose object-group
    plugs are connected to a shading engine (i.e. face-level shader
    assignments)."""
    def __init__(self):
        super(FaceAssigned, self).__init__()

    def Name(self):
        # Identifier shown by the medic framework.
        return "FaceAssigned"

    def Description(self):
        return "Face assigned mesh(s)"

    def Match(self, node):
        # Only inspect mesh and nurbs-surface geometry nodes.
        return node.object().hasFn(OpenMaya.MFn.kMesh) or node.object().hasFn(OpenMaya.MFn.kNurbsSurfaceGeom)

    @staticmethod
    def __TestObjGrp(node, parentPlug, childPlug):
        # True when any element of parentPlug's childPlug child array has an
        # outgoing connection to a shading engine.
        dg = node.dg()
        if not dg.hasAttribute(parentPlug) or not dg.hasAttribute(childPlug):
            return False
        io_plug = node.dg().findPlug(parentPlug)
        og_obj = node.dg().attribute(childPlug)
        for i in range(io_plug.numElements()):
            elm = io_plug.elementByPhysicalIndex(i)
            og_plug = elm.child(og_obj)
            if not og_plug.numConnectedElements():
                continue
            for j in range(og_plug.numElements()):
                gelm = og_plug.elementByPhysicalIndex(j)
                # connectedTo(arr, asDst=False, asSrc=True) fills arr with
                # the plugs this element feeds into.
                arr = OpenMaya.MPlugArray()
                if not gelm.connectedTo(arr, False, True):
                    continue
                for n in range(arr.length()):
                    if arr[n].node().hasFn(OpenMaya.MFn.kShadingEngine):
                        return True
        return False

    def test(self, node):
        # Check both the component-level and the object-level group plugs.
        if FaceAssigned.__TestObjGrp(node, "compInstObjGroups", "compObjectGroups"):
            return medic.PyReport(node)
        if FaceAssigned.__TestObjGrp(node, "instObjGroups", "objectGroups"):
            return medic.PyReport(node)
        return None
def Create():
    """Factory entry point used by medic to instantiate this tester."""
    tester = FaceAssigned()
    return tester
|
image = clutter.Texture()
if image_src:
self.image.set_from_file(image_src)
self.image.set_parent(self)
self.set_font_name('16')
self.set_font_color('#000000ff')
self.set_inner_color('#aaaaaaff')
self.set_border_color('#888888ff')
    def set_image_src(self, image_src):
        """Load a new image file into the button's texture."""
        self.image.set_from_file(image_src)
    def do_allocate(self, box, flags):
        """Allocate the background rect, the aspect-preserving image, and
        the optional label inside the given actor box."""
        btn_width = box.x2 - box.x1
        btn_height = box.y2 - box.y1
        # Area inside the button padding.
        inner_width = btn_width - 2*self._padding.x
        inner_height = btn_height - 2*self._padding.y
        # allocate background
        self._allocate_rect(0, 0, btn_width, btn_height, flags)
        # allocate image
        if self._has_text:
            # Reserve vertical room for the label below the image.
            label_height = ClassicButton.do_get_preferred_height(self, for_width=inner_width)[1]
            remaining_height = btn_height - label_height - self.spacing
        else:
            label_height = 0
            remaining_height = inner_height
        image_preferred_size = self.image.get_preferred_size()
        if image_preferred_size[3] > 0:
            # Preserve aspect ratio (natural width / natural height).
            image_ratio = float(image_preferred_size[2]) / float(image_preferred_size[3])
            if self._expand:
                # Grow to fill the remaining height, capped by inner width.
                image_height = remaining_height
                image_width = round(float(image_height) * float(image_ratio))
                if image_width > inner_width:
                    image_width = inner_width
                    image_height = round(float(image_width) / float(image_ratio))
            else:
                # Use the natural size, shrinking to fit if necessary.
                image_height = image_preferred_size[3]
                if remaining_height < image_height:
                    image_height = remaining_height
                image_width = round(float(image_height) * float(image_ratio))
                if image_width > inner_width:
                    image_width = inner_width
                    image_height = round(float(image_width) / float(image_ratio))
        else:
            # No usable preferred height: collapse the image.
            image_width = 0
            image_height = 0
        # Center the image horizontally, and vertically in the space
        # above the label.
        x_padding = round((inner_width - image_width) / 2.0)
        y_padding = round((remaining_height - image_height) / 2.0)
        image_box = clutter.ActorBox()
        image_box.x1 = self._padding.x + x_padding
        image_box.y1 = self._padding.y + y_padding
        image_box.x2 = image_box.x1 + image_width
        image_box.y2 = image_box.y1 + image_height
        self.image.allocate(image_box, flags)
        # allocate label
        if self._has_text:
            # Label takes whatever vertical space remains under the image.
            base_y = image_height + self.spacing
            label_height = btn_height - base_y
            self._allocate_label(0, base_y, btn_width, label_height, flags)
        clutter.Actor.do_allocate(self, box, flags)
    def do_set_property(self, pspec, value):
        # Delegate property handling to the base button class.
        return ClassicButton.do_set_property(self, pspec, value)
    def do_get_property(self, pspec):
        # Delegate property handling to the base button class.
        return ClassicButton.do_get_property(self, pspec)
    def do_paint(self):
        # Paint back-to-front: background rect, then image, then label.
        self.rect.paint()
        self.image.paint()
        if self._has_text:
            self.label.paint()
    def do_foreach(self, func, data=None):
        # Visit the base-class children first, then our extra image child.
        ClassicButton.do_foreach(self, func, data)
        func(self.image, data)
def do_destroy(self):
self.unparent()
if hasattr(self, 'image'):
if self.image:
self.image.unparent()
self.image.destroy()
try:
ClassicButton.do_destroy(self)
except:
pass
# Register ImageButton with the GObject type system so Clutter can use it.
gobject.type_register(ImageButton)

if __name__ == '__main__':
    # Manual demo / leak-test harness (Python 2 era: note `print counter`).
    from flowbox import FlowBox
    stage = clutter.Stage()
    stage.connect('destroy', clutter.main_quit)
    #toto = cogl.Material()
    texture_path = '/home/aviolo/sources/easycast/unstable/easycast/images/buttons/copy.png'
    texture = clutter.cogl.texture_new_from_file(texture_path, clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
    #toto.set_layer(0, texture)
    #stage.add(toto)
    t = ClassicButton('test efopkzekfopzf opfzeopfkz opfzegjzeh guzehiug ezhgiozeghizeogh eziogzeoighze oigzeiogzeig opg jzeopgjzepogzzeogjze zeigergre ergerg', texture = texture, rounded = True)
    t.set_size(640, 480)
    stage.add(t)
    # Disabled layout-stress demo kept as a string literal:
    '''
    # Main flowbox
    box0 = FlowBox()
    box0.set_size(640, 640)
    # Invisible rectangle for top margin
    r = clutter.Rectangle()
    r.set_size(640, 1)
    box0.add(r)
    # Button at natural size
    b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
    b.set_size(*b.get_preferred_size()[2:])
    box0.add(b)
    # Button larger than natural size
    b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
    b.set_size(630, 50)
    box0.add(b)
    # Intermediate flowbox to force line wrapping
    box1 = FlowBox()
    box1.set_size(640, 50)
    box0.add(box1)
    # Button fitter than natural size
    b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.')
    b.set_size(420, 50)
    box1.add(b)
    # Button more fitter than natural size
    b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.')
    b.set_size(210, 50)
    box0.add(b)
    # Intermediate flowbox to force line wrapping
    box2 = FlowBox()
    box2.set_size(640, 50)
    box0.add(box2)
    # Button at minimal size (just suspension marks)
    b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
    b.set_size(*b.get_preferred_size()[:2])
    box2.add(b)
    # Invisible rectangle for bottom margin
    r = clutter.Rectangle()
    r.set_size(640, 1)
    box0.add(r)
    # Testing buttons
    b = ClassicButton('A')
    b.set_size(15, 15)
    b.set_position(5, 450)
    stage.add(b)
    b = ClassicButton('B')
    b.set_size(25, 25)
    b.set_position(50, 425)
    stage.add(b)
    b = ClassicButton('C')
    b.set_font_color('Yellow')
    b.set_size(50, 50)
    b.set_position(125, 375)
    stage.add(b)
    b = ClassicButton('D')
    b.set_border_width(10)
    b.set_border_color('Green')
    b.set_size(100, 100)
    b.set_position(250, 325)
    stage.add(b)
    b = ClassicButton('E', texture=texture)
    b.set_inner_color('Pink')
    b.set_size(170, 170)
    b.set_position(425, 210)
    stage.add(b)
    stage.add(box0)
    '''
    # When enabled, repeatedly creates and destroys a button via timeouts
    # to watch for reference leaks with gc.DEBUG_LEAK.
    test_memory_usage = False
    if test_memory_usage:
        import gc
        gc.set_debug(gc.DEBUG_LEAK)
        from pprint import pprint
        max_count = 5000
        #texture_path = '/home/sdiemer/sources/candies/main/candies2/effect_light.png'
        texture = clutter.cogl.texture_new_from_file(texture_path, clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
        texture = None
        def create_test_object():
            t = ClassicButton('test efopkzekfopzf opfzeopfkz opfzegjzeh guzehiug ezhgiozeghizeogh eziogzeoighze oigzeiogzeig opg jzeopgjzepogzzeogjze zeigergre ergerg', texture = texture, rounded = True)
            return t
        def remove_test_object(obj, stage):
            obj.destroy()
            return False
        def test_memory(stage, counter, max_count):
            # Schedules removal of a fresh object, which in turn reschedules
            # this function, forming a create/destroy loop.
            if counter < max_count or max_count == 0:
                counter += 1
                print counter
                tested_object = create_test_object()
                stage.add(tested_object)
                gobject.timeout_add(2, remove_tested_object, tested_object, stage, counter)
            return False
        def remove_tested_object(tested_object, stage, counter):
            remove_test_object(tested_object, stage)
            gc.collect()
            pprint(gc.garbage)
            gobject.timeout_add(2, test_memory, stage, counter, max_count)
            return False
        gobject.timeout_add(10, test_memory, stage, 0, max_count)
    stage.show()
import logging
from . import generic
from .elfreloc import ELFReloc
# Module-level logger used for relocation warnings below.
l = logging.getLogger(name=__name__)
# http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.pdf
# Architecture name this relocation module applies to.
arch = 'PPC64'
class R_PPC64_JMP_SLOT(ELFReloc):
    def relocate(self):
        """Fill a PLT slot with the resolved target.

        On ABIv1 a whole 3-doubleword function descriptor is copied;
        otherwise the slot just receives the target's rebased address.
        """
        if self.owner.is_ppc64_abiv1:
            # R_PPC64_JMP_SLOT
            # http://osxr.org/glibc/source/sysdeps/powerpc/powerpc64/dl-machine.h?v=glibc-2.15#0405
            # copy an entire function descriptor struct
            source_memory = self.resolvedby.owner.memory
            source_base = self.resolvedby.relative_addr
            # Read all three descriptor words first, then write them out.
            descriptor = [
                source_memory.unpack_word(source_base + offset)
                for offset in (0, 8, 16)
            ]
            for offset, word in zip((0, 8, 16), descriptor):
                self.owner.memory.pack_word(self.relative_addr + offset, word)
        else:
            self.owner.memory.pack_word(self.relative_addr, self.resolvedby.rebased_addr)
        return True
class R_PPC64_RELATIVE(generic.GenericRelativeReloc):
    """Relative relocation; fully handled by the generic base class."""
    pass
class R_PPC64_IRELATIVE(generic.GenericIRelativeReloc):
    """Indirect (ifunc) relative relocation; handled by the generic base."""
    pass
class R_PPC64_ADDR64(generic.GenericAbsoluteAddendReloc):
    """Absolute 64-bit address with addend; handled by the generic base."""
    pass
class R_PPC64_GLOB_DAT(generic.GenericJumpslotReloc):
    """GOT entry relocation; handled by the generic jumpslot base."""
    pass
class R_PPC64_DTPMOD64(generic.GenericTLSModIdReloc):
    """TLS module-id relocation; handled by the generic base."""
    pass
class R_PPC64_DTPREL64(generic.GenericTLSDoffsetReloc):
    """TLS dynamic-offset relocation; handled by the generic base."""
    pass
class R_PPC64_TPREL64(generic.GenericTLSOffsetReloc):
    """TLS thread-pointer-relative relocation; handled by the generic base."""
    pass
class R_PPC64_REL24(ELFReloc):
    """
    Relocation Type: 10
    Calculation: (S + A - P) >> 2
    Field: low24*
    """
    @property
    def value(self):
        addend = self.addend
        symbol_addr = self.resolvedby.rebased_addr
        place = self.rebased_addr
        return (symbol_addr + addend - place) >> 2

    def relocate(self):
        if not self.resolved:
            return False
        # Keep the opcode (top 6 bits) and AA/LK (bottom 2 bits) of the
        # branch instruction, then splice the 24-bit LI field in between.
        preserved_bits = self.owner.memory.unpack_word(self.relative_addr, size=4) & 0xFC000003
        li_field = (self.value & 0xFFFFFF) << 2
        self.owner.memory.pack_word(self.relative_addr, preserved_bits | li_field, size=4)
        return True
class R_PPC64_TOC16_LO(ELFReloc):
    """
    Relocation Type: 48
    Calculation: #lo(S + A - .TOC.)
    Field: half16
    """
    @property
    def value(self):
        # Low halfword of the TOC-relative target address.
        total = self.resolvedby.rebased_addr + self.addend
        if self.owner.ppc64_initial_rtoc is None:
            # Without a TOC base, fall back to the absolute low halfword.
            l.warning(".TOC. value not found")
            return total & 0xFFFF
        return (total - self.owner.ppc64_initial_rtoc) & 0xFFFF

    def relocate(self):
        if not self.resolved:
            return False
        self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
        return True
class R_PPC64_TOC16_HI(ELFReloc):
    """
    Relocation Type: 49
    Calculation: #hi(S + A - .TOC.)
    Field: half16
    """
    @property
    def value(self):
        # High halfword of the TOC-relative target address.
        total = self.resolvedby.rebased_addr + self.addend
        if self.owner.ppc64_initial_rtoc is None:
            # Without a TOC base, fall back to the absolute high halfword.
            l.warning(".TOC. value not found")
            return (total >> 16) & 0xFFFF
        return ((total - self.owner.ppc64_initial_rtoc) >> 16) & 0xFFFF

    def relocate(self):
        if not self.resolved:
            return False
        self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
        return True
class R_PPC64_TOC16_HA(ELFReloc):
    """
    Relocation Type: 50
    Calculation: #ha(S + A - .TOC.)
    Field: half16
    """
    @property
    def value(self):
        total = self.resolvedby.rebased_addr + self.addend
        if self.owner.ppc64_initial_rtoc is None:
            l.warning(".TOC. value not found")
        else:
            total -= self.owner.ppc64_initial_rtoc
        # #ha: high halfword adjusted for the sign carry of the low half.
        carry = 1 if total & 0x8000 else 0
        return ((total >> 16) + carry) & 0xFFFF

    def relocate(self):
        if not self.resolved:
            return False
        self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
        return True
class R_PPC64_TOC(ELFReloc):
    """
    Relocation Type: 51
    Calculation: .TOC.
    Field: doubleword64
    """
    @property
    def value(self):
        # The value is the TOC base itself; 0 when it is unknown.
        rtoc = self.owner.ppc64_initial_rtoc
        if rtoc is None:
            l.warning(".TOC. value not found")
            return 0
        return rtoc
|
def flatten(x):
    """
    Takes an N times nested list of lists like [[a,b],[c, [d, e]],[f]]
    and returns a single list [a,b,c,d,e,f].

    Strings, bytes and bytearrays are treated as atomic leaves rather
    than being iterated character-by-character (bug fix: bytes were
    previously exploded into individual integers).
    """
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, (str, bytes, bytearray)):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
|
ene table
# Header row for the detailed (one row per transcript) gene table.
detailed_out.write("\t".join(["Chromosome","Gene_name","Is_hgnc","Ensembl_gene_id","Ensembl_transcript_id","Biotype",
                              "Transcript_status","CCDS_id","HGNC_id","CDS_length","Protein_length",
                              "Transcript_start","Transcript_end","strand","Synonyms",
                              "Rvis_pct","entrez_gene_id","mammalian_phenotype_id"]))
detailed_out.write("\n")
# Header row for the summary (one row per gene) table.
summary_out.write("\t".join(["Chromosome","Gene_name","Is_hgnc","Ensembl_gene_id",
                             "HGNC_id","Synonyms", "Rvis_pct","Strand","Transcript_min_start","Transcript_max_end","Mammalian_phenotype_id"]))
summary_out.write("\n")
# entrez id -> mouse phenotype terms; gene name -> RVIS percentile(s).
mouse_phenotype = defaultdict(list)
genic_intolerance = defaultdict(list)
# NOTE(review): this binds keygene and list_hgnc to the *same* list object;
# harmless today because keygene is rebound before use, but fragile.
keygene = list_hgnc = []
#initializing values for the summary gene table
transcript_min = defaultdict(list)
transcript_max = defaultdict(list)
# De-duplicates summary rows across multiple transcripts of a gene.
lines_seen = set()
# Genic intolerance (RVIS) percentiles keyed by gene name.
for line in open("genic_intolerance_dataset2", 'r'):
    if line.startswith("#") is False:
        field = line.strip().split("\t")
        name = str(field[0])
        score = str(field[1])
        percentile = str(field[2])
        (key,value) = (name, percentile)
        genic_intolerance[name].append(percentile)
#Phenotype data from MGI - Jax
for row in open("HMD_HumanPhenotype", 'r'):
    col = row.strip().split("\t")
    #Remove leading white spaces in the column
    entrez_id = str(col[1]).lstrip()
    #Remove leading white spaces in the column & join MP terms with a comma
    mph = str(col[5]).lstrip().replace(' ',',') if str(col[5]) != '' else None
    (key,value) = (entrez_id, mph)
    mouse_phenotype[entrez_id].append(mph)
# Dictionary for summary gene table to handle transcript min, max co-ordinates
for each in open("raw_gene_table", 'r'):
    if each.startswith("Chromosome") is False:
        k = each.strip().split("\t")
        chr = "chr"+str((k[0]))
        ens = str(k[2])
        start = str(k[10])
        end = str(k[11])
        transcript_min[(chr,ens)].append(start)
        transcript_max[(chr,ens)].append(end)
for each in open("raw_gene_table", 'r'):
if each.startswith("Chromosome") is False:
k = each.strip().split("\t")
chrom = "chr"+str((k[0]))
hgnc = str(k[1])
ens_geneid = str(k[2])
ens_transid = str(k[3])
trans_biotype = str(k[4])
status = str(k[5])
ccds_id = str(k[6]) #these id's are unique to transcripts
hgnc_id = str(k[7])
cds_len = str(k[8])
protein_len = str(k[9])
transcript_start = str(k[10])
transcript_end = str(k[11])
strand = str(k[12])
#remove space between names
previous = str(k[13]).replace(" ","")
synonyms = str(k[14]).replace(" ","")
entrez = str(k[15])
# sort all transcript start and end positions for a gene (use ens_geneid, since HGNC is not always true)
# Capture the first and the last position from the sorted list to give min, max
if (chrom,ens_geneid) in transcript_min:
minmum = sorted(transcript_min[(chrom,ens_geneid)])[0]
if (chrom,ens_geneid) in transcript_max:
maxmum = sorted(transcript_max[(chrom,ens_geneid)])[-1]
rvis = genic_intolerance[hgnc][0] if hgnc in genic_intolerance else None
pheno = mouse_phenotype[entrez] if entrez in mouse_phenotype else None
if pheno is not None and len(pheno) == 1:
phenotype = pheno[0]
elif pheno is None:
phenotype = "None"
else:
if len(pheno) > 1:
#convert the list to a string
string = ",".join(pheno)
# store a None for multiple Nones
if "None" in string and "MP:" not in string:
phenotype = None
#remove redundancy in MP terms
if "None" not in string and "MP:" in string:
phenotype = ",".join(set(string.split(",")))
#remove nones when MP terms are available
if "None" in string and "MP:" in string:
phen = string.split(",")
phenotype = ",".join([x for x in phen if x != "None"])
if hgnc != "None":
list_hgnc.append(hgnc)
#we don't want string of Nones
if "None" in previous and "None" in synonyms and "None" in hgnc:
string = None
else:
# We would like all genes names to be put together
gene_string = hgnc+","+previous+","+synonyms
#get rid of Nones in gene strings
if gene_string.startswith("None"):
string = gene_string.replace("None,","")
else:
| string = gene_string.replace(",None","")
#Nonetype object has no attribute split
if string is not None:
genes = set(string.split(","))
if len(genes) > 1:
# We would like to represent each member of the gene list as a key and the remainder as synonyms each time
for each in genes:
keygene = | set([each])
synonym = genes.difference(keygene)
gene_name = ','.join(keygene)
other_names = ','.join(synonym)
hgnc_flag = "1" if gene_name in list_hgnc else "0"
# only when the gene is a HGNC name, it would have an hgnc id
is_hgnc_id = hgnc_id if gene_name in list_hgnc else "None"
# handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
# writing to outfile for the summary gene table
line = "\t".join([chrom,gene_name,hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# Writing to out for detailed gene table
detailed_out.write("\t".join([chrom,gene_name,hgnc_flag,ens_geneid,ens_transid,trans_biotype,
status,ccds_id,is_hgnc_id,cds_len,protein_len,transcript_start,
transcript_end,strand,other_names,str(rvis),entrez,str(phenotype)]))
detailed_out.write("\n")
# if there is one gene name in the list, we just want it to be the key
elif len(genes) == 1:
gene_name = ','.join(genes)
other_names = "None"
hgnc_flag = "1" if gene_name in list_hgnc else "0"
is_hgnc_id = hgnc_id if gene_name in list_hgnc else "None"
# handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
# writing to outfile for the summary gene table
line = "\t".join([chrom,str(gene_name),hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# write to out for detailed gene table
detailed_out.write("\t".join([chrom,str(gene_name),hgnc_flag,ens_geneid,ens_transid,trans_biotype,
status,ccds_id,is_hgnc_id,cds_len,protein_len,transcript_start,
transcript_end,strand,other_names,str(rvis),entrez,str(phenotype)]))
detailed_out.write("\n")
# if there are no HGNC, previous or synonyms names for an ensembl entry, just re |
# -*- coding: u | tf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters 'imagen.img' to an ImageField uploading to 'imgenEvento'
    # with the verbose name 'Ruta'.

    dependencies = [
        ('main', '0014_generalsetting_titulo'),
    ]

    operations = [
        migrations.AlterField(
            model_name='imagen',
            name='img',
            field=models.ImageField(upload_to=b'imgenEvento', verbose_name=b'Ruta'),
        ),
    ]
|
#!/usr | /bin/env python3
# -*- coding : utf-8 -*-
def mymodules2():
    """Print a fixed test message identifying module 2."""
    message = "test module2!"
    print(message)


mymodules2()
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
    def setdebugger(self, debugger):
        "Attach a debugger (or None to detach) to this interpreter."
        self.debugger = debugger
    def getdebugger(self):
        "Return the currently attached debugger, or None."
        return self.debugger
    def open_remote_stack_viewer(self):
        """Initiate the remote stack viewer from a separate thread.

        This method is called from the subprocess, and by returning from this
        method we allow the subprocess to unblock.  After a bit the shell
        requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception.  It is queried through
        the RPC mechanism.
        """
        # Defer via the Tk event loop so the RPC call that invoked us
        # can return first.
        self.tkconsole.text.after(300, self.remote_stack_viewer)
        return
    def remote_stack_viewer(self):
        "Fetch a stack-viewer proxy from the subprocess and display it in a Toplevel."
        from idlelib import RemoteObjectBrowser
        oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
        if oid is None:
            # Subprocess has no stack to show (no recent traceback): just beep.
            self.tkconsole.root.bell()
            return
        item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
        from idlelib.TreeWidget import ScrolledCanvas, TreeNode
        top = Toplevel(self.tkconsole.root)
        theme = idleConf.GetOption('main','Theme','name')
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
        sc.frame.pack(expand=1, fill="both")
        node = TreeNode(sc.canvas, None, item)
        node.expand()
        # XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assum | es complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
    def execfile(self, filename, source=None):
        "Execute an existing file"
        if source is None:
            # tokenize.open() honors the file's PEP 263 coding cookie.
            with tokenize.open(filename) as fp:
                source = fp.read()
        try:
            code = compile(source, filename, "exec")
        except (OverflowError, SyntaxError):
            # Compilation failed: report via the shell instead of raising.
            self.tkconsole.resetoutput()
            tkerr = self.tkconsole.stderr
            print('*** Error in script or command!\n', file=tkerr)
            print('Traceback (most recent call last):', file=tkerr)
            InteractiveInterpreter.showsyntaxerror(self, filename)
            self.tkconsole.showprompt()
        else:
            self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
    def prepend_syspath(self, filename):
        "Prepend sys.path with file's directory if not already included"
        # Executed in the subprocess; names are underscored and deleted so the
        # user's namespace is left untouched.
        self.runcommand("""if 1:
            _filename = %r
            import sys as _sys
            from os.path import dirname as _dirname
            _dir = _dirname(_filename)
            if not _dir in _sys.path:
                _sys.path.insert(0, _dir)
            del _filename, _sys, _dirname, _dir
            \n""" % (filename,))
    def showsyntaxerror(self, filename=None):
        """Override Interactive Interpreter method: Use Colorizing
        Color the offending position instead of printing it and pointing at it
        with a caret.
        """
        tkconsole = self.tkconsole
        text = tkconsole.text
        text.tag_remove("ERROR", "1.0", "end")
        type, value, tb = sys.exc_info()
        # Fall back gracefully when the exception lacks msg/lineno/offset.
        msg = getattr(value, 'msg', '') or value or "<no detail available>"
        lineno = getattr(value, 'lineno', '') or 1
        offset = getattr(value, 'offset', '') or 0
        if offset == 0:
            lineno += 1 #mark end of offending line
        if lineno == 1:
            # Error is on the current input line: offset from the I/O mark.
            pos = "iomark + %d chars" % (offset-1)
        else:
            pos = "iomark linestart + %d lines + %d chars" % \
                  (lineno-1, offset-1)
        tkconsole.colorize_syntax_error(text, pos)
        tkconsole.resetoutput()
        self.write("SyntaxError: %s\n" % msg)
        tkconsole.showprompt()
    def showtraceback(self):
        "Extend base class method to reset output properly"
        self.tkconsole.resetoutput()
        self.checklinecache()
        InteractiveInterpreter.showtraceback(self)
        # Optionally pop up the stack viewer right after the traceback.
        if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
            self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
    def runcommand(self, code):
        "Run the code without invoking the debugger"
        # The code better not raise an exception!
        if self.tkconsole.executing:
            self.display_executing_dialog()
            return 0
        if self.rpcclt:
            # Subprocess mode: hand the code over the RPC link.
            self.rpcclt.remotequeue("exec", "runcode", (code,), {})
        else:
            exec(code, self.locals)
        return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
se |
event time to emit
sleep(3)
self.assertFalse(PostgresConnector.is_server_master.called)
PostgresConnector.is_server_master = old_func
class ClientComponentTestCase(TestCase):
    """add_listener/add_processor must accept Components and reject anything else."""
    def test_add_listener_throws_on_non_component(self):
        client = Client(MagicMock(), MagicMock())
        self.assertRaises(InvalidConfigurationException,
                          client.add_listener,
                          3)
    def test_add_processor_throws_on_non_component(self):
        client = Client(MagicMock(), MagicMock())
        self.assertRaises(InvalidConfigurationException,
                          client.add_processor,
                          3)
    def test_add_listener_accepts_component(self):
        # NOTE(review): Client is built with one arg here but two elsewhere —
        # presumably the second argument is optional; confirm against Client.
        client = Client(MagicMock())
        client.add_listener(Component(MagicMock(),
                                      MagicMock(),
                                      MagicMock()))
        self.assertIsInstance(client._listener, Component)
    def test_add_processor_accepts_component(self):
        client = Client(MagicMock(), MagicMock())
        client.add_processor(Component(MagicMock(),
                                       MagicMock(),
                                       MagicMock()))
        self.assertIsInstance(client._processor, Component)
class ValidateComponentsTestCase(TestCase):
    """_validate_components must reject bad listener/processor wiring."""
    def test_throws_on_non_listener(self):
        client = Client(MagicMock())
        client._processor = 3
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)
    def test_throws_on_non_processor(self):
        client = Client(MagicMock())
        client._listener = 3
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)
    def test_throws_on_different_queue(self):
        client = Client(MagicMock())
        client._listener = MagicMock()
        client._processor = MagicMock()
        client._listener.error_queue = MagicMock(
            return_value=True
        )
        # BUG FIX: the second assignment previously re-set the *listener's*
        # error_queue, so the queues were never different and this test
        # exercised nothing; point it at the processor instead.
        client._processor.error_queue = MagicMock(
            return_value=False
        )
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)
class WatchdogObserverTestCase(TestCase):
    """The directory observer is started/stopped only when a watch path is set."""
    def setUp(self):
        # Fresh client with a mocked watchdog observer for every test.
        self.client = Client(MagicMock())
        self.client.directory_observer = MagicMock()
    def test_start_schedules_obeserver_if_watch_path(self):
        self.client._watch_path = randint(50, 1000)
        self.client._start_observer()
        self.client.directory_observer.schedule.assert_called_once_with(
            self.client, self.client._watch_path, recursive=False
        )
        self.client.directory_observer.start.assert_called_once_with()
    def test_start_not_schedule_observer_if_none_watch_path(self):
        self.client._watch_path = None
        self.client._start_observer()
        self.assertEqual(self.client.directory_observer.schedule.call_count, 0)
        self.assertEqual(self.client.directory_observer.start.call_count, 0)
    def test_stop_stops_observer_if_watch_path_and_observer(self):
        self.client.directory_observer.is_alive.return_value = True
        self.client._watch_path = True
        self.client._stop_observer()
        self.client.directory_observer.stop.assert_called_once_with()
    def test_stop_does_not_stop_observer_on_none(self):
        self.client._watch_path = None
        self.client._stop_observer()
        self.assertEqual(self.client.directory_observer.stop.call_count, 0)
    def test_stop_does_not_stop_on_dead(self):
        # A dead (not alive) observer must not be stopped again.
        self.client._watch_path = True
        self.client.directory_observer.is_alive.return_value = False
        self.client._stop_observer()
        self.assertEqual(self.client.directory_observer.stop.call_count, 0)
class ClientStartupTestCase(TestCase):
    """Client.start() wiring: signal handlers, validation and component startup."""
    def test_startup_functions_are_called(self):
        with patch('multiprocessing.Process.start') as mock_process_start:
            with patch('hermes.client.signal') as mock_signal:
                client = Client(MagicMock())
                client._validate_components = MagicMock()
                client.start()
                # Two handlers are registered (see _handle_terminate/_handle_sigchld).
                self.assertEqual(mock_signal.call_count, 2)
                client._validate_components.assert_called_once_with()
                mock_process_start.assert_called_once_with()
    def test_initial_start_components(self):
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = False
        client._listener = MagicMock()
        client._listener.is_alive.return_value = False
        client._start_components()
        client._listener.start.assert_called_once_with()
        client._processor.start.assert_called_once_with()
    def test_start_components_when_components_running(self):
        # Already-alive components must not be started a second time.
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = True
        client._listener = MagicMock()
        client._listener.is_alive.return_value = True
        client._start_components()
        self.assertEqual(client._listener.start.call_count, 0)
        self.assertEqual(client._processor.start.call_count, 0)
    def test_join_is_called_on_restart(self):
        # On restart, dead components with a pid must be join()ed first.
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = False
        client._processor.ident.return_value = True
        client._listener = MagicMock()
        client._listener.is_alive.return_value = False
        client._listener.ident.return_value = True
        client._start_components(restart=True)
        client._listener.join.assert_called_once_with()
        client._processor.join.assert_called_once_with()
class ClientShutdownTestCase(TestCase):
def test_shutdown(self):
client = Client(MagicMock())
client.log = MagicMock()
client._stop_components = MagicMock()
c | lient._stop_observer = MagicMock()
client._should_run = True
client._shutdown()
client._stop_components.assert_called_once_with()
client._stop_observer.assert_called_once_with()
self.assertFalse(client._should_run)
def test_stop_terminates(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._listener = MagicMock()
client._processor.ident.return_value = True
client._listener.ident.return_value = True
client._processor.is_alive.return_value = True
client._listener.is_alive.return_value = True
client._stop_components()
client._processor.terminate.assert_called_once_with()
client._listener.terminate.assert_called_once_with()
client._listener.join.assert_called_once_with()
client._processor.join.assert_called_once_with()
def test_handle_terminate_when_same_process(self):
with patch('hermes.client.Client.ident',
new_callable=PropertyMock) as mock_ident:
client = Client(MagicMock())
client._shutdown = MagicMock()
mock_ident.return_value = getpid()
client._handle_terminate(None, None)
client._shutdown.assert_called_once_with()
def test_handle_terminate_when_different_process(self):
with patch('hermes.client.Client.ident',
new_callable=PropertyMock) as mock_ident:
client = Client(MagicMock())
client._exit_queue = MagicMock()
client._shutdown = MagicMock()
current_pid = getpid()
mock_ident.return_value = current_pid + 1
client._handle_terminate(None, None)
client._exit_queue.put_nowait.assert_called_once_with(True)
def test_handle_sigchld_when_should_not_run(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._should_run = False
client._handle_sigchld(None, None)
self.assertEqual(
client._processor.error_queue.get_nowait.call_count, 0
)
def test_handle_sigchld_when_expected_error_and_terminate(self):
client = Client(MagicMock())
client._processor = Mag |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import random
class BackoffTimer(object):
    """Exponential-backoff helper: failures widen a randomized retry delay.

    The counter ``c`` grows by one per failure and shrinks by one per
    success (never below zero).  get_interval() draws a delay from
    [0, (2**c - 1) * ratio), clamped to the optional bounds.
    """

    def __init__(self, ratio=1, max_interval=None, min_interval=None):
        self.c = 0
        self.ratio = ratio
        self.max_interval = max_interval
        self.min_interval = min_interval

    def is_reset(self):
        """True when no failures are currently outstanding."""
        return not self.c

    def reset(self):
        """Clear the failure counter; returns self for chaining."""
        self.c = 0
        return self

    def success(self):
        """Record one success: shrink the counter, floored at zero."""
        if self.c > 0:
            self.c -= 1
        return self

    def failure(self):
        """Record one failure: grow the counter."""
        self.c += 1
        return self

    def get_interval(self):
        """Draw the next randomized delay, honoring the configured bounds."""
        span = (pow(2, self.c) - 1) * self.ratio
        interval = random.random() * span
        if self.max_interval is not None and interval > self.max_interval:
            interval = self.max_interval
        if self.min_interval is not None and interval < self.min_interval:
            interval = self.min_interval
        return interval
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.log import logger
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.util import normalize_path
from pip.wheel import WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
class WheelCommand(Command):
    """
    Build Wheel archives for your requirements and dependencies.
    Wheel is a built-package format, and offers the advantage of not recompiling your software during every install.
    For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest.
    Requirements: setuptools>=0.8, and wheel.
    'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels.
    """
    name = 'wheel'
    usage = """
      %prog [options] <requirement specifier> ...
      %prog [options] -r <requirements file> ...
      %prog [options] <vcs project url> ...
      %prog [options] <local project path> ...
      %prog [options] <archive url/path> ..."""
    summary = 'Build wheels from your requirements.'
    def __init__(self, *args, **kw):
        # Register wheel-specific options plus the shared index option group.
        super(WheelCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-w', '--wheel-dir',
            dest='wheel_dir',
            metavar='dir',
            default=DEFAULT_WHEEL_DIR,
            help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.")
        cmd_opts.add_option(cmdoptions.use_wheel.make())
        cmd_opts.add_option(cmdoptions.no_use_wheel.make())
        cmd_opts.add_option(
            '--build-option',
            dest='build_options',
            metavar='options',
            action='append',
            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
        cmd_opts.add_option(cmdoptions.requirements.make())
        cmd_opts.add_option(cmdoptions.download_cache.make())
        cmd_opts.add_option(cmdoptions.no_deps.make())
        cmd_opts.add_option(cmdoptions.build_dir.make())
        cmd_opts.add_option(
            '--global-option',
            dest='global_options',
            action='append',
            metavar='options',
            help="Extra global options to be supplied to the setup.py "
            "call before the 'bdist_wheel' command.")
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, pip only finds stable versions.")
        cmd_opts.add_option(cmdoptions.no_clean.make())
        index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def run(self, options, args):
        """Collect requirements and build a wheel for each into the wheelhouse."""
        # confirm requirements
        try:
            import wheel.bdist_wheel
        except ImportError:
            raise CommandError("'pip wheel' requires the 'wheel' package. To fix this, run: pip install wheel")
        try:
            import pkg_resources
        except ImportError:
            raise CommandError(
                "'pip wheel' requires setuptools >= 0.8 for dist-info support."
                " To fix this, run: pip install --upgrade setuptools"
            )
        else:
            # Only setuptools >= 0.8 ships DistInfoDistribution (dist-info).
            if not hasattr(pkg_resources, 'DistInfoDistribution'):
                raise CommandError(
                    "'pip wheel' requires setuptools >= 0.8 for dist-info "
                    "support. To fix this, run: pip install --upgrade "
                    "setuptools"
                )
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
            index_urls = []
        # --use-mirrors/--mirrors are deprecated; warn but keep honoring them.
        if options.use_mirrors:
            logger.deprecated("1.7",
                        "--use-mirrors has been deprecated and will be removed"
                        " in the future. Explicit uses of --index-url and/or "
                        "--extra-index-url is suggested.")
        if options.mirrors:
            logger.deprecated("1.7",
                        "--mirrors has been deprecated and will be removed in "
                        " the future. Explicit uses of --index-url and/or "
                        "--extra-index-url is suggested.")
            index_urls += options.mirrors
        session = self._build_session(options)
        finder = PackageFinder(find_links=options.find_links,
                               index_urls=index_urls,
                               use_wheel=options.use_wheel,
                               allow_external=options.allow_external,
                               allow_unverified=options.allow_unverified,
                               allow_all_external=options.allow_all_external,
                               allow_all_unverified=options.allow_all_unverified,
                               allow_all_prereleases=options.pre,
                               process_dependency_links=
                                options.process_dependency_links,
                               session=session,
                            )
        options.build_dir = os.path.abspath(options.build_dir)
        requirement_set = RequirementSet(
            build_dir=options.build_dir,
            src_dir=None,
            download_dir=None,
            download_cache=options.download_cache,
            ignore_dependencies=options.ignore_dependencies,
            ignore_installed=True,
            session=session,
            wheel_download_dir=options.wheel_dir
        )
        # make the wheelhouse
        if not os.path.exists(options.wheel_dir):
            os.makedirs(options.wheel_dir)
        #parse args and/or requirements files
        for name in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(name, None))
        for filename in options.requirements:
            for req in parse_requirements(
                    filename,
                    finder=finder,
                    options=options,
                    session=session):
                if req.editable:
                    # Editable requirements cannot be built into wheels.
                    logger.notify("ignoring %s" % req.url)
                    continue
                requirement_set.add_requirement(req)
        #fail if no requirements
        if not requirement_set.has_requirements:
            opts = {'name': self.name}
            msg = ('You must give at least one requirement '
                   'to %(name)s (see "pip help %(name)s")' % opts)
            logger.error(msg)
            return
        try:
            #build wheels
            wb = WheelBuilder(
                requirement_set,
                finder,
                options.wheel_dir,
                build_options = options.build_options or [],
                global_options = options.global_options or []
            )
            wb.build()
        except PreviousBuildDirError:
            # Keep the build dir for inspection when a stale build dir is hit.
            options.no_clean = True
            raise
        finally:
            if not options.no_clean:
                requirement_set.cleanup_files()
|
# -*- coding: utf-8 -*-
import os
import lxml.etree
import io
from . import pipeline_item
import core.docvert_exception
class GeneratePostConversionEditorFiles(pipeline_item.pipeline_stage):
    # Placeholder stage: currently passes the pipeline value through untouched.
    def stage(self, pipeline_value):
        """Return *pipeline_value* unchanged (no post-conversion files generated yet)."""
        return pipeline_value
| |
import random
from plugin import Plugin
class Flatter(Plugin):
    """IRC plugin that replies with a random compliment for a named user."""

    # Compliment-list files, keyed by bot language.
    _FLATTERY_FILES = {
        "de": 'lists/flattery.txt',
        # Source http://www.pickuplinesgalore.com/cheesy.html
        "en": 'lists/flattery_en.txt',
    }

    def help_text(self, bot):
        """Return the localized help string for this plugin."""
        return bot.translate("flatter_help")

    def on_msg(self, bot, user_nick, host, channel, message):
        """On "<flatter_cmd> <nick>", send <nick> a random line from the list."""
        if message.lower().startswith(bot.translate("flatter_cmd")):
            if len(message.split()) >= 2:
                path = self._FLATTERY_FILES.get(bot.getlanguage())
                if path is None:
                    # Unsupported language: stay silent, as before.
                    return
                # BUG FIX: the file handle was previously leaked; close it
                # deterministically with a context manager.
                with open(path) as flattery_file:
                    lines = list(flattery_file)
                bot.send_message(channel, message.split()[1] + ", " + random.choice(lines), user_nick)
|
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
import matplotlib.lines as lines
import h5py
from matplotlib.font_manager import FontProperties
import matplotlib.ticker as ticker
from scipy.fftpack import fft
# Font used for axis labels: bold sans-serif, 20 pt.
axial_label_font = FontProperties()
axial_label_font.set_family('sans-serif')
axial_label_font.set_style('normal')
axial_label_font.set_weight('bold')
# axial_label_font.set_size('x-large')
axial_label_font.set_size(20)
# Font used for legends: regular sans-serif, 16 pt.
legend_label_font = FontProperties()
legend_label_font.set_family('sans-serif')
legend_label_font.set_style('normal')
legend_label_font.set_weight('normal')
# legend_label_font.set_size('large')
legend_label_font.set_size(16)
def node_response_extraction_sequential(node_ID, file_name, num_DOF):
    """Extract time, displacement and acceleration histories for one node.

    Parameters
    ----------
    node_ID : int
        Node number whose response is extracted.
    file_name : str
        Path to the sequential *.h5.feioutput results file.
    num_DOF : int
        Number of generalized DOFs stored for the node.

    Returns
    -------
    (Time, displacement_component, acceleration_component)
        Time is 1-D; the two component arrays have shape (num_DOF, n_steps).
        Displacements are relative to the first recorded step.
    """
    h5_file = h5py.File(file_name, 'r')
    Time = h5_file['time'][:]
    displacement_index = int(h5_file['Model/Nodes/Index_to_Generalized_Displacements'][node_ID])
    displacement_component = h5_file['Model/Nodes/Generalized_Displacements'][displacement_index:displacement_index + num_DOF, :]
    acceleration_component = h5_file['Model/Nodes/Generalized_Accelerations'][displacement_index:displacement_index + num_DOF, :]
    # Subtract each DOF's initial value so a self-weight loading stage yields
    # relative displacements.  Vectorized with broadcasting; replaces the
    # Python-2-only xrange loop.
    displacement_component = displacement_component - displacement_component[:, :1]
    return Time, displacement_component, acceleration_component
# --- Analysis configuration -----------------------------------------------
numbercol = 1;
surface_node_ID = 252; ## 252, 250, 249, 251
node_ID = [252, 212, 172, 132, 92, 52, 12]; ## node ID from surface to bottom
depth = [0, 2, 4, 6, 8, 10, 12];
bottom_node_ID = 6; ## node just beyond DRM layer
file_name = 'Motion1C_DRM_propagation.h5.feioutput' ##
parameteric_case = 'Motion1C_Northridge' ##
### ==========================================================================
postfix = '.feioutput';
middle_name_less_than_ten = '0';
num_DOF = 3;
# Extract surface and just-below-DRM responses from the ESSI output file.
Time, displacement_component_surface, acceleration_component_surface = node_response_extraction_sequential(surface_node_ID, file_name, num_DOF);
Time, displacement_component_bottom, acceleration_component_bottom = node_response_extraction_sequential(bottom_node_ID, file_name, num_DOF);
# surface_acc = np.loadtxt('Kobe_acc.txt');
# surface_disp = np.loadtxt('Kobe_disp.txt');
# Reference (analytical) surface motion records: column 0 = time, column 1 = value.
surface_acc = np.loadtxt('scaled_northridge_acc.dat');
surface_disp = np.loadtxt('scaled_northridge_dis.dat');
########################################################################################
#######===== Print acceleration of nodes ===== ######
########################################################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(surface_acc[:, 0], surface_acc[:, 1], '-r', label='surface analytical', linewidth= 1.5);
# NOTE(review): the 200-step / 2.0 s shift presumably aligns the DRM record
# with the reference motion's start time — confirm against the time step used.
ax.plot(Time[200:]-2.0, acceleration_component_surface[0, 200:], '-k', label='DRM propagation', linewidth= 0.5);
plt.gca().set_xlim([0,38]);
# plt.gca().set_ylim([-10,10]);
# plt.gca().get_xaxis().set_ticks(np.arange(0, 60.1, 10))
# plt.gca().get_yaxis().set_ticks(np.arange(-15, 3.1, 3))
plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
plt.xlabel('Time [s]', fontproperties=axial_label_font);
plt.ylabel('Acc. [$m/s^2$]', fontproperties=axial_label_font);
plt.grid(True);
plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
filename = 'acc_check_'+ parameteric_case + '.pdf'
plt.savefig(filename, bbox_inches='tight');
plt.show();
# # # ########################################################################################
# # # #######======================== Print Time series response along the depth ===== ######
# # # ########################################################################################
# print "Plot acceleration records along depth!";
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # scale_meter = 7;
# # plt.gca().text(32.7, 1.25, '$1g$', fontsize=20)
# # l1 = lines.Line2D([32, 32], [0.5, 0.5+10/scale_meter], color='k', linewidth=2.0)
# # l2 = lines.Line2D([31.7, 32.3], [0.5, 0.5], color='k', linewidth=0.5)
# # l3 = lines.Line2D([31.7, 32.3], [0. | 5+10/scale_meter, 0.5+10/scale_meter], color='k', linewidth=0.5)
# # plt.gca().add_line(l1);
# # plt.gca().add_line(l2);
# # plt.gca().add | _line(l3);
# PGA_depth = sp.zeros(len(depth));
# for x in xrange(0,len(node_ID)):
# current_node = node_ID[x];
# current_depth = depth[x];
# Time, current_displacement_component, current_acceleration_component = node_response_extraction_sequential(current_node, file_name, num_DOF);
# plot_current_acceleration = current_depth + current_acceleration_component/15.0; ## scale acceleration
# PGA_depth[x] = max(abs(current_acceleration_component[0, :]));
# ax.plot(Time, plot_current_acceleration[0, :], '-k', linewidth= 1);
# plt.gca().set_ylim([-1,13]);
# plt.gca().invert_yaxis()
# # plt.gca().get_xaxis().set_ticks(np.arange(0, 60.1, 10))
# # plt.gca().get_yaxis().set_ticks(np.arange(-15, 3.1, 3))
# plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
# plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
# plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
# plt.xlabel('Time [s]', fontproperties=axial_label_font);
# plt.ylabel('Depth. [m]', fontproperties=axial_label_font);
# plt.grid(True);
# plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
# filename = 'acc_depth_'+ parameteric_case + '.pdf'
# plt.savefig(filename, bbox_inches='tight');
# plt.show();
|
# -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/build/work/GCC-7-build/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/work/GCC-7-build/install-native/arm-none-eabi/lib/thumb/v7-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path. We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.
    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    # NOTE(review): prefix[-1] raises IndexError when the common prefix is
    # empty — presumably both paths always share a root here; confirm.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)
    # Call a function as a plain import would not execute body of the included file
    # on repeated reloads of this object file.
    from libstdcxx.v6 import register_libstdcxx_printers
    register_libstdcxx_printers(gdb.current_objfile())
|
survey = Survey(name=name)
survey.set_beam(model='airy', n_sidelobes=1)
if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
survey.set_beam(model=name)
surveys.append(survey)
return surveys
def set_up_dirs(self, run=np.nan):
"""Create subdirectory for saving populations.
Returns True if directory had to be set up."""
f = f'{frbpoppy.paths.populations()}mc/'
if not os.path.isdir(f):
os.mkdir(f)
return True
if not np.isnan(run):
f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
if not os.path.isdir(f):
os.mkdir(f)
return True
return False
    def gen_par_set_1(self,
                      parallel=True,
                      lum_min=np.nan,
                      lum_max=np.nan,
                      w_mean=np.nan,
                      w_std=np.nan,
                      dm_igm_slope=np.nan,
                      dm_host=np.nan,
                      run=0):
        """Monte-Carlo parameter set 1: grid over (alpha, si, li) per survey.

        Records every option combination in self.so, then generates a
        population per alpha and saves one SurveyPopulation per
        (alpha, si, li, survey) under mc/run_<run>/<uuid>.
        """
        alphas = np.linspace(-2.5, -1, 11)
        sis = np.linspace(-2, 2, 11)
        lis = np.linspace(-2, 0, 11)
        # Put all options into a dataframe
        if 'run' in self.so.df:
            self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 4)
        df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
        df['run'] = run
        df['par_set'] = 1
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()
        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)
        def iter_alpha(i):
            # One full population per alpha; si and li are varied on top of it.
            alpha = alphas[i]
            pop = CosmicPopulation.complex(self.pop_size)
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
            pop.set_lum(model='constant', value=1)
            if not np.isnan(w_mean):
                pop.set_w(model='lognormal', mean=w_mean, std=w_std)
            if not np.isnan(dm_igm_slope):
                pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
                pop.set_dm_host(model='constant', value=dm_host)
            pop.generate()
            for si in sis:
                pop.set_si(model='constant', value=si)
                pop.gen_si()
                for li in lis:
                    pop.set_lum(model='powerlaw',
                                low=1e40,
                                high=1e45, power=li)
                    # NOTE(review): this branch passes index=li while the call
                    # above uses power=li — confirm which keyword set_lum expects.
                    if not np.isnan(lum_min):
                        pop.set_lum(model='powerlaw', low=lum_min,
                                    high=lum_max, index=li)
                    pop.gen_lum()
                    for survey in self.surveys:
                        surv_pop = SurveyPopulation(pop, survey)
                        # Get unique identifier
                        mask = (self.so.df.par_set == 1)
                        mask &= (self.so.df.run == run)
                        mask &= (self.so.df.alpha == alpha)
                        mask &= (self.so.df.si == si)
                        mask &= (self.so.df.li == li)
                        mask &= (self.so.df.survey == survey.name)
                        # NOTE(review): local 'uuid' shadows the uuid module here.
                        uuid = self.so.df[mask].uuid.iloc[0]
                        surv_pop.name = f'mc/run_{run}/{uuid}'
                        surv_pop.save()
        if parallel:
            n_cpu = min([3, os.cpu_count() - 1])
            pprint(f'{os.cpu_count()} CPUs available')
            r = range(len(alphas))
            Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
        else:
            [iter_alpha(i) for i in tqdm(range(len(alphas)))]
    def gen_par_set_2(self,
                      parallel=True,
                      alpha=-1.5,
                      si=0,
                      w_mean=np.nan,
                      w_std=np.nan,
                      dm_igm_slope=np.nan,
                      dm_host=np.nan,
                      run=np.nan):
        """Monte-Carlo parameter set 2: grid over (li, lum_min, lum_max).

        Builds one base population, then per grid point copies it, applies
        the powerlaw luminosity and saves a SurveyPopulation per survey
        under mc/run_<run>/<uuid>.
        """
        lis = np.linspace(-1.5, 0, 11)
        lum_mins = 10**np.linspace(38, 46, 11)
        lum_maxs = 10**np.linspace(38, 46, 11)
        # Put all options into a dataframe
        self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 4)
        cols = ('li', 'lum_min', 'lum_max', 'survey')
        df = pd.DataFrame(options, columns=cols)
        df['par_set'] = 2
        df['run'] = run
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        # Drop grid points where the luminosity bounds are inverted.
        df = df[~(df.lum_max < df.lum_min)]
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()
        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)
        pop = CosmicPopulation.complex(self.pop_size)
        if not np.isnan(alpha):
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
            pop.set_si(model='constant', value=si)
        pop.set_lum(model='constant', value=1)
        if not np.isnan(w_mean):
            pop.set_w(model='lognormal', mean=w_mean, std=w_std)
        if not np.isnan(dm_igm_slope):
            pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
            pop.set_dm_host(model='constant', value=dm_host)
        pop.generate()
        def adapt_pop(e):
            # e = (li, lum_min, lum_max); reuses the base pop via deepcopy.
            li, lum_min, lum_max = e
            if lum_max < lum_min:
                return
            t_pop = deepcopy(pop)
            t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
                          power=li)
            t_pop.gen_lum()
            for survey in self.surveys:
                surv_pop = SurveyPopulation(t_pop, survey)
                # Get unique identifier
                mask = (self.so.df.par_set == 2)
                mask &= (self.so.df.run == run)
                mask &= (self.so.df.li == li)
                mask &= (self.so.df.lum_min == lum_min)
                mask &= (self.so.df.lum_max == lum_max)
                mask &= (self.so.df.survey == survey.name)
                # NOTE(review): local 'uuid' shadows the uuid module here.
                uuid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uuid}'
                surv_pop.save()
        n_cpu = min([3, os.cpu_count() - 1])
        pprint(f'{os.cpu_count()} CPUs available')
        mg = np.meshgrid(lis, lum_mins, lum_maxs)
        loop = np.array(mg).T.reshape(-1, 3)
        if parallel:
            Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
        else:
            [adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_3(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
w_means = 10**np.linspace(-3, 1, 11)
w_stds = np.linspace(0, 3, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(w_means, w_stds, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('w_mean', 'w_std', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 3
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs |
class Solution(object):
    def imageSmoother(self, M):
        """
        :type M: List[List[int]]
        :rtype: List[List[int]]

        Return the smoothed matrix where each cell is the floor of the
        average of itself and its in-bounds 8-neighbours.

        Fixed for Python 3: the original used the Python-2-only `xrange`
        and relied on py2 integer `/`; `range` and `//` behave identically
        under Python 2 and preserve the intended floor division on Python 3.
        """
        rows, cols = len(M), len(M[0])
        ans = [[0] * cols for _ in range(rows)]
        for i in range(rows):
            for j in range(cols):
                count = 0
                total = 0
                # Visit the 3x3 neighbourhood, skipping out-of-bounds cells.
                for p in range(-1, 2):
                    for q in range(-1, 2):
                        r, c = i + p, j + q
                        if r < 0 or r >= rows or c < 0 or c >= cols:
                            continue
                        count += 1
                        total += M[r][c]
                ans[i][j] = total // count
        return ans
|
"""Help to make choice"""
# PYTHON STUFFS #######################################################
import random
import shlex
from nemubot import context
from nemubot.exception import IMException
from nemubot.hooks import hook
from nemubot.module.more import Response
# MODULE INTERFACE ####################################################
@hook.command("choice")
def cmd_choice(msg):
    """Reply with one of the given terms, picked uniformly at random."""
    if not len(msg.args):
        raise IMException("indicate some terms to pick!")
    picked = random.choice(msg.args)
    return Response(picked, channel=msg.channel, nick=msg.frm)
@hook.command("choicecmd")
def cmd_choicecmd(msg):
    """Pick one of the given commands at random and execute it."""
    if not len(msg.args):
        raise IMException("indicate some command to pick!")
    choice = shlex.split(random.choice(msg.args))
    return list(context.subtreat(context.subparse(msg, choice)))
@hook.command("choiceres")
def cmd_choiceres(msg):
    """Run the given command and reply with one randomly chosen message
    from its responses."""
    if not len(msg.args):
        raise IMException("indicate some command to pick a message from!")
    rl = [x for x in context.subtreat(context.subparse(msg, " ".join(msg.args)))]
    if len(rl) <= 0:
        return rl
    r = random.choice(rl)
    if isinstance(r, Response):
        # Collapse the response to a single randomly chosen message.
        # NOTE(review): r.messages is reassigned to a one-element list while
        # still being indexed by `i` from the old length; for an original
        # length >= 3 with a late list/str element this raises IndexError on
        # the next iteration - confirm intended message structure.
        for i in range(len(r.messages) - 1, -1, -1):
            if isinstance(r.messages[i], list):
                r.messages = [ random.choice(random.choice(r.messages)) ]
            elif isinstance(r.messages[i], str):
                r.messages = [ random.choice(r.messages) ]
    return r
|
from datetime import datetime
import uuid
class Torrent(object):
    """Value object describing one torrent search result from a tracker."""

    def __init__(self):
        # Metadata fields are populated by the tracker-specific scrapers.
        self.tracker = None
        self.url = None
        self.title = None
        self.magnet = None
        self.seeders = None
        self.leechers = None
        self.size = None      # size in bytes
        self.date = None      # publication datetime
        self.details = None
        self.uuid = uuid.uuid4().hex  # unique identifier for this result
        self._remove = False          # marks the entry for removal by callers

    @property
    def human_age(self):
        """Age in whole days since publication, or "Unknown" without a date."""
        if not self.date:
            return "Unknown"
        age = datetime.now() - self.date
        days = int(age.total_seconds() / (60 * 60 * 24))
        return "%s days" % days

    @property
    def human_size(self):
        """Size rendered as GB/MB/KB (None when the size is unset)."""
        size = self.size
        if not size:
            return None
        if size > 1000000000:
            return "%.2f GB" % (size / 1000000000)
        if size > 1000000:
            return "%.2f MB" % (size / 1000000)
        return "%s KB" % (size / 1000)

    @property
    def html_friendly_title(self):
        """Title with zero-width breaks so long names can wrap in HTML."""
        return self.title.replace('.', '.​').replace('[', '​[').replace(']', ']​')

    def __unicode__(self):
        fields = (self.title.ljust(60)[0:60],
                  str(self.human_size).ljust(12),
                  str(self.seeders).ljust(6),
                  self.human_age,
                  self.tracker)
        return "%s Size: %s Seeders: %s Age: %s %s" % fields

    def __str__(self):
        return self.__unicode__()
|
from . import services
def prep_rules(rules):
    """Return the prepped form of every enabled rule, preserving order."""
    return [prep_rule(rule) for rule in rules if rule['enabled']]
def prep_rule(raw_rule):
    """Return a copy of *raw_rule* with proto/port expanded from its service.

    Rules whose service is 'custom' are returned as a plain copy.
    Raises ValueError when the service cannot be decoded.
    """
    rule = dict(raw_rule)
    if rule['service'] == 'custom':
        return rule
    proto, port = services.decode_service(rule['service'])
    if not (proto and port):
        raise ValueError("Unknown service: {service}".format(
            service=rule['service']
        ))
    rule['proto'] = proto
    rule['port'] = port
    # Auto-generate a comment when none was provided.
    if not rule['comment']:
        rule['comment'] = "{service} service ({proto}:{port})".format(
            service=rule['service'],
            proto=proto,
            port=port
        )
    return rule
|
self._operator_map = operator_map
def get_options(self):
return self._operator_map.get_options()
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
    def handle_And(self, op):
        '''and'''
        # Logical conjunction maps to mongo's $and operator.
        # NOTE: the short docstring above may be surfaced as an option label;
        # kept verbatim.
        return '$and'
    def handle_Or(self, op):
        '''or'''
        # Logical disjunction maps to mongo's $or operator.
        return '$or'
def handle_UnaryOp(self, op):
operator = self.handle(op.operand)
field, value = list(operator.items())[0]
return {field: {self.handle(op.op): value}}
    def handle_Not(self, not_node):
        '''not'''
        # Negation maps to mongo's $not operator.
        return '$not'
def handle_Compare(self, compare):
if len(compare.comparators) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(compare.comparators)),
col_offset=compare.comparators[1].col_offset)
return self._operator_map.handle(left=compare.left,
operator=compare.ops[0],
right=compare.comparators[0])
class SchemaFreeParser(Parser):
    """Parser that accepts any field name (no schema validation)."""
    def __init__(self):
        super(SchemaFreeParser, self).__init__(SchemaFreeOperatorMap())
class SchemaAwareParser(Parser):
    """Parser that validates fields against a field->type schema mapping."""
    def __init__(self, *a, **k):
        super(SchemaAwareParser, self).__init__(SchemaAwareOperatorMap(*a, **k))
class FieldName(AstHandler):
    """Resolve an AST node into a (possibly dotted) field-name string."""

    def handle_Str(self, node):
        # Quoted field name, e.g. "a.b".
        return node.s

    def handle_Name(self, name):
        # Bare identifier.
        return name.id

    def handle_Attribute(self, attr):
        # Dotted access: resolve the prefix, then append the attribute.
        return '%s.%s' % (self.handle(attr.value), attr.attr)
class OperatorMap(object):
    """Translate `left <op> right` comparisons into mongo query fragments."""

    def resolve_field(self, node):
        """Turn the left-hand AST node into a dotted field-name string."""
        return FieldName().handle(node)

    def handle(self, operator, left, right):
        """Build {field: <operator clause>} for one comparison."""
        field = self.resolve_field(left)
        field_type = self.resolve_type(field)
        return {field: field_type.handle_operator_and_right(operator, right)}
class SchemaFreeOperatorMap(OperatorMap):
    """Operator map that accepts any field."""
    def get_options(self):
        # No schema, so no field suggestions can be offered.
        return None
    def resolve_type(self, field):
        # Every field is handled by the permissive generic handler.
        return GenericField()
class SchemaAwareOperatorMap(OperatorMap):
    """Operator map that restricts fields and their types to a known schema."""
    def __init__(self, field_to_type):
        # Mapping of field name -> field handler instance.
        self._field_to_type = field_to_type
    def resolve_field(self, node):
        # Resolve the name, then verify it exists in the schema.
        field = super(SchemaAwareOperatorMap, self).resolve_field(node)
        try:
            self._field_to_type[field]
        except KeyError:
            raise ParseError('Field not found: {0}.'.format(field),
                             col_offset=node.col_offset,
                             options=self._field_to_type.keys())
        return field
    def resolve_type(self, field):
        return self._field_to_type[field]
#---Function-Handlers---#
class Func(AstHandler):
    """Base handler for function-call syntax in queries, e.g. exists(True)."""

    @staticmethod
    def get_arg(node, index):
        """Return the index-th positional argument or raise ParseError."""
        if index >= len(node.args):
            raise ParseError('Missing argument in {0}.'.format(node.func.id),
                             col_offset=node.col_offset)
        return node.args[index]

    @staticmethod
    def parse_arg(node, index, field):
        """Fetch argument *index* and parse it with the given field handler."""
        return field.handle(Func.get_arg(node, index))

    def handle(self, node):
        """Dispatch to the handle_<name> method for the called function."""
        func_name = node.func.id
        try:
            handler = getattr(self, 'handle_' + func_name)
        except AttributeError:
            raise ParseError('Unsupported function ({0}).'.format(func_name),
                             col_offset=node.col_offset,
                             options=self.get_options())
        return handler(node)

    def handle_exists(self, node):
        """exists(bool) -> {'$exists': bool}"""
        return {'$exists': self.parse_arg(node, 0, BoolField())}

    def handle_type(self, node):
        """type(n) -> {'$type': n}"""
        return {'$type': self.parse_arg(node, 0, IntField())}
class StringFunc(Func):
    """String query functions."""

    def handle_regex(self, node):
        """regex(pattern[, options]) -> {'$regex': ..., '$options': ...}."""
        result = {'$regex': self.parse_arg(node, 0, StringField())}
        try:
            options = self.parse_arg(node, 1, StringField())
        except ParseError:
            # The options argument is optional.
            return result
        result['$options'] = options
        return result
class IntFunc(Func):
    """Integer query functions."""

    def handle_mod(self, node):
        """mod(divisor, remainder) -> {'$mod': [divisor, remainder]}."""
        divisor = self.parse_arg(node, 0, IntField())
        remainder = self.parse_arg(node, 1, IntField())
        return {'$mod': [divisor, remainder]}
class ListFunc(Func):
    """List/array query functions."""
    def handle_size(self, node):
        # size(n) -> {'$size': n}
        return {'$size': self.parse_arg(node, 0, IntField())}
    def handle_all(self, node):
        # all([...]) -> {'$all': [...]}
        return {'$all': self.parse_arg(node, 0, ListField())}
    def handle_match(self, node):
        # match({...}) -> {'$elemMatch': {...}}
        return {'$elemMatch': self.parse_arg(node, 0, DictField())}
class DateTimeFunc(Func):
    """Date/time query functions."""
    def handle_date(self, node):
        # date("...") -> parsed datetime value.
        return parse_date(self.get_arg(node, 0))
class IdFunc(Func):
    """Document id query functions."""
    def handle_id(self, node):
        # id("...") -> value parsed by the id field handler.
        return self.parse_arg(node, 0, IdField())
class EpochFunc(Func):
    """Epoch timestamp query functions."""
    def handle_epoch(self, node):
        # epoch(n) -> value parsed by the epoch field handler.
        return self.parse_arg(node, 0, EpochField())
class EpochUTCFunc(Func):
    """UTC epoch timestamp query functions."""
    def handle_epoch_utc(self, node):
        # epoch_utc(n) -> value parsed by the UTC epoch field handler.
        return self.parse_arg(node, 0, EpochUTCField())
class GeoShapeFuncParser(Func):
    """Parse geometry-constructing calls into mongo geo query clauses."""

    @staticmethod
    def _geometry(geo_type, coordinates):
        # GeoJSON-style $geometry wrapper shared by Point/LineString/Polygon.
        return {'$geometry': {'type': geo_type, 'coordinates': coordinates}}

    def handle_Point(self, node):
        coords = [self.parse_arg(node, 0, IntField()),
                  self.parse_arg(node, 1, IntField())]
        return self._geometry('Point', coords)

    def handle_LineString(self, node):
        coords = self.parse_arg(node, 0, ListField(ListField(IntField())))
        return self._geometry('LineString', coords)

    def handle_Polygon(self, node):
        coords = self.parse_arg(node, 0, ListField(ListField(ListField(IntField()))))
        return self._geometry('Polygon', coords)

    def handle_box(self, node):
        # Legacy (non-GeoJSON) box selector.
        return {'$box': self.parse_arg(node, 0, ListField(ListField(IntField())))}

    def handle_polygon(self, node):
        # Legacy (non-GeoJSON) polygon selector.
        return {'$polygon': self.parse_arg(node, 0, ListField(ListField(IntField())))}

    def _any_center(self, node, center_name):
        # $center / $centerSphere share the [[x, y], radius] argument shape.
        return {center_name: [self.parse_arg(node, 0, ListField(IntField())),
                              self.parse_arg(node, 1, IntField())]}

    def handle_center(self, node):
        return self._any_center(node, '$center')

    def handle_centerSphere(self, node):
        return self._any_center(node, '$centerSphere')
class GeoShapeParser(AstHandler):
    """Parse a geo shape: either a constructor call or a legacy [x, y] pair."""
    def handle_Call(self, node):
        # Delegate Point(...), box(...), etc. to the function parser.
        return GeoShapeFuncParser().handle(node)
    def handle_List(self, node):
        '''
        Legacy coordinate pair, e.g. [x, y]. Consider supporting box,
        polygon, center and centerSphere instead.
        '''
        return ListField(IntField()).handle(node)
class GeoFunc(Func):
    """Geospatial query functions."""

    def _any_near(self, node, near_name):
        """Shared builder for $near / $nearSphere clauses."""
        shape = GeoShapeParser().handle(self.get_arg(node, 0))
        # SON keeps the command key ahead of its arguments, as mongo expects.
        result = bson.SON({near_name: shape})
        if len(node.args) > 1:
            distance = self.parse_arg(node, 1, IntField())  # meters
            # Legacy coordinate pairs take $maxDistance at the top level;
            # GeoJSON shapes carry it inside the shape document.
            if isinstance(shape, list):
                result['$maxDistance'] = distance
            else:
                shape['$maxDistance'] = distance
        return result

    def handle_near(self, node):
        return self._any_near(node, '$near')

    def handle_nearSphere(self, node):
        return self._any_near(node, '$nearSphere')

    def handle_geoIntersects(self, node):
        shape = GeoShapeParser().handle(self.get_arg(node, 0))
        return {'$geoIntersects': shape}

    def handle_geoWithin(self, node):
        shape = GeoShapeParser().handle(self.get_arg(node, 0))
        return {'$geoWithin': shape}
class GenericFunc(StringFunc, IntFunc, ListFunc, DateTimeFunc,
                  IdFunc, EpochFunc, EpochUTCFunc, GeoFunc):
    """Aggregates every function handler for schema-free parsing."""
    pass
#---Operators---#
class Operator(AstHandler):
    def __init__(self, field):
        # Field handler used to parse right-hand-side values.
        self.field = field
    def handle_Eq(self, node):
        '''=='''
        # Equality is expressed as the bare parsed value in mongo queries.
        return self.field.handle(node)
    def handle_NotEq(self, node):
        '''!='''
        # Inequality wraps the parsed value in $ne.
        return {'$ne': self.field.handle(node)}
def handle_In(self, node):
'''in'''
try:
elts = node.elts
except AttributeError:
raise ParseError('Invalid value type for `in` operator: {0}'.format(node.__class__.__name__),
col_offset=node.col_offset)
return {'$in': list(map(self.field.handle, elts))}
|
from __future__ import unicode_literals
from . import exceptions
# Base URL of the AcoustID web service API.
DEFAULT_HOST = 'http://api.acoustid.org/'
# Response serialization formats the API can return.
FORMATS = ('json', 'jsonp', 'xml')
# Valid values for the lookup "meta" parameter (what metadata to include).
META = (
    'recordings', 'recordingids', 'releases', 'releaseids',
    'releasegroups', 'releasegroupids', 'tracks', 'compress',
    'usermeta', 'sources'
)
# AcoustID numeric error codes mapped to the exception raised for each.
ERRORS = {
    1: exceptions.UnknownFormat,
    2: exceptions.MissingParameter,
    3: exceptions.InvalidFingerprint,
    4: exceptions.InvalidClientKey,
    5: exceptions.InternalError,
    6: exceptions.InvalidUserApiKey,
    7: exceptions.InvalidUUID,
    8: exceptions.InvalidDuration,
    9: exceptions.InvalidBitrate,
    10: exceptions.InvalidForeignID,
    11: exceptions.InvalidMaxDurationDiff,
    12: exceptions.NotAllowed,
    13: exceptions.ServiceUnavailable,
    14: exceptions.TooManyRequests,
}
|
# Records audio from the NAO robot's microphones and plays it back.
# NOTE: Python 2 script (print statements, raw_input) using the naoqi SDK.
import sys
import time
from naoqi import ALProxy
# Connection details of the robot.
IP = "nao.local"
PORT = 9559
# The output file's base name is expected as the single CLI argument.
if (len(sys.argv) < 2):
    print "Usage: 'python RecordAudio.py nume'"
    sys.exit(1)
fileName = "/home/nao/" + sys.argv[1] + ".wav"
aur = ALProxy("ALAudioRecorder", IP, PORT)
# One flag per microphone channel; only the third is enabled.
# NOTE(review): channel order (front/rear/left/right) is not documented here - confirm.
channels = [0,0,1,0]
# NOTE(review): the 160000 Hz sample rate looks like a typo for 16000 - confirm.
aur.startMicrophonesRecording(fileName, "wav", 160000, channels)
c=raw_input("Sfarsit?")
aur.stopMicrophonesRecording()
c=raw_input("play?")
aup = ALProxy("ALAudioPlayer", IP, PORT)
# Launches the playing of a file (volume 0.5, panned left).
aup.playFile(fileName,0.5,-1.0)
c=raw_input("gata?")
# Launches the playing of a file
#aup.playFile("/usr/share/naoqi/wav/random.wav")
# Launches the playing of a file on the left speaker to a volume of 50%
#aup.playFile("/usr/share/naoqi/wav/random.wav",0.5,-1.0)
import os, sys, shutil
import zipfile
from zipfile import ZipFile
from urllib import urlretrieve
from subprocess import Popen, PIPE
from distutils.cmd import Command
def zip_directory(dir, zip_file):
    """Recursively zip the contents of *dir* into the archive *zip_file*.

    Entries are stored relative to *dir* and deflate-compressed.
    (Parameter name `dir` kept for interface compatibility despite shadowing
    the builtin; the archive handle was renamed from `zip` and is now closed
    even when a write fails.)
    """
    archive = ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED)
    try:
        root_len = len(os.path.abspath(dir))
        for root, _dirs, files in os.walk(dir):
            # Path of this directory relative to the archive root.
            archive_root = os.path.abspath(root)[root_len:]
            for f in files:
                fullpath = os.path.join(root, f)
                archive_name = os.path.join(archive_root, f)
                archive.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
    finally:
        archive.close()
class WindowsPortableBuild(Command):
    """distutils command that assembles a self-contained win32 PyMT package.

    Downloads binary dependencies, unpacks a freshly built source
    distribution into the build dir, optionally compiles C extensions
    in place, and zips the result into <dist_name>-w32.zip.
    NOTE: Python 2 script (print statements).
    """
    description = "custom build command that builds portable win32 package"
    user_options = [
        ('dist-dir=', None,
         "path of dist directory to use for building portable pymt, the end result will be output to this driectory. default to cwd."),
        ('deps-url=', None,
         "url of binary dependancies for portable pymt package default: http://pymt.googlecode.com/files/portable-deps-win32.zip"),
        ('no-cext', None,
         "flag to disable building of c extensions"),
        ('no-mingw', None,
         "flag to disable bundling of mingw compiler for compiling c/cython extensions")
    ]
    def initialize_options(self):
        # distutils hook: declare defaults for all user options.
        self.dist_dir = None
        self.deps_url = None
        self.no_cext = None
        self.no_mingw = None
    def finalize_options(self):
        # distutils hook: fill in defaults and compute derived paths.
        if not self.deps_url:
            self.deps_url = 'http://pymt.googlecode.com/files/portable-deps-win32.zip'
        if not self.dist_dir:
            self.dist_dir = os.getcwd()
        # Directory containing the setup.py that launched this build.
        self.src_dir = os.path.dirname(sys.modules['__main__'].__file__)
        self.dist_name = self.distribution.get_fullname() # e.g. PyMT-0.5 (name and version passed to setup())
        self.build_dir = os.path.join(self.dist_dir, self.dist_name+'-w32')
    def run(self):
        """Execute the full portable-build pipeline."""
        print "---------------------------------"
        print "Building PyMT Portable for Win 32"
        print "---------------------------------"
        print "\nPreparing Build..."
        print "---------------------------------------"
        # Start from a clean build directory.
        if os.path.exists(self.build_dir):
            print "*Cleaning old build dir"
            shutil.rmtree(self.build_dir, ignore_errors=True)
        print "*Creating build directory:"
        print " "+self.build_dir
        os.makedirs(self.build_dir)
        print "\nGetting binary dependencies..."
        print "---------------------------------------"
        print "*Downloading:", self.deps_url
        # report_hook is called every time a piece of the file is downloaded to print progress
        def report_hook(block_count, block_size, total_size):
            p = block_count*block_size*100.0/total_size
            print "\b\b\b\b\b\b\b\b\b", "%06.2f"%p +"%",
        print " Progress: 000.00%",
        urlretrieve(self.deps_url, #location of binary dependencies needed for portable pymt
                    os.path.join(self.build_dir,'deps.zip'), #tmp file to store the archive
                    reporthook=report_hook)
        print " [Done]"
        print "*Extracting binary dependencies..."
        zf = ZipFile(os.path.join(self.build_dir,'deps.zip'))
        zf.extractall(self.build_dir)
        zf.close()
        if self.no_mingw:
            print "*Excluding MinGW from portable distribution (--no-mingw option is set)"
            shutil.rmtree(os.path.join(self.build_dir, 'MinGW'), ignore_errors=True)
        print "\nPutting pymt into portable environment"
        print "---------------------------------------"
        print "*Building pymt source distribution"
        sdist_cmd = [sys.executable, #path to python.exe
                     os.path.join(self.src_dir,'setup.py'), #path to setup.py
                     'sdist', #make setup.py create a src distribution
                     '--dist-dir=%s'%self.build_dir] #put it into build folder
        Popen(sdist_cmd, stdout=PIPE, stderr=PIPE).communicate()
        print "*Placing pymt source distribution in portable context"
        src_dist = os.path.join(self.build_dir,self.dist_name)
        zf = ZipFile(src_dist+'.zip')
        zf.extractall(self.build_dir)
        zf.close()
        if self.no_mingw or self.no_cext:
            print "*Skipping C Extension build (either --no_cext or --no_mingw option set)"
        else:
            print "*Compiling C Extensions inplace for portable distribution"
            cext_cmd = [sys.executable, #path to python.exe
                        'setup.py',
                        'build_ext', #make setup.py create a src distribution
                        '--inplace'] #do it inplace
            #this time it runs the setup.py inside the source distribution
            #that has been generated inside the build dir (to generate ext
            #for the target, instead of the source were building from)
            Popen(cext_cmd, cwd=src_dist, stdout=PIPE, stderr=PIPE).communicate()
        print "\nFinalizing pymt portable distribution..."
        print "---------------------------------------"
        print "*Copying scripts and resources"
        #copy launcher script and readme to portable root dir/build dir
        pymt_bat = os.path.join(src_dist,'pymt','tools','packaging','win32', 'pymt.bat')
        shutil.copy(pymt_bat, os.path.join(self.build_dir, 'pymt.bat'))
        readme = os.path.join(src_dist,'pymt','tools','packaging','win32', 'README.txt')
        shutil.copy(readme, os.path.join(self.build_dir, 'README.txt'))
        #rename pymt directory to "pymt"
        os.rename(src_dist, os.path.join(self.build_dir,'pymt'))
        print "*Removing intermediate file"
        os.remove(os.path.join(self.build_dir,'deps.zip'))
        os.remove(os.path.join(self.build_dir,src_dist+'.zip'))
        print "*Compressing portable distribution target"
        target = os.path.join(self.dist_dir, self.dist_name+"-w32.zip")
        zip_directory(self.build_dir, target)
        print "*Writing target:", target
        print "*Removing build dir"
        shutil.rmtree(self.build_dir, ignore_errors=True)
|
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and | limi | tations
# under the License.
''' Nuage specific exceptions '''
from neutron.common import exceptions as n_exc
class OperationNotSupported(n_exc.InvalidConfigurationOption):
    # Raised when a caller requests an operation the Nuage plugin cannot perform.
    # NOTE(review): _() is presumably the translation hook installed by
    # neutron's i18n setup - confirm it is in builtins at import time.
    message = _("Nuage Plugin does not support this operation: %(msg)s")
class NuageBadRequest(n_exc.BadRequest):
    # Raised for malformed or invalid requests handled by the Nuage plugin.
    message = _("Bad request: %(msg)s")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-26 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Occupancy model."""
    # First migration of the app, so it has no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Occupancy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('room_name', models.CharField(max_length=255)),
                ('occupancy', models.IntegerField()),
                ('timestamp', models.DateField()),
            ],
        ),
    ]
|
from djan | go import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
    """Template filter: return the final path component of *value*."""
    return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
    """Substitute #FIRSTNAME# / #LASTNAME# macros with the user's names."""
    first = user_dict['first_name'].strip()
    last = user_dict['last_name'].strip()
    return value.replace("#FIRSTNAME#", first).replace("#LASTNAME#", last)
@register.filter(name='state_label_css')
def state_label_css(subm):
    """Pick a Bootstrap badge CSS class for a submission's state."""
    green_label = "badge label label-success"
    red_label = "badge label label-important"
    grey_label = "badge label label-info"
    # We expect a submission as input.
    # Closed, graded submissions are colored by pass/fail.
    if subm.is_closed() and subm.grading:
        return green_label if subm.grading.means_passed else red_label
    green_states = (subm.SUBMITTED_TESTED,
                    subm.SUBMITTED,
                    subm.TEST_FULL_PENDING,
                    subm.GRADED,
                    subm.TEST_FULL_FAILED)
    if subm.state in green_states:
        return green_label
    if subm.state == subm.TEST_VALIDITY_FAILED:
        return red_label
    return grey_label
@register.assignment_tag
def setting(name):
    """Expose the named Django setting to templates ("" when unset)."""
    # NOTE(review): assignment_tag was removed in Django 2.0; switch to
    # simple_tag if this project upgrades - confirm the targeted version.
    return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
    """Render the details table for a submission."""
    return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
    """Render an assignment deadline, including the timeout display."""
    return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
    """Render an assignment deadline without the timeout display."""
    return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
    """Render the grading information for a submission."""
    return {'submission': submission}
|
"""
This script is a starting point for new Blocks users already familiar with
Machine Learning and Theano.
We demonstrate how to use blocks to train a generic set of parameters (theano
shared variables) that influence some arbitrary cost function (theano
symbolic variable), so you can start using blocks features (e.g. monitoring,
extensions, training algorithms) with your Theano code today.
To run an experiment, we simply construct a main_loop.MainLoop and call its
run() method. It suffices to pass the MainLoop a blocks.model.Model
(which needs the cost), a blocks.algorithms.TrainingAlgorithm (which needs the
cost and parameters), and a fuel.streams.DataStream*
As it is the script will run indefinitely, with no output. You can interrupt
training anytime with Ctrl+C, or termination conditions can be added
via extensions.
*The DataStream object is part of the partner library Fuel
(https://github.com/mila-udem/fuel).
"""
import numpy
np = numpy
import theano
import theano.tensor as T
# (Here we make a toy dataset of two 2D gaussians with different means.)
num_examples = 1000
batch_size = 100
means = np.array([[-1., -1.], [1, 1]])
std = 0.5
# np.random.randint's `high` bound is exclusive: high=2 draws labels from
# {0, 1}, one per gaussian. (The previous high=1 always produced label 0,
# so every example came from the same gaussian.)
labels = np.random.randint(size=num_examples, low=0, high=2)
features = means[labels, :] + std * np.random.normal(size=(num_examples, 2))
labels = labels.reshape((num_examples, 1)).astype(theano.config.floatX)
features = features.astype(theano.config.floatX)
# Define "data_stream"
from collections import OrderedDict
from fuel.datasets import IndexableDataset
# The names here (e.g. 'name1') need to match the names of the variables which
# are the roots of the computational graph for the cost.
dataset = IndexableDataset(
    OrderedDict([('name1', features), ('name2', labels)]))
from fuel.streams import DataStream, ForceFloatX
from fuel.schemes import SequentialScheme
data_stream = ForceFloatX(DataStream(dataset,
                          iteration_scheme=SequentialScheme(
                              dataset.num_examples, batch_size)))
# Define "cost" and "parameters"
# (We use logistic regression to classify points by distribution)
inputs = T.matrix('name1')
targets = T.matrix('name2')
ninp, nout = 2, 1
W = theano.shared(.01*np.random.uniform(
    size=((ninp, nout))).astype(theano.config.floatX))
b = theano.shared(np.zeros(nout).astype(theano.config.floatX))
output = T.nnet.sigmoid(T.dot(inputs, W) + b)
# a theano symbolic expression
cost = T.mean(T.nnet.binary_crossentropy(output, targets))
# a list of theano.shared variables
parameters = [W, b]
# wrap everything in Blocks objects and run!
from blocks.model import Model
model = Model([cost])
from blocks.algorithms import GradientDescent, Scale
algorithm = GradientDescent(cost=cost,
                            parameters=parameters,
                            step_rule=Scale(learning_rate=.01))
from blocks.main_loop import MainLoop
my_loop = MainLoop(model=model,
                   data_stream=data_stream,
                   algorithm=algorithm)
my_loop.run()
|
if men.point == 2:
screen.blit(lab_help_red, (342, 380))
else:
screen.blit(lab_help, (342, 380))
if men.point == 3:
screen.blit(lab_credits_red, (359, 430))
else:
screen.blit(lab_credits, (359, 430))
pygame.display.update()
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_DOWN:
if men.point < 3:
men.point += 1
laserFX.play()
if event.key == K_UP:
if men.point > 1:
men.point -= 1
laserFX.play()
if event.key == K_RETURN:
if men.point == 1:
start = True
menutheme.stop()
leveltheme.play()
if men.point == 2:
men.location = 2
if men.point == 3:
men.location = 3
if event.key == K_ESCAPE:
if men.location == 2 or men.location == 3:
men.location = 1
if event.type == QUIT:
pygame.quit()
sys.exit()
# Player object; arguments look like (x, y, width, height) - TODO confirm
# against cplayer's definition.
playerobj = cplayer(100, 250, 100, 100)
# Initializing messages
getreadymessage1 = stdfont.render("The Battle Approaches...",
                                  True, (255, 255, 255))
getreadymessage = stdfont.render("PREPARE YOURSELF!", True, (255, 255, 0))
endmsg1 = stdfont.render("People die in war.", True, (0, 0, 0))
endmsg2 = stdfont.render("Good night, sweet prince.", True, (0, 0, 0))
# Initializing enemy objects; the trailing numbers are presumably speeds or
# counts - TODO confirm against the cenemy* constructors.
e1 = cenemy1(HORI_RES, VERT_RES, 5)
e2 = cenemy2(HORI_RES, VERT_RES, 3)
e3 = cenemy3(HORI_RES, VERT_RES)
b1 = cboss1(HORI_RES, VERT_RES)
b2 = cboss2(HORI_RES, VERT_RES)
e4 = cenemy4(HORI_RES, VERT_RES, 2)
# Explosion object
expobj = C_Explosion()
# Detector (collision-detection helper)
detect = cDetect()
# Main game loop
while True:
scorelabel = stdfont.render("Hits: " + str(score), True,
(255, 255, 255))
healthlabel = stdfont.render("Health: " + str(playerobj.health), True,
(255, 255, 255))
energylabel = stdfont.render("Energy: " + str(playerobj.en | ergy), True,
(225, 225, 255))
collidelabel = stdfont_bold.render("Collision Detected!", True,
(225, 0, 0))
clock.tick(FPS)
timer += 1
if flash < 11:
flash += 1
# -----------------------------------------------------
# start of stage control code
# Starting the game after the initial delay
if stage == 0 and timer > 0:
stage = 1
# Refreshing enemy 1 during STAGE 1 and STAGE 3
# 2nd and 3rd instance of enemy type 1
if stage == 1:
if timer > 0:
if not e1.death[0]:
e1.refresh(0)
if e1.xpos[0] < -40:
e1.respawn(0)
if timer > 60:
if not e1.death[1]:
e1.refresh(1)
if e1.xpos[1] < -40:
e1.respawn(1)
if timer > 80:
if not e1.death[2]:
e1.refresh(2)
if e1.xpos[2] < -40:
e1.respawn(2)
if timer > 100:
if not e1.death[3]:
e1.refresh(3)
if e1.xpos[3] < -40:
e1.respawn(3)
if timer > 120:
if not e1.death[4]:
e1.refresh(4)
if e1.xpos[4] < -40:
e1.respawn(4)
# When kill quota is reached in stage 1
if stage == 1 and quota > 10:
for ennum in range(0, 5):
expobj.addexplosion(e1.xpos[ennum], e1.ypos[ennum])
quota = 0
b1 = cboss1(HORI_RES, VERT_RES)
stage = 2
leveltheme.stop()
bossfight.play()
# When boss1 dies during STAGE 2
if stage == 2:
b1.refresh()
if b1.health < 1:
stage = 3
e1.respawn(0)
e2.respawn(0)
delay1 = 0
bossfight.stop()
leveltheme.play()
# Refreshing enemy2 during STAGE 3
if stage == 3:
if not e1.death[0]:
e1.refresh(0)
if e1.xpos[0] < -40:
e1.respawn(0)
if not e2.death[0]:
e2.refresh(0)
if e2.xpos[0] < -40:
e2.respawn(0)
if delay1 < 76:
delay1 += 1
if delay1 == 50:
e2.respawn(1)
if delay1 == 75:
e2.respawn(2)
delay1 = 76
if delay1 > 50:
if not e2.death[1]:
e2.refresh(1)
if e2.xpos[1] < -40:
e2.respawn(1)
if delay1 > 75:
if not e2.death[2]:
e2.refresh(2)
if e2.xpos[2] < -40:
e2.respawn(2)
# When kill quota is reached during STAGE 3
if stage == 3 and quota > 10:
expobj.addexplosion(e1.xpos[0], e1.ypos[0])
for ennum in range(0, 3):
expobj.addexplosion(e2.xpos[ennum], e2.ypos[ennum])
quota = 0
b2 = cboss2(HORI_RES, VERT_RES)
stage = 4
leveltheme.stop()
boss2fight.play()
# Refreshing boss2 during STAGE 4
if stage == 4:
b2.refresh()
if b2.health < 1:
stage = 5
quota = 0
e3.respawn()
e2.respawn(0)
boss2fight.stop()
leveltheme.play()
if stage == 5:
if not e3.death:
e3.refresh()
if e3.xpos < -40:
e3.respawn()
if not e2.death[0]:
e2.refresh(0)
if e2.xpos[0] < -40:
e2.respawn(0)
if quota > 10:
stage = 6
quota = 0
b3 = cboss3(HORI_RES, VERT_RES)
leveltheme.stop()
boss3fight.play()
if stage == 6:
b3.refresh()
if b3.health < 1:
quota = 0
stage = 7
delay1 = 0
e4.respawn(0)
boss3fight.stop()
leveltheme.play()
if stage == 7:
if delay1 < 50:
delay1 += 1
if delay1 == 50:
e4.respawn(1)
delay1 = 51
if not e4.death[0]:
e4.refresh(0, playerobj.ypos)
if e4.xpos[0] < -40:
e4.respawn(0)
if delay1 == 51:
if not e4.death[1]:
e4.refresh(1, playerobj.ypos)
if e4.xpos[1] < -40:
e4.respawn(1)
if quota > 10:
stage = 8
quota = 0
b4 = cboss4(HORI_RES, VERT_RES)
leveltheme.stop()
boss4fight.play()
if stage == 8:
b4.refresh()
if b4.health < 1:
stage = 9
quota = 0
e2.respawn(0)
e3.respawn()
e4.respawn(0)
boss4fight.stop()
leveltheme.play()
if stage == 9:
if not e2.death[0]:
e2.refresh(0)
if not e3.death:
e3.refresh()
if not e4.death[0]:
e4.refresh(0, playerobj.ypos)
if e2.xpos[0] < -40:
e2.respawn(0)
if e3.xpos < -40:
e3.respawn()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
# Datetime ############################
dt = datetime.datetime.now()
print(dt)
dt = datetime.datetime(year=2018, month=8, day=30, hour=13, minute=30)
print(dt)
print(dt.isoformat())
# Date ################################
d = datetime.date.today()
print(d)
d = datetime.datetime.now().d | ate()
print(d)
d = datetime.date(year=2018, month=8, day=30)
print(d)
print(d.isoformat())
# Time ####################### | #########
t = datetime.datetime.now().time()
print(t)
t = datetime.time(hour=1, minute=30)
print(t)
print(t.isoformat())
|
"""AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
    """AWS cloud provider plugin. Sets up cloud resources before delegation."""
    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # A static config file means the user supplied credentials directly.
        if os.path.isfile(self.config_static_path):
            return
        # Without static config, tests can still run when the CI service is available.
        aci = self._create_ansible_core_ci()
        if aci.available:
            return
        # Neither credential source is available: let the base class exclude the tests.
        super(AwsCloudProvider, self).filter(targets, exclude)
    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(AwsCloudProvider, self).setup()
        # Refuse to run against a local ~/.aws to avoid touching real user credentials.
        aws_config_path = os.path.expanduser('~/.aws')
        if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
            raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
        if not self._use_static_config():
            self._setup_dynamic()
    def _setup_dynamic(self):
        """Request AWS credentials through the Ansible Core CI service."""
        display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
        config = self._read_config_template()
        aci = self._create_ansible_core_ci()
        response = aci.start()
        # In explain mode no request is actually made, so skip recording credentials.
        if not self.args.explain:
            credentials = response['aws']['credentials']
            values = dict(
                ACCESS_KEY=credentials['access_key'],
                SECRET_KEY=credentials['secret_key'],
                SECURITY_TOKEN=credentials['session_token'],
                REGION='us-east-1',
            )
            # Keep secrets out of displayed output.
            display.sensitive.add(values['SECRET_KEY'])
            display.sensitive.add(values['SECURITY_TOKEN'])
            config = self._populate_config_template(config, values)
        self._write_config(config)
    def _create_ansible_core_ci(self):
        """
        :rtype: AnsibleCoreCI
        """
        return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
    """AWS cloud environment plugin. Updates integration test environment after delegation."""
    def get_environment_config(self):
        """
        :rtype: CloudEnvironmentConfig
        """
        parser = ConfigParser()
        parser.read(self.config_path)
        ansible_vars = dict(
            resource_prefix=self.resource_prefix,
        )
        ansible_vars.update(dict(parser.items('default')))
        # Keep credentials out of displayed output.
        display.sensitive.add(ansible_vars.get('aws_secret_key'))
        display.sensitive.add(ansible_vars.get('security_token'))
        # Only clean up resources for environments this run manages.
        if 'aws_cleanup' not in ansible_vars:
            ansible_vars['aws_cleanup'] = not self.managed
        env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
        return CloudEnvironmentConfig(
            env_vars=env_vars,
            ansible_vars=ansible_vars,
            callback_plugins=['aws_resource_actions'],
        )
    def on_failure(self, target, tries):
        """
        :type target: TestTarget
        :type tries: int
        """
        # Once retries are exhausted on a managed instance, hint at IAM policy problems.
        if not tries and self.managed:
            display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
                           'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.'
                           % target.name)
|
# Test script: compare grid-based coulomb/LJ energies (GridFF, GridFF2, CLJGrid)
# against exact pairwise forcefields (InterGroupCLJFF, InterCLJFF) for a water box.
from Sire.IO import *
from Sire.MM import *
from Sire.System import *
from Sire.Mol import *
from Sire.Maths import *
from Sire.FF import *
from Sire.Move import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Qt import *
import os
# Long-range coulomb cutoff, shorter LJ cutoff.
coul_cutoff = 20 * angstrom
lj_cutoff = 10 * angstrom
amber = Amber()
(molecules, space) = amber.readCrdTop("test/io/waterbox.crd", "test/io/waterbox.top")
system = System()
swapwaters = MoleculeGroup("swapwaters")
waters = MoleculeGroup("waters")
molnums = molecules.molNums();
# Find the water with residue number 2025; it defines the center of the swap region.
for molnum in molnums:
    water = molecules[molnum].molecule()
    if water.residue().number() == ResNum(2025):
        center_water = water
swapwaters.add(center_water)
center_point = center_water.evaluate().center()
# Partition the remaining waters: within 7.5 A of the center -> "swapwaters"
# (renamed to SWP), otherwise -> bulk "waters".
for molnum in molnums:
    if molnum != center_water.number():
        water = molecules[molnum].molecule()
        if Vector.distance(center_point, water.evaluate().center()) < 7.5:
            water = water.residue().edit().setProperty("PDB-residue-name", "SWP").commit()
            swapwaters.add(water)
        else:
            waters.add(water)
system.add(swapwaters)
system.add(waters)
# Grid-based forcefield #1 (GridFF).
gridff = GridFF("gridff")
gridff.setCombiningRules("arithmetic")
print("Combining rules are %s" % gridff.combiningRules())
gridff.setBuffer(2 * angstrom)
gridff.setGridSpacing( 0.5 * angstrom )
gridff.setLJCutoff(lj_cutoff)
gridff.setCoulombCutoff(coul_cutoff)
gridff.setShiftElectrostatics(True)
#gridff.setUseAtomisticCutoff(True)
#gridff.setUseReactionField(True)
# CLJGrid: same calculation with and without the grid approximation.
cljgridff = CLJGrid()
cljgridff.setCLJFunction( CLJShiftFunction(coul_cutoff,lj_cutoff) )
cljgridff.setFixedAtoms( CLJAtoms(waters.molecules()) )
cljatoms = CLJAtoms(swapwaters.molecules())
cljgridff.setGridDimensions( cljatoms, 0.5 * angstrom, 2 * angstrom )
print("Grid box equals %s" % cljgridff.grid())
cljboxes = CLJBoxes(cljatoms)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
cljgridff.setUseGrid(False)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
gridff.add(swapwaters, MGIdx(0))
gridff.add(waters, MGIdx(1))
gridff.setSpace( Cartesian() )
# Grid-based forcefield #2 (GridFF2) configured identically to gridff.
gridff2 = GridFF2("gridff2")
gridff2.setCombiningRules("arithmetic")
gridff2.setBuffer(2*angstrom)
gridff2.setGridSpacing( 0.5 * angstrom )
gridff2.setLJCutoff(lj_cutoff)
gridff2.setCoulombCutoff(coul_cutoff)
gridff2.setShiftElectrostatics(True)
#gridff2.setUseAtomisticCutoff(True)
#gridff2.setUseReactionField(True)
gridff2.add( swapwaters, MGIdx(0) )
gridff2.addFixedAtoms(waters.molecules())
gridff2.setSpace( Cartesian() )
testff = TestFF()
testff.add( swapwaters.molecules() )
testff.addFixedAtoms(waters.molecules())
testff.setCutoff(coul_cutoff, lj_cutoff)
# Exact pairwise reference forcefields.
cljff = InterGroupCLJFF("cljff")
cljff.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff.add(swapwaters, MGIdx(0))
cljff.add(waters, MGIdx(1))
cljff.setShiftElectrostatics(True)
#cljff.setUseAtomisticCutoff(True)
#cljff.setUseReactionField(True)
cljff.setSpace( Cartesian() )
cljff2 = InterCLJFF("cljff2")
cljff2.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff2.add(waters)
cljff2.setShiftElectrostatics(True)
cljff2.setSpace( Cartesian() )
print(gridff.energies())
print(gridff2.energies())
print("\nEnergies")
print(gridff.energies())
print(gridff2.energies())
# Time the exact calculations for comparison with the grid results above.
t = QTime()
t.start()
nrgs = cljff.energies()
ms = t.elapsed()
print(cljff.energies())
print("Took %d ms" % ms)
testff.calculateEnergy()
t.start()
nrgs = cljff2.energies()
ms = t.elapsed()
print("\nExact compare")
print(cljff2.energies())
print("Took %d ms" % ms)
|
._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
    def map_volume(self, volume_name, connector, connector_element):
        """Map *volume_name* to the host described by *connector*.

        :param volume_name: name of the volume on the array
        :param connector: os-brick style connector dict
        :param connector_element: 'wwpns' (FC) or anything else (iSCSI)
        :return: the LUN number used for the mapping
        """
        if connector_element == 'wwpns':
            # FC: pick a free LUN based on the first WWPN, map to all WWPNs.
            lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
            host = ",".join(connector['wwpns'])
        else:
            # iSCSI: make sure a host entry exists for this initiator before
            # choosing a LUN (0 from _check_host means the host is known).
            host = connector['initiator']
            host_status = self._check_host(host)
            if host_status != 0:
                hostname = self._safe_hostname(connector['host'])
                self._request("/create/host", hostname, id=host)
            lun = self._get_first_available_lun_for_host(host)
        self._request("/map/volume",
                      volume_name,
                      lun=str(lun),
                      host=host,
                      access="rw")
        return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
    def linear_copy_volume(self, src_name, dest_name, dest_bknd_name):
        """Copy a linear volume.

        Starts a /volumecopy job and polls /show/volumecopy-status until the
        array no longer reports progress. Status queries that fail are retried
        up to 5 times (1s apart) before raising DotHillRequestError.
        """
        self._request("/volumecopy",
                      dest_name,
                      dest_vdisk=dest_bknd_name,
                      source_volume=src_name,
                      prompt='yes')
        # The copy has started; now monitor until the operation completes.
        count = 0
        while True:
            tree = self._request("/show/volumecopy-status")
            return_code = tree.findtext(".//PROPERTY[@name='return-code']")
            if return_code == '0':
                status = tree.findtext(".//PROPERTY[@name='progress']")
                progress = False
                if status:
                    progress = True
                    LOG.debug("Volume copy is in progress: %s", status)
                if not progress:
                    # No progress property any more -> the copy has finished.
                    LOG.debug("Volume copy completed: %s", status)
                    break
            else:
                if count >= 5:
                    LOG.error(_LE('Error in copying volume: %s'), src_name)
                    raise exception.DotHillRequestError
                    break  # NOTE(review): unreachable after raise; kept as-is
                time.sleep(1)
                count += 1
        # Give the array a moment to settle after the copy completes.
        time.sleep(5)
    def copy_volume(self, src_name, dest_name, dest_bknd_name,
                    backend_type='virtual'):
        """Copy a linear or virtual volume.

        Linear backends delegate to linear_copy_volume(); virtual backends use
        the /copy/volume command and poll until the copy job disappears.
        """
        if backend_type == 'linear':
            return self.linear_copy_volume(src_name, dest_name, dest_bknd_name)
        # Copy a virtual volume to another in the same pool.
        self._request("/copy/volume", src_name, name=dest_name)
        LOG.debug("Volume copy of source_volume: %(src_name)s to "
                  "destination_volume: %(dest_name)s started.",
                  {'src_name': src_name, 'dest_name': dest_name, })
        # Loop until this volume copy is no longer in progress.
        while self.volume_copy_in_progress(src_name):
            time.sleep(5)
        # Once the copy operation is finished, check to ensure that
        # the volume was not deleted because of a subsequent error. An
        # exception will be raised if the named volume is not present.
        self._request("/show/volumes", dest_name)
        LOG.debug("Volume copy of source_volume: %(src_name)s to "
                  "destination_volume: %(dest_name)s completed.",
                  {'src_name': src_name, 'dest_name': dest_name, })
    def volume_copy_in_progress(self, src_name):
        """Check if a volume copy is in progress for the named volume.

        :return: True if a copy job exists for *src_name*, else False.
        """
        # 'show volume-copies' always succeeds, even if none in progress.
        tree = self._request("/show/volume-copies")
        # Find 0 or 1 job(s) with source volume we're interested in
        q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name
        joblist = tree.xpath(q)
        if len(joblist) == 0:
            return False
        LOG.debug("Volume copy of volume: %(src_name)s is "
                  "%(pc)s percent completed.",
                  {'src_name': src_name,
                   'pc': joblist[0].findtext("PROPERTY[@name='progress']"), })
        return True
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (32 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
| portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
| "text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name, backend_type):
if backend_type == 'linear':
tree = self._request("/show/vdisks", backend_name)
else:
tree = self._request("/show/pools", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/se |
moyal = tfd.Moyal(loc=0., scale=scale, validate_args=True)
self.assertIs(scale, moyal.scale)
self.evaluate(moyal.mean())
with tf.control_dependencies([scale.assign([-.01])]):
with self.assertRaisesOpError('Argument `scale` must be positive.'):
self.evaluate(moyal.mean())
  def testMoyalLogPdf(self):
    """log_prob/prob of a batched Moyal agree with scipy.stats.moyal."""
    batch_size = 6
    loc = np.array([0.] * batch_size, dtype=self.dtype)
    scale = np.array([3.] * batch_size, dtype=self.dtype)
    x = np.array([2., 3., 4., 5., 6., 7.], dtype=self.dtype)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    log_pdf = moyal.log_prob(self.make_tensor(x))
    self.assertAllClose(
        stats.moyal.logpdf(x, loc=loc, scale=scale),
        self.evaluate(log_pdf))
    pdf = moyal.prob(x)
    self.assertAllClose(
        stats.moyal.pdf(x, loc=loc, scale=scale), self.evaluate(pdf))
  def testMoyalLogPdfMultidimensional(self):
    """log_prob/prob broadcast a (6, 3) loc batch against scalar scale."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0], dtype=self.dtype)
    x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=self.dtype).T
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    log_pdf = moyal.log_prob(self.make_tensor(x))
    self.assertAllClose(
        self.evaluate(log_pdf), stats.moyal.logpdf(x, loc=loc, scale=scale))
    pdf = moyal.prob(self.make_tensor(x))
    self.assertAllClose(
        self.evaluate(pdf), stats.moyal.pdf(x, loc=loc, scale=scale))
  def testMoyalCDF(self):
    """log_cdf/cdf of a batched Moyal agree with scipy.stats.moyal."""
    batch_size = 6
    loc = np.array([0.] * batch_size, dtype=self.dtype)
    scale = np.array([3.] * batch_size, dtype=self.dtype)
    x = np.array([2., 3., 4., 5., 6., 7.], dtype=self.dtype)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    log_cdf = moyal.log_cdf(self.make_tensor(x))
    self.assertAllClose(
        self.evaluate(log_cdf), stats.moyal.logcdf(x, loc=loc, scale=scale))
    cdf = moyal.cdf(self.make_tensor(x))
    self.assertAllClose(
        self.evaluate(cdf), stats.moyal.cdf(x, loc=loc, scale=scale))
  def testMoyalCdfMultidimensional(self):
    """log_cdf/cdf broadcast a (6, 3) loc batch against scalar scale."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0], dtype=self.dtype)
    x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=self.dtype).T
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    log_cdf = moyal.log_cdf(self.make_tensor(x))
    self.assertAllClose(
        self.evaluate(log_cdf),
        stats.moyal.logcdf(x, loc=loc, scale=scale))
    cdf = moyal.cdf(self.make_tensor(x))
    self.assertAllClose(
        self.evaluate(cdf),
        stats.moyal.cdf(x, loc=loc, scale=scale))
  def testMoyalMean(self):
    """mean() matches scipy.stats.moyal.mean."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0], dtype=self.dtype)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    self.assertAllClose(self.evaluate(moyal.mean()),
                        stats.moyal.mean(loc=loc, scale=scale))
  def testMoyalVariance(self):
    """variance() matches scipy.stats.moyal.var."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0], dtype=self.dtype)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    self.assertAllClose(self.evaluate(moyal.variance()),
                        stats.moyal.var(loc=loc, scale=scale))
  def testMoyalStd(self):
    """stddev() matches scipy.stats.moyal.std."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0], dtype=self.dtype)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    self.assertAllClose(self.evaluate(moyal.stddev()),
                        stats.moyal.std(loc=loc, scale=scale))
  def testMoyalMode(self):
    """mode() equals loc (the Moyal mode is the location parameter)."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0], dtype=self.dtype)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    self.assertAllClose(self.evaluate(moyal.mode()), self.evaluate(moyal.loc))
  def testMoyalSample(self):
    """Sample mean/variance converge to analytic values (scalar params)."""
    loc = self.dtype(4.0)
    scale = self.dtype(1.0)
    n = int(3e5)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    samples = moyal.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual((n,), sample_values.shape)
    self.assertAllClose(
        stats.moyal.mean(loc=loc, scale=scale),
        sample_values.mean(), rtol=.01)
    self.assertAllClose(
        stats.moyal.var(loc=loc, scale=scale),
        sample_values.var(), rtol=.01)
  def testMoyalSampleMultidimensionalMean(self):
    """Per-component sample means converge to analytic means."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0, 0.8, 0.5], dtype=self.dtype)
    n = int(2e5)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    samples = moyal.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    # TODO(b/157561663): Remove the masking once tf.math.special.erfcinv exists.
    sample_values = np.ma.masked_invalid(sample_values)
    self.assertAllClose(
        stats.moyal.mean(loc=loc, scale=scale),
        sample_values.mean(axis=0),
        rtol=.03,
        atol=0)
  def testMoyalSampleMultidimensionalVar(self):
    """Per-component sample variances converge to analytic variances."""
    batch_size = 6
    loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
    scale = np.array([1.0, 0.8, 0.5], dtype=self.dtype)
    n = int(1e5)
    moyal = tfd.Moyal(
        loc=self.make_tensor(loc),
        scale=self.make_tensor(scale),
        validate_args=True)
    samples = moyal.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    # TODO(b/157561663): Remove the masking once tf.math.special.erfcinv exists.
    sample_values = np.ma.masked_invalid(sample_values)
    self.assertAllClose(
        stats.moyal.var(loc=loc, scale=scale),
        sample_values.var(axis=0),
        rtol=.03,
        atol=0)
  def testMoyalMoyalKL(self):
    """Analytic KL(a||b) matches a Monte-Carlo estimate; KL(a||a) is zero."""
    a_loc = np.arange(-2.0, 3.0, 1.0)
    a_scale = np.arange(0.5, 2.5, 0.5)
    b_loc = 2 * np.arange(-2.0, 3.0, 1.0)
    b_scale = np.arange(0.5, 2.5, 0.5)
    # This reshape is intended to expand the number of test cases.
    a_loc = a_loc.reshape((len(a_loc), 1, 1, 1))
    a_scale = a_scale.reshape((1, len(a_scale), 1, 1))
    b_loc = b_loc.reshape((1, 1, len(b_loc), 1))
    b_scale = b_scale.reshape((1, 1, 1, len(b_scale)))
    a = tfd.Moyal(loc=a_loc, scale=a_scale, validate_args=True)
    b = tfd.Moyal(loc=b_loc, scale=b_scale, validate_args=True)
    kl = tfd.kl_divergence(a, b)
    # Monte-Carlo estimate of the same KL for comparison.
    x = a.sample(int(3e5), seed=test_util.test_seed())
    kl_sample = tf.reduce_mean(a.log_prob(x) - b.log_prob(x), axis=0)
    kl_, kl_sample_ = self.evaluate([kl, kl_sample])
    self.assertAllClose(kl_, kl_sample_, atol=1e-15, rtol=1e-1)
    zero_kl = tfd.kl_divergence(a, a)
    true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(zero_kl), zero_kl])
    self.assertAllClose(true_zero_kl_, zero_kl_)
@test_util.test_all_tf_execution_regimes
class MoyalTestStaticShape(test_util.TestCase, _MoyalTest):
  """Run the shared Moyal tests with float32 and statically-known shapes."""
  dtype = np.float32
  use_static_shape = True
@test_util.test_all_tf_execution_regimes
class MoyalTestDynamicShape(test_util.TestCase, _MoyalTest):
  """Run the shared Moyal tests with float32 and dynamic shapes."""
  dtype = np.float32
  use_static_shape = False
@test_util.test_all_tf_execution_regimes
class MoyalTestFloat64StaticShape(test_util.TestCase, _MoyalTest):
  """Run the shared Moyal tests with float64 and statically-known shapes."""
  dtype = np.float64
  use_static_shape = True
@test_util.test_all_tf_execution_regimes
class MoyalTestFloat64DynamicShape(test_util.TestCase, _MoyalTest):
dtype = np.float64
use_static_shape = Fals |
""" UnitTests for the SimpleHTTPServer
"""
import mock
import unittest
class TestHTTPServerHandler(unittest.TestCase):
    """Placeholder tests for the SimpleHTTPServer request handler.

    The GET/POST tests are still stubs; only the fixture wiring exists.
    """
    def setUp(self):
        # A Mock stands in for the real request handler.
        self.handler = mock.Mock()
    def test_do_GET(self):
        # TODO: exercise the handler's do_GET behaviour.
        pass
    def test_do_POST(self):
        # TODO: exercise the handler's do_POST behaviour.
        pass
    def tearDown(self):
        # Invokes the mock; presumably a placeholder for real cleanup.
        self.handler()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
# Read a whitespace-separated list of integers from standard input.
#a=[int(x) for x in input().split()]
#print (a)
x = 5
y = 10
b = [int(y) for y in input().split()]
#a=[int(x) for x in input().split()]
# Fix: the built-in namespace is exposed as `__builtins__` (double
# underscores); the original `dir(_builtins_)` raised NameError.
dir(__builtins__)
|
# String-handling exercises: slicing, formatting, searching, case and strip.
a = "python"
print(a*2)
try:
    # Index -10 is out of range for a 6-character string.
    print(a[-10])
except IndexError as e:
    print("์ธ๋ฑ์ค ๋ฒ์๋ฅผ ์ด๊ณผ ํ์ต๋๋ค.")
    print(e)
print(a[0:4])
print(a[1:-2])
# "%-10s": left-justify "hi" in a field 10 characters wide.
print("%-10sjane." % "hi")
b = "Python is best choice."
# find() returns -1 when the substring is absent...
print(b.find("b"))
print(b.find("B"))
try:
    # ...while index() raises ValueError instead.
    print(b.index("B"))
except ValueError as e:
    print(e)
c = "hi"
print(c.upper())
a = " hi"
print("kk",a.lstrip())
a = " hi "
print(a.strip())
|
# 3rd party imports
from reportlab.platypus import Image, Paragraph, PageBreak, Table, Spacer
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
# Django imports
from django.conf import settings
# Project imports
from .arabic_reshaper import reshape
from .pdf_canvas import NumberedCanvas, getArabicStyle, getHeaderStyle, getTableStyle, \
get_hnec_logo_fname, drawHnecLogo
from .strings import STRINGS
from .utils import chunker, format_name, CountingDocTemplate, build_copy_info, \
truncate_center_name, out_of_disk_space_handler_context
from libya_elections.constants import MALE, FEMALE
def generate_pdf(filename, center, voter_roll, gender, center_book=False):
    """Write a voter-roll PDF for one center and gender; return the page count.

    :param filename: the file to which the PDF will be written
    :param center: a data_pull.Center instance
    :param voter_roll: registration records with national_id, first_name,
        father_name, grandfather_name, family_name, gender (accessed as
        attributes, e.g. ``voter.gender``)
    :param gender: one of the MALE/FEMALE constants. UNISEX is not valid.
    :param center_book: if True, use the "center book" cover/header strings
        instead of the "center list" variants
    :return: number of pages in the generated PDF
    """
    if gender not in (MALE, FEMALE):
        raise ValueError("generate_pdf() gender must be MALE or FEMALE")
    # set styles
    styles = getArabicStyle()
    # get strings
    mf_string = STRINGS['female'] if (gender == FEMALE) else STRINGS['male']
    cover_string = STRINGS['center_book_cover'] if center_book else STRINGS['center_list_cover']
    header_string = STRINGS['center_book_header'] if center_book else STRINGS['center_list_header']
    # cover page
    center_name = reshape(center.name)
    template = '%s: %s / %s'
    subconstituency_name = reshape(center.subconstituency.name_arabic)
    params = (STRINGS['subconstituency_name'], center.subconstituency.id, subconstituency_name)
    subconstituency = template % params
    center_info = {
        'gender': '%s: %s' % (STRINGS['gender'], mf_string),
        'number': '%s: %d' % (STRINGS['center_number'], center.center_id),
        'name': '%s: %s' % (STRINGS['center_name'], center_name),
        'name_trunc': '%s: %s' % (STRINGS['center_name'], truncate_center_name(center_name)),
        'subconstituency': subconstituency,
        'copy_info': build_copy_info(center),
    }
    # create document
    doc = CountingDocTemplate(filename, pagesize=A4, topMargin=1 * cm, bottomMargin=1 * cm,
                              leftMargin=1.5 * cm, rightMargin=2.54 * cm)
    # elements, cover page first
    with open(get_hnec_logo_fname(), 'rb') as hnec_f:
        elements = [
            Image(hnec_f, width=10 * cm, height=2.55 * cm),
            Spacer(48, 48),
            Paragraph(cover_string, styles['Title']),
            Spacer(18, 18),
            Paragraph(center_info['gender'], styles['CoverInfo-Bold']),
            Paragraph(center_info['number'], styles['CoverInfo']),
            Paragraph(center_info['name'], styles['CoverInfo']),
            Paragraph(center_info['copy_info'], styles['CoverInfo']),
            Paragraph(center_info['subconstituency'], styles['CoverInfo']),
            PageBreak(),
        ]
    # Focus on one specific gender.
    voter_roll = [voter for voter in voter_roll if voter.gender == gender]
    # We wrap the page header in a table because we want the header's gray background to extend
    # margin-to-margin and that's easy to do with a table + background color. It's probably
    # possible with Paragraphs alone, but I'm too lazy^w busy to figure out how.
    # It's necessary to wrap the table cell text in Paragraphs to ensure the base text direction
    # is RTL. See https://github.com/hnec-vr/libya-elections/issues/1197
    para_prefix = Paragraph(STRINGS['center_header_prefix'], styles['InnerPageHeader'])
    para_header = Paragraph(header_string, styles['InnerPageHeader'])
    page_header = Table([[para_prefix], [para_header]], 15 * cm, [16, 24])
    page_header.setStyle(getHeaderStyle())
    n_pages = 0
    for page in chunker(voter_roll, settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION):
        n_pages += 1
        elements.append(page_header)
        elements += [Paragraph(center_info['gender'], styles['CenterInfo-Bold']),
                     Paragraph(center_info['number'], styles['CenterInfo']),
                     Paragraph(center_info['name_trunc'], styles['CenterInfo']),
                     ]
        elements.append(Spacer(10, 10))
        # The contents of each table cell are wrapped in a Paragraph to set the base text
        # direction.
        # See https://github.com/hnec-vr/libya-elections/issues/1197
        data = [[Paragraph(reshape(format_name(voter)), styles['TableCell'])] for voter in page]
        # Insert header before the data.
        data.insert(0, [Paragraph(STRINGS['the_names'], styles['TableCell'])])
        table = Table(data, 15 * cm, 0.825 * cm)
        table.setStyle(getTableStyle())
        elements.append(table)
        elements.append(Paragraph(mf_string, styles['PageBottom']))
        elements.append(PageBreak())
    if not n_pages:
        # When there are no pages (==> no registrants for this gender), we need to emit a page
        # that states that.
        elements.append(page_header)
        key = 'no_male_registrants' if gender == MALE else 'no_female_registrants'
        elements.append(Paragraph(STRINGS[key], styles['BlankPageNotice']))
    with out_of_disk_space_handler_context():
        doc.build(elements, canvasmaker=NumberedCanvas, onLaterPages=drawHnecLogo)
    return doc.n_pages
|
from .analysis import *
from | .toolbox import *
from . import ut | ils
|
"""Utilities for extracting common archive formats"""
import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from distutils.errors import DistutilsError
if "__PEX_UNVENDORED__" in __import__("os").environ:
from pkg_resources import ensure_directory # vendor:skip
else:
from pex.third_party.pkg_resources import ensure_directory
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
class UnrecognizedFormat(DistutilsError):
    """Raised when an archive's format cannot be recognized by any driver."""
def default_filter(src, dst):
    """The default progress/filter callback; extracts every file.

    :param src: '/'-separated path of the entry inside the archive (unused).
    :param dst: proposed filesystem extraction path.
    :return: ``dst`` unchanged — always truthy, so no entry is skipped.
       (The old docstring claimed this "returns True", but callers use the
       returned value as the extraction path, so it must be ``dst``.)
    """
    return dst
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
                   drivers=None):
    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``

    `progress_filter` is called with two arguments for every entry: the
    '/'-separated source path inside the archive, and the filesystem path
    where it will be extracted. It must return the desired extraction path
    (possibly the one given) or ``None`` to skip that entry, so it can be
    used both to report progress and to filter or relocate entries.

    `drivers`, if supplied, must be a non-empty sequence of callables with
    the same signature as this function (minus `drivers`); each must raise
    ``UnrecognizedFormat`` when it cannot handle the archive. They are tried
    in order until one succeeds; if all fail, ``UnrecognizedFormat`` is
    raised. When omitted, the module-level ``extraction_drivers`` sequence
    (directory, zip, tar) is used.
    """
    candidates = drivers or extraction_drivers
    for driver in candidates:
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            continue
        # Driver succeeded; nothing more to do.
        return
    raise UnrecognizedFormat(
        "Not a recognized archive type: %s" % filename
    )
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """"Unpack" a directory, using the same interface as for archives

    Raises ``UnrecognizedFormat`` if `filename` is not a directory.
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % filename)
    # Map each walked directory to its (archive-style src prefix, dest dir).
    paths = {filename: ('', extract_dir)}
    for base, dirs, files in os.walk(filename):
        src, dst = paths[base]
        for subdir in dirs:
            paths[os.path.join(base, subdir)] = (src + subdir + '/',
                                                 os.path.join(dst, subdir))
        for fname in files:
            target = progress_filter(src + fname, os.path.join(dst, fname))
            if not target:
                # skip non-files
                continue
            ensure_directory(target)
            source = os.path.join(base, fname)
            shutil.copyfile(source, target)
            shutil.copystat(source, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack zip `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as
    determined by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an
    explanation of the `progress_filter` argument.
    """
    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))
    with zipfile.ZipFile(filename) as archive:
        for info in archive.infolist():
            name = info.filename
            parts = name.split('/')
            # Refuse absolute paths and parent-directory escapes.
            if name.startswith('/') or '..' in parts:
                continue
            target = progress_filter(name, os.path.join(extract_dir, *parts))
            if not target:
                continue
            ensure_directory(target)
            if not name.endswith('/'):
                # Regular file: write contents, then restore Unix permissions
                # stored in the upper bits of external_attr (if any).
                with open(target, 'wb') as out:
                    out.write(archive.read(name))
                mode = info.external_attr >> 16
                if mode:
                    os.chmod(target, mode)
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        )
    with contextlib.closing(tarobj):
        # don't do any chowning!
        tarobj.chown = lambda *args: None
        for member in tarobj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if not name.startswith('/') and '..' not in name.split('/'):
                prelim_dst = os.path.join(extract_dir, *name.split('/'))
                # resolve any links and to extract the link targets as normal
                # files
                while member is not None and (member.islnk() or member.issym()):
                    linkpath = member.linkname
                    if member.issym():
                        # Symlink targets are relative to the link's directory.
                        base = posixpath.dirname(member.name)
                        linkpath = posixpath.join(base, linkpath)
                    linkpath = posixpath.normpath(linkpath)
                    # member becomes None when the link target is not in the
                    # archive; such entries are silently skipped below.
                    member = tarobj._getmember(linkpath)
                if member is not None and (member.isfile() or member.isdir()):
                    final_dst = progress_filter(name, prelim_dst)
                    if final_dst:
                        if final_dst.endswith(os.sep):
                            final_dst = final_dst[:-1]
                        try:
                            # XXX Ugh
                            tarobj._extract_member(member, final_dst)
                        except tarfile.ExtractError:
                            # chown/chmod/mkfifo/mknode/makedev failed
                            pass
    return True
# Drivers tried, in order, by unpack_archive() when none are supplied.
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
|
# coding: utf-8
# These tests are taken from astropy, as with the astrodynamics.constant.Constant
# class. It retains the original license (see licenses/ASTROPY_LICENSE.txt)
from __future__ import absolute_import, division, print_function
import copy
import astropy.units as u
from astropy.units import Quantity
import astrodynamics.constants as const
from astrodynamics.constants import J2, Constant
def test_units():
    """Confirm that none of the constants defined in astrodynamics have invalid
    units.
    """
    # Scan every module-level attribute; only Constant instances are checked.
    for key, val in vars(const).items():
        if isinstance(val, Constant):
            # Getting the unit forces the unit parser to run.
            assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
    """Deep and shallow copies of the J2 constant compare equal to it."""
    deep = copy.deepcopy(J2)
    assert deep == J2
    shallow = copy.copy(J2)
    assert shallow == J2
def test_view():
    """Check that Constant and Quantity views can be taken."""
    x = J2
    x2 = x.view(Constant)
    assert x2 == x
    assert x2.value == x.value
    # make sure it has the necessary attributes and they're not blank
    assert x2.uncertainty
    assert x2.name == x.name
    assert x2.reference == x.reference
    assert x2.unit == x.unit
    # A plain Quantity view drops the Constant subclass and its metadata.
    q1 = x.view(Quantity)
    assert q1 == x
    assert q1.value == x.value
    assert type(q1) is Quantity
    assert not hasattr(q1, 'reference')
    # Constructing a Quantity (default subok=False) also downcasts.
    q2 = Quantity(x)
    assert q2 == x
    assert q2.value == x.value
    assert type(q2) is Quantity
    assert not hasattr(q2, 'reference')
    # subok=True preserves the Constant subclass and its metadata.
    x3 = Quantity(x, subok=True)
    assert x3 == x
    assert x3.value == x.value
    # make sure it has the necessary attributes and they're not blank
    assert x3.uncertainty
    assert x3.name == x.name
    assert x3.reference == x.reference
    assert x3.unit == x.unit
    # subok=True together with copy=False must return the very same object.
    x4 = Quantity(x, subok=True, copy=False)
    assert x4 is x
def test_repr():
    """repr() of a Constant exposes every constructor argument."""
    const_obj = Constant('the name', value=1, unit='m2', uncertainty=0.1,
                         reference='me')
    expected = ("Constant(name='the name', value=1, unit='m2', "
                "uncertainty=0.1, reference='me')")
    assert repr(const_obj) == expected
|
# coding: utf-8
# # Query `apiso:ServiceType`
# NOTE(review): Python 2 notebook export (uses print statements).
# In[43]:
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import numpy as np
# The GetCaps request for these services looks like this:
# http://catalog.data.gov/csw-all/csw?SERVICE=CSW&VERSION=2.0.2&REQUEST=GetCapabilities
# In[56]:
endpoint = 'http://data.ioos.us/csw' # FAILS apiso:ServiceType
#endpoint = 'http://catalog.data.gov/csw-all' # FAILS apiso:ServiceType
#endpoint = 'http://geoport.whoi.edu/csw' # SUCCEEDS apiso:ServiceType
csw = CatalogueServiceWeb(endpoint,timeout=60)
print csw.version
# In[57]:
csw.get_operation_by_name('GetRecords').constraints
# Search first for records containing the text "COAWST" and "experimental".
# In[45]:
val = 'coawst'
filter1 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
                            escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [ filter1 ]
# In[46]:
val = 'experimental'
filter2 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
                            escapeChar='\\',wildCard='*',singleChar='?')
# AND the two free-text filters together.
filter_list = [fes.And([filter1, filter2])]
# In[47]:
csw.getrecords2(constraints=filter_list,maxrecords=100,esn='full')
print len(csw.records.keys())
for rec in list(csw.records.keys()):
    print csw.records[rec].title
# Now let's print out the references (service endpoints) to see what types of services are available
# In[48]:
choice=np.random.choice(list(csw.records.keys()))
print(csw.records[choice].title)
csw.records[choice].references
# In[49]:
csw.records[choice].xml
# We see that the `OPeNDAP` service is available, so let's see if we can add that to the query, returning only datasets that have text "COAWST" and "experimental" and that have an "opendap" service available.
#
# We should get the same number of records, as all COAWST records have OPeNDAP service endpoints. If we get no records, something is wrong with the CSW server.
# In[50]:
val = 'OPeNDAP'
filter3 = fes.PropertyIsLike(propertyname='apiso:ServiceType',literal=('*%s*' % val),
                            escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2, filter3])]
csw.getrecords2(constraints=filter_list, maxrecords=1000)
# In[51]:
print(len(csw.records.keys()))
for rec in list(csw.records.keys()):
    print('title:'+csw.records[rec].title)
    print('identifier:'+csw.records[rec].identifier)
    print('modified:'+csw.records[rec].modified)
    print(' ')
# In[53]:
print(csw.request)
# In[ ]:
|
from __future__ import unicode_literals
import json
from django.utils import six
from kgb import SpyAgency
from reviewboard.hostingsvcs.github import GitHub
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.repository import RemoteRepository
from reviewboard.hostingsvcs.utils.paginator import APIPaginator
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
remote_repository_item_mimetype,
remote_repository_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_remote_repository_item_url,
get_remote_repository_list_url)
def _compare_item(self, item_rsp, remote_repository):
self.assertEqual(item_rsp['id'], remote_repository.id)
self.assertEqual(item_rsp['name'], remote_repository.name)
self.assertEqual(item_rsp['owner'], remote_repository.owner)
self.assertEqual(item_rsp['scm_type'], remote_repository.scm_type)
self.assertEqual(item_rsp['path'], remote_repository.path)
self.assertEqual(item_rsp['mirror_path'], remote_repository.mirror_path)
class RemoteRepositoryTestPaginator(APIPaginator):
    """In-memory paginator serving one fixed page of canned results."""

    def __init__(self, results):
        self.results = results

        super(RemoteRepositoryTestPaginator, self).__init__(client=None,
                                                            url='')

    def fetch_url(self, url):
        """Return the canned results regardless of the requested URL."""
        return {'data': self.results}
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(SpyAgency, BaseWebAPITestCase):
    """Testing the RemoteRepositoryResource list APIs."""

    fixtures = ['test_users']
    sample_api_url = 'hosting-service-accounts/<id>/remote-repositories/'
    resource = resources.remote_repository
    basic_get_use_admin = True

    compare_item = _compare_item

    def setup_http_not_allowed_list_test(self, user):
        """Return a list URL for a bare GitHub account."""
        hosting_account = HostingServiceAccount.objects.create(
            service_name='github',
            username='bob')

        return get_remote_repository_list_url(hosting_account)

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        """Create a GitHub account and spy in two canned repositories."""
        hosting_account = HostingServiceAccount.objects.create(
            service_name='github',
            username='bob',
            local_site=self.get_local_site_or_none(name=local_site_name),
            data=json.dumps({
                'authorization': {
                    'token': '123',
                },
            }))
        service = hosting_account.service

        # Two repositories that the spied-on service will "find" remotely.
        repositories = [
            RemoteRepository(service,
                            repository_id=repo_id,
                            name=repo_name,
                            owner='bob',
                            scm_type='Git',
                            path=repo_path,
                            mirror_path=repo_mirror)
            for repo_id, repo_name, repo_path, repo_mirror in (
                ('123', 'repo1', 'ssh://example.com/repo1',
                 'https://example.com/repo1'),
                ('456', 'repo2', 'ssh://example.com/repo2',
                 'https://example.com/repo2'),
            )
        ]

        paginator = RemoteRepositoryTestPaginator(repositories)
        self.spy_on(GitHub.get_remote_repositories,
                    owner=GitHub,
                    call_fake=lambda *args, **kwargs: paginator)

        return (get_remote_repository_list_url(hosting_account,
                                               local_site_name),
                remote_repository_list_mimetype,
                repositories)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(SpyAgency, BaseWebAPITestCase):
    """Testing the RemoteRepositoryResource item APIs."""

    fixtures = ['test_users']
    sample_api_url = 'hosting-service-accounts/<id>/remote-repositories/<id>/'
    resource = resources.remote_repository
    basic_get_use_admin = True

    compare_item = _compare_item

    def setup_http_not_allowed_item_test(self, user):
        """Return an item URL for a repository on a bare GitHub account."""
        hosting_account = HostingServiceAccount.objects.create(
            service_name='github',
            username='bob')
        repository = RemoteRepository(hosting_account.service,
                                      repository_id='123',
                                      name='repo1',
                                      owner='bob',
                                      scm_type='Git',
                                      path='ssh://example.com/repo1')

        return get_remote_repository_item_url(repository)

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        """Create a GitHub account and spy in one canned repository."""
        hosting_account = HostingServiceAccount.objects.create(
            service_name='github',
            username='bob',
            local_site=self.get_local_site_or_none(name=local_site_name),
            data=json.dumps({
                'authorization': {
                    'token': '123',
                },
            }))
        repository = RemoteRepository(hosting_account.service,
                                      repository_id='123',
                                      name='repo1',
                                      owner='bob',
                                      scm_type='Git',
                                      path='ssh://example.com/repo1',
                                      mirror_path='https://example.com/repo1')

        self.spy_on(GitHub.get_remote_repository,
                    owner=GitHub,
                    call_fake=lambda *args, **kwargs: repository)

        return (get_remote_repository_item_url(repository, local_site_name),
                remote_repository_item_mimetype,
                repository)
|
-attributes, too-many-public-methods
# pylint: disable=abstract-method
class SonosDevice(MediaPlayerDevice):
"""Representation of a Sonos device."""
    # pylint: disable=too-many-arguments
    def __init__(self, hass, player):
        """Initialize the Sonos device.

        :param hass: the Home Assistant core object, stored for later use.
        :param player: the underlying SoCo player this entity wraps.
        """
        # Imported lazily so soco is only required at runtime.
        from soco.snapshot import Snapshot
        self.hass = hass
        # Step size (in volume points) used by volume_up()/volume_down().
        self.volume_increment = 5
        self._player = player
        self._speaker_info = None
        self._name = None
        self._coordinator = None
        self._media_content_id = None
        self._media_duration = None
        self._media_image_url = None
        self._media_artist = None
        self._media_album_name = None
        self._media_title = None
        # Populate the state fields above before taking the snapshot.
        self.update()
        # Snapshot support, used to save/restore the player state later.
        self.soco_snapshot = Snapshot(self._player)
@property
def should_poll(self):
"""Polling needed."""
return True
def update_sonos(self, now):
"""Update state, called by track_utc_time_change."""
self.update_ha_state(True)
@property
def unique_id(self):
"""Return an unique ID."""
return self._player.uid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status == 'PAUSED_PLAYBACK':
return STATE_PAUSED
if self._status == 'PLAYING':
return STATE_PLAYING
if self._status == 'STOPPED':
return STATE_IDLE
if self._status == 'OFF':
return STATE_OFF
return STATE_UNKNOWN
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._player.is_coordinator
    def update(self):
        """Retrieve latest state.

        Polls the player for speaker info, transport state and track
        metadata, and resolves the group coordinator.  When the player is
        unavailable, all media fields are cleared and status is forced to
        'OFF'.
        """
        self._speaker_info = self._player.get_speaker_info()
        # Strip stereo-pair suffixes so both speakers share one name.
        self._name = self._speaker_info['zone_name'].replace(
            ' (R)', '').replace(' (L)', '')
        if self.available:
            self._status = self._player.get_current_transport_info().get(
                'current_transport_state')
            trackinfo = self._player.get_current_track_info()
            if trackinfo['uri'].startswith('x-rincon:'):
                # this speaker is a slave, find the coordinator
                # the uri of the track is 'x-rincon:{coordinator-id}'
                coordinator_id = trackinfo['uri'][9:]
                coordinators = [device for device in DEVICES
                                if device.unique_id == coordinator_id]
                self._coordinator = coordinators[0] if coordinators else None
            else:
                self._coordinator = None
            # Only a coordinator (or standalone player) reports real media
            # metadata; slaves delegate via the *_coordinator* properties.
            if not self._coordinator:
                mediainfo = self._player.avTransport.GetMediaInfo([
                    ('InstanceID', 0)
                ])
                duration = trackinfo.get('duration', '0:00')
                # if the speaker is playing from the "line-in" source, getting
                # track metadata can return NOT_IMPLEMENTED, which breaks the
                # volume logic below
                if duration == 'NOT_IMPLEMENTED':
                    duration = None
                else:
                    # Convert "h:mm:ss" to seconds (base-60 positional sum).
                    duration = sum(60 ** x[0] * int(x[1]) for x in enumerate(
                        reversed(duration.split(':'))))
                media_image_url = trackinfo.get('album_art', None)
                media_artist = trackinfo.get('artist', None)
                media_album_name = trackinfo.get('album', None)
                media_title = trackinfo.get('title', None)
                if media_image_url in ('', 'NOT_IMPLEMENTED', None):
                    # fallback to asking the speaker directly
                    media_image_url = \
                        'http://{host}:{port}/getaa?s=1&u={uri}'.format(
                            host=self._player.ip_address,
                            port=1400,
                            uri=urllib.parse.quote(mediainfo['CurrentURI'])
                        )
                if media_artist in ('', 'NOT_IMPLEMENTED', None):
                    # if listening to a radio stream the media_artist field
                    # will be empty and the title field will contain the
                    # filename that is being streamed
                    current_uri_metadata = mediainfo["CurrentURIMetaData"]
                    if current_uri_metadata not in \
                            ('', 'NOT_IMPLEMENTED', None):
                        # currently soco does not have an API for this
                        import soco
                        current_uri_metadata = soco.xml.XML.fromstring(
                            soco.utils.really_utf8(current_uri_metadata))
                        md_title = current_uri_metadata.findtext(
                            './/{http://purl.org/dc/elements/1.1/}title')
                        if md_title not in ('', 'NOT_IMPLEMENTED', None):
                            media_artist = ''
                            media_title = md_title
                self._media_content_id = trackinfo.get('title', None)
                self._media_duration = duration
                self._media_image_url = media_image_url
                self._media_artist = media_artist
                self._media_album_name = media_album_name
                self._media_title = media_title
        else:
            # Speaker unreachable: clear everything and mark it off.
            self._status = 'OFF'
            self._coordinator = None
            self._media_content_id = None
            self._media_duration = None
            self._media_image_url = None
            self._media_artist = None
            self._media_album_name = None
            self._media_title = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player.volume / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player.mute
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._coordinator:
return self._coordinator.media_content_id
else:
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._coordinator:
return self._coordinator.media_duration
else:
return self._media_duration
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._coordinator:
return self._coordinator.media_image_url
else:
return self._media_image_url
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_artist
else:
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_album_name
else:
return self._media_album_name
@property
def media_title(self):
"""Title of current playing media."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
if self._coordinator:
return self._coordinator.media_title
else:
return self._media_title
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
if not self.source_list:
# some devices do not allow source selection
return SUPPORT_SONOS ^ SUPPORT_SELECT_SOURCE
return SUPPORT_SONOS
def volume_up(self):
"""Volume up media player."""
self._player.volume += self.volume_increment
def volume_down(self):
""" |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from shutil import which
from unittest.mock import patch
from urllib.parse import quote
from libthumbor import CryptoURL
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, RequestParameters, ServerParameters
from thumbor.importer import Importer
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
class ImageOperationsWithAutoWebPTestCase(BaseImagingTestCase):
    """Tests for automatic WebP conversion (AUTO_WEBP enabled)."""

    def get_context(self):
        """Build a thumbor context with AUTO_WEBP and no storage."""
        config = Config(SECURITY_KEY="ACME-SEC")
        config.LOADER = "thumbor.loaders.file_loader"
        config.FILE_LOADER_ROOT_PATH = self.loader_path
        config.STORAGE = "thumbor.storages.no_storage"
        config.AUTO_WEBP = True

        importer = Importer(config)
        importer.import_modules()

        server_params = ServerParameters(
            8889, "localhost", "thumbor.conf", None, "info", None
        )
        server_params.security_key = "ACME-SEC"

        context = Context(server_params, config, importer)
        context.server.gifsicle_path = which("gifsicle")
        return context

    async def get_as_webp(self, url):
        """Fetch *url* while advertising WebP support via Accept."""
        return await self.async_fetch(
            url, headers={"Accept": "image/webp,*/*;q=0.8"}
        )

    @gen_test
    async def test_can_auto_convert_jpeg(self):
        resp = await self.get_as_webp("/unsafe/image.jpg")

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()

    @gen_test
    async def test_should_not_convert_animated_gifs_to_webp(self):
        resp = await self.get_as_webp("/unsafe/animated.gif")

        expect(resp.code).to_equal(200)
        expect(resp.headers).not_to_include("Vary")
        expect(resp.body).to_be_gif()

    @gen_test
    async def test_should_convert_image_with_small_width_and_no_height(self):
        resp = await self.get_as_webp("/unsafe/0x0:1681x596/1x/image.jpg")

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()

    @gen_test
    async def test_should_convert_monochromatic_jpeg(self):
        resp = await self.get_as_webp("/unsafe/grayscale.jpg")

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()

    @gen_test
    async def test_should_convert_cmyk_jpeg(self):
        resp = await self.get_as_webp("/unsafe/cmyk.jpg")

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()

    @gen_test
    async def test_shouldnt_convert_cmyk_jpeg_if_format_specified(self):
        resp = await self.get_as_webp(
            "/unsafe/filters:format(png)/cmyk.jpg"
        )

        expect(resp.code).to_equal(200)
        expect(resp.headers).not_to_include("Vary")
        expect(resp.body).to_be_png()

    @gen_test
    async def test_shouldnt_convert_cmyk_jpeg_if_gif(self):
        resp = await self.get_as_webp(
            "/unsafe/filters:format(gif)/cmyk.jpg"
        )

        expect(resp.code).to_equal(200)
        expect(resp.headers).not_to_include("Vary")
        expect(resp.body).to_be_gif()

    @gen_test
    async def test_shouldnt_convert_if_format_specified(self):
        resp = await self.get_as_webp(
            "/unsafe/filters:format(gif)/image.jpg"
        )

        expect(resp.code).to_equal(200)
        expect(resp.headers).not_to_include("Vary")
        expect(resp.body).to_be_gif()

    @gen_test
    async def test_shouldnt_add_vary_if_format_specified(self):
        resp = await self.get_as_webp(
            "/unsafe/filters:format(webp)/image.jpg"
        )

        expect(resp.code).to_equal(200)
        expect(resp.headers).not_to_include("Vary")
        expect(resp.body).to_be_webp()

    @gen_test
    async def test_should_add_vary_if_format_invalid(self):
        resp = await self.get_as_webp(
            "/unsafe/filters:format(asdf)/image.jpg"
        )

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()

    @gen_test
    async def test_converting_return_etags(self):
        resp = await self.get_as_webp("/unsafe/image.jpg")

        expect(resp.headers).to_include("Etag")
class ImageOperationsWithAutoWebPWithResultStorageTestCase(
    BaseImagingTestCase
):
    """Tests for AUTO_WEBP combined with a file-based result storage."""

    def get_request(self, *args, **kwargs):
        return RequestParameters(*args, **kwargs)

    def get_context(self):
        """Build a thumbor context with AUTO_WEBP and result storage."""
        config = Config(SECURITY_KEY="ACME-SEC")
        config.LOADER = "thumbor.loaders.file_loader"
        config.FILE_LOADER_ROOT_PATH = self.loader_path
        config.RESULT_STORAGE = "thumbor.result_storages.file_storage"
        config.RESULT_STORAGE_EXPIRATION_SECONDS = 60
        config.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
        config.AUTO_WEBP = True

        importer = Importer(config)
        importer.import_modules()

        server_params = ServerParameters(
            8889, "localhost", "thumbor.conf", None, "info", None
        )
        server_params.security_key = "ACME-SEC"

        context = Context(server_params, config, importer)
        context.request = self.get_request()
        context.server.gifsicle_path = which("gifsicle")
        return context

    @property
    def result_storage(self):
        return self.context.modules.result_storage

    async def get_as_webp(self, url):
        """Fetch *url* while advertising WebP support via Accept."""
        return await self.async_fetch(
            url, headers={"Accept": "image/webp,*/*;q=0.8"}
        )

    @patch("thumbor.handlers.Context")
    @gen_test
    async def test_can_auto_convert_jpeg_from_result_storage(
        self, context_mock
    ):  # NOQA
        context_mock.return_value = self.context
        crypto = CryptoURL("ACME-SEC")
        url = crypto.generate(
            image_url=quote("http://test.com/smart/image.jpg")
        )
        self.context.request = self.get_request(url=url, accepts_webp=True)

        # Seed the result storage with a WebP fixture for this URL.
        with open("./tests/fixtures/images/image.webp", "rb") as fixture:
            await self.context.modules.result_storage.put(fixture.read())

        resp = await self.get_as_webp(url)

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()

    @patch("thumbor.handlers.Context")
    @gen_test
    async def test_can_auto_convert_unsafe_jpeg_from_result_storage(
        self, context_mock
    ):
        context_mock.return_value = self.context
        self.context.request = self.get_request(accepts_webp=True)

        resp = await self.get_as_webp("/unsafe/image.jpg")

        expect(resp.code).to_equal(200)
        expect(resp.headers).to_include("Vary")
        expect(resp.headers["Vary"]).to_include("Accept")
        expect(resp.body).to_be_webp()
|
#!/usr/bin/env python
import os
import sys
import string
import random
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
# Map of "<benchmark>-<variant>" names to the comma-separated list of kernel
# function names to instrument for that benchmark (consumed via the WORKLOAD
# environment variable in main()).
kernels = {
    'aes-aes' : 'gf_alog,gf_log,gf_mulinv,rj_sbox,rj_xtime,aes_subBytes,aes_addRoundKey,aes_addRoundKey_cpy,aes_shiftRows,aes_mixColumns,aes_expandEncKey,aes256_encrypt_ecb',
    'backprop-backprop':'sigmoid,update_layer,update,propagate_error_out,propagate_error_layer,update_weights,propagate_errors,comp_error,backprop',
    'bfs-bulk' : 'bfs',
    'bfs-queue' : 'bfs',
    'kmp-kmp' : 'CPF,kmp',
    'fft-strided' : 'fft',
    'fft-transpose':'twiddles8,loadx8,loady8,fft1D_512',
    'gemm-blocked': 'bbgemm',
    'gemm-ncubed' : 'gemm',
    'md-grid':'md',
    'md-knn':'md_kernel',
    'nw-nw' : 'needwun',
    'sort-merge' : 'merge,mergesort',
    'sort-radix' : 'local_scan,sum_scan,last_step_scan,init,hist,update,ss_sort',
    'spmv-crs' : 'spmv',
    'spmv-ellpack' : 'ellpack',
    'stencil-stencil2d' : 'stencil',
    'stencil-stencil3d' : 'stencil3d',
    'viterbi-viterbi' : 'viterbi',
}
def main(directory, bench, source):
    """Build an instrumented binary for *bench* in *directory* and run it.

    The benchmark source is compiled to LLVM IR, instrumented with the
    full-trace pass, linked with the test harness and trace logger, compiled
    to a native executable, then run on input.data/check.data.

    :param directory: benchmark directory to build in (becomes the cwd).
    :param bench: key into ``kernels`` naming the benchmark variant.
    :param source: base name (without extension) of the benchmark source.
    :raises Exception: if a required environment variable is unset, or if
        ``bench`` is not a known kernels key (KeyError).
    """
    # All three variables are required: TRACER_HOME and MACH_HOME for the
    # instrumentation tooling, ALADDIN_HOME for the benchmark headers.
    # (The original only checked the first two and crashed with a bare
    # KeyError when ALADDIN_HOME was missing.)
    for env_var in ('TRACER_HOME', 'MACH_HOME', 'ALADDIN_HOME'):
        if env_var not in os.environ:
            raise Exception(
                'Set %s directory as an environment variable' % env_var)

    def run(command):
        # Echo each command before running it so build logs stay readable.
        # Exit codes are deliberately ignored, as in the original script.
        print(command)
        os.system(command)

    os.chdir(directory)

    obj = source + '.llvm'
    opt_obj = source + '-opt.llvm'
    executable = source + '-instrumented'
    # The full-trace pass reads WORKLOAD to know which kernels to trace.
    os.environ['WORKLOAD'] = kernels[bench]
    test = os.getenv('MACH_HOME') + '/common/harness.c'
    test_obj = source + '_test.llvm'
    source_file = source + '.c'

    print(directory)
    print('======================================================================')

    # Flags that would distort the trace (vectorization, unrolling,
    # inlining, builtins) are disabled for both compilations.
    compile_flags = (' -fno-slp-vectorize -fno-vectorize -fno-unroll-loops ' +
                     ' -fno-inline -fno-builtin -emit-llvm -o ')

    # Compile the benchmark and the harness to LLVM IR.
    run('clang -g -O1 -S -I' + os.environ['ALADDIN_HOME'] +
        compile_flags + obj + ' ' + source_file)
    run('clang -g -O1 -S -I' + os.environ['ALADDIN_HOME'] +
        compile_flags + test_obj + ' ' + test)
    # Apply the full-trace instrumentation pass.
    run('opt -S -load=' + os.getenv('TRACER_HOME') +
        '/full-trace/full_trace.so -fulltrace ' + obj + ' -o ' + opt_obj)
    # Link benchmark, harness and trace logger into a single module.
    run('llvm-link -o full.llvm ' + opt_obj + ' ' + test_obj + ' ' +
        os.getenv('TRACER_HOME') + '/profile-func/trace_logger.llvm')
    # Lower to assembly and build the native instrumented executable.
    run('llc -O0 -disable-fp-elim -filetype=asm -o full.s full.llvm')
    run('gcc -O0 -fno-inline -o ' + executable + ' full.s -lm -lz')
    # Execute the instrumented binary to produce the trace.
    run('./' + executable + ' input.data check.data')

    print('======================================================================')
if __name__ == '__main__':
    # Usage: <script> <directory> <bench> <source-basename>
    directory = sys.argv[1]
    bench = sys.argv[2]
    source = sys.argv[3]
    print directory, bench, source
    main(directory, bench, source)
|
import Linked_List
import sys
import random
def split_list(lst, a, b):
    """Split linked list *lst* into two halves, appending into *a* and *b*.

    The first half receives the extra element when the length is odd.  The
    source list is not modified; node data is copied via ``append``.

    :param lst: source list; must expose ``head`` (nodes with ``data`` and
        ``next``) and an integer ``length``.
    :param a: destination list receiving the first ceil(length / 2) elements.
    :param b: destination list receiving the remaining elements.
    """
    # Floor division keeps this correct on both Python 2 and 3; the original
    # `lst.length / 2` became a float under Python 3, skewing the split.
    # (length + 1) // 2 equals length // 2 + 1 for odd lengths.
    first_length = (lst.length + 1) // 2

    node = lst.head
    count = 0
    while count < first_length:
        a.append(node.data)
        node = node.next
        count += 1
    while node is not None:
        b.append(node.data)
        node = node.next
# Demo driver (Python 2): build a random linked list of the size given on
# the command line, split it in two, and print all three lists.
lst = Linked_List.LinkedList()
for iterator in range(0, int(sys.argv[1])):
    lst.push(random.randint(1, 101))
print "\nOriginal List:"
lst.print_list()
a = Linked_List.LinkedList()
b = Linked_List.LinkedList()
split_list(lst, a, b)
print "\nSplitted List A:"
a.print_list()
print "\nSplitted List B:"
b.print_list()
# Performance
# ------------
#
# * Speed
# The algorithm traverses the original list once and constructs
# both lists. The list construction operation (append) can be implemented with
# O(1) complexity. In a nutshell, the time complexity of this algorithm is
# O(N).
#
# Ideal time complexity for thi | s algorithm?
# O(1). It's all about changing the pointers. However, the limiting factor is
# traversing the list, which is a linear operation.
#
# * Memory
# 2N. Where N is the memory required to store the original list.
|
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
r'[\]^_`'
'abcdefghijklmnopqrstuvwxyz'
'{|}~')
# Translation target tables.  Each table appears to be aligned
# character-for-character with the module's printable-ASCII source table
# (required by str.maketrans in _createAsciiTo) — TODO confirm lengths.
# `upper`/`lower` fold the letter ranges to a single case while leaving
# digits and punctuation untouched.
upper: str = (''' !"#$%&'()*+,-./'''
              '0123456789'
              ':;<=>?@'
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
              r'[\]^_`'
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
              '{|}~')
lower: str = (''' !"#$%&'()*+,-./'''
              '0123456789'
              ':;<=>?@'
              'abcdefghijklmnopqrstuvwxyz'
              r'[\]^_`'
              'abcdefghijklmnopqrstuvwxyz'
              '{|}~')
full: str = ('''ใ๏ผ๏ผ๏ผ๏ผ๏ผ
๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผใผ๏ผ๏ผ'''
'๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ'
'๏ผ๏ผใ๏ผใ๏ผ๏ผ '
'๏ผก๏ผข๏ผฃ๏ผค๏ผฅ๏ผฆ๏ผง๏ผจ๏ผฉ๏ผช๏ผซ๏ผฌ๏ผญ๏ผฎ๏ผฏ๏ผฐ๏ผฑ๏ผฒ๏ผณ๏ผด๏ผต๏ผถ๏ผท๏ผธ๏ผน๏ผบ'
'๏ผป๏ผผ๏ผฝ๏ผพ๏ผฟ๏ฝ'
'๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ
๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ'
'๏ฝ๏ฝ๏ฝ๏ฝ')
parenthesized: str = (''' !"#$%&'()*+,-./'''
'0โดโตโถโทโธโนโบโปโผ'
':;<=>?@'
'โโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโต'
r'[\]^_`'
'โโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโต'
'{|}~')
circled: str = (''' !"#$%&'()*+,-./'''
'โชโ โกโขโฃโคโฅโฆโงโจ'
':;<=>?@'
'โถโทโธโนโบโปโผโฝโพโฟโโโโโโ
โโโโโโโโโโ'
'[\\]^_`'
'โโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉ'
'{|}~')
smallcaps: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'แดสแดแด
แด๊ฐษขสษชแดแดสแดษดแดแดฉQสsแดแดแด แดกxYแดข'
r'[\]^_`'
'แดสแดแด
แด๊ฐษขสษชแดแดสแดษดแดแดฉqสsแดแดแด แดกxyแดข'
'{|}~')
upsidedown: str = (''' ยก"#$%โ
,()*+โ-./'''
'0123456789'
':;<=>ยฟ@'
'ษqษpวษฦษฅฤฑษพสืษฏuodbษนsสnสสxสz'
r'[\]^_`'
'ษqษpวษฦษฅฤฑษพสืษฏuodbษนsสnสสxสz'
'{|}~')
serifBold: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
'๐๐๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ'
'{|}~')
serifItalic: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐โ๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง'
'{|}~')
serifBoldItalic: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
'๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐'
r'[\]^_`'
'๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
'{|}~')
sanSerif: str = (''' !"#$%&'()*+,-./'''
'๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ'
':;<=>?@'
'๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น'
r'[\]^_`'
'๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
'{|}~')
sanSerifBold: str = (''' !"#$%&'()*+,-./'''
'๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต'
':;<=>?@'
'๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ'
r'[\]^_`'
'๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐'
'{|}~')
sanSerifItalic: str = (''' !"#$%&'()*+,-./'''
'๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ'
':;<=>?@'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก'
r'[\]^_`'
'๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป'
'{|}~')
sanSerifBoldItalic: str = (''' !"#$%&'()*+,-./'''
'๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต'
':;<=>?@'
'๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ'
'{|}~')
script: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'๐โฌ๐๐โฐโฑ๐ขโโ๐ฅ๐ฆโโณ๐ฉ๐ช๐ซ๐ฌโ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต'
r'[\]^_`'
'๐ถ๐ท๐ธ๐นโฏ๐ปโ๐ฝ๐พ๐ฟ๐๐๐๐โด๐
๐๐๐๐๐๐๐๐๐๐'
'{|}~')
scriptBold: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ'
r'[\]^_`'
'๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐'
'{|}~')
fraktur: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'๐๐
โญ๐๐๐๐โโ๐๐๐๐๐๐๐๐โ๐๐๐๐๐๐๐โจ'
r'[\]^_`'
'๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท'
'{|}~')
frakturBold: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
'๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
'{|}~')
monospace: str = (''' !"#$%&'()*+,-./'''
'๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ'
':;<=>?@'
'๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ'
'{|}~')
doubleStruck: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐ ๐ก'
':;<=>?@'
'๐ธ๐นโ๐ป๐ผ๐ฝ๐พโ๐๐๐๐๐โ๐โโโ๐๐๐๐๐๐๐โค'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ'
'{|}~')
def _createAsciiTo(name: str,
                   toTable: str) -> FormatText:
    """Build a formatter translating printable ASCII into *toTable*.

    The returned callable maps each printable-ASCII character to the
    character at the same index of ``toTable``, and is given *name* as
    its ``__name__``.
    """
    translation = str.maketrans(ascii, toTable)

    def asciiTo(text: str) -> str:
        return text.translate(translation)

    asciiTo.__name__ = name
    return asciiTo
# Public formatter instances, one per target table.
to_upper: FormatText = _createAsciiTo('to_upper', upper)
to_lower: FormatText = _createAsciiTo('to_lower', lower)
to_full_width: FormatText = _createAsciiTo('to_full_width', full)
to_parenthesized: FormatText = _createAsciiTo(
    'to_parenthesized', parenthesized)
to_circled: FormatText = _createAsciiTo('to_circled', circled)
to_small_caps: FormatText = _createAsciiTo('to_small_caps', smallcaps)
# Private: translates characters but leaves them in reading order; the
# public to_upside_down() below reverses the result.
_to_upside_down_reversed: FormatText = _createAsciiTo(
    'to_upside_down', upsidedown)
def to_upside_down(text: str) -> str:
    """Flip *text* upside down: translate each character, then reverse."""
    flipped = _to_upside_down_reversed(text)
    return flipped[::-1]
# Remaining public formatter instances, one per target table.
to_serif_bold: FormatText = _createAsciiTo('to_serif_bold', serifBold)
to_serif_italic: FormatText = _createAsciiTo('to_serif_italic', serifItalic)
to_serif_bold_italic: FormatText = _createAsciiTo(
    'to_serif_bold_italic', serifBoldItalic)
to_sanserif: FormatText = _createAsciiTo('to_sanserif', sanSerif)
to_sanserif_bold: FormatText = _createAsciiTo('to_sanserif_bold', sanSerifBold)
to_sanserif_italic: FormatText = _createAsciiTo(
    'to_sanserif_italic', sanSerifItalic)
to_sanserif_bold_italic: FormatText = _createAsciiTo(
    'to_sanserif_bold_italic', sanSerifBoldItalic)
to_script: FormatText = _createAsciiTo('to_script', script)
to_script_bold: FormatText = _createAsciiTo('to_script_bold', scriptBold)
to_fraktur: FormatText = _createAsciiTo('to_fraktur', fraktur)
to_fraktur_bold: FormatText = _createAsciiTo('to_fraktur_bold', frakturBold)
to_monospace: FormatText = _createAsciiTo('to_monospace', monospace)
to_double_struck: FormatText = _createAsciiTo('to_double_struck', doubleStruck)
def to_ascii(text: str) -> str:
    """Translate any of the fancy alphabets back to plain ASCII."""
    sourceTables: List[str] = [
        full, parenthesized, circled, smallcaps, upsidedown,
        serifBold, serifItalic, serifBoldItalic, sanSerif,
        sanSerifBold, sanSerifItalic, sanSerifBoldItalic, script,
        scriptBold, fraktur, frakturBold, monospace, doubleStruck,
        ascii,
    ]
    # Later tables win on collisions; `ascii` last keeps ASCII unchanged.
    mapping: Dict[int, int] = {}
    for table in sourceTables:
        mapping.update(str.maketrans(table, ascii))
    return text.translate(mapping)
def format(string: str,
format_: str) -> str:
format_ = format_.lower()
strTable: Dict[str, FormatText] = {
'ascii': to_ascii,
'upper': to_upper,
'lower': to_lower,
'full': to_full_width,
'parenthesized': to_parenthesized,
'circled': to_circled,
'smallcaps': to_small_caps,
'upsidedown': to_upside_down,
'sanserif': to_sanserif,
'script': to_script,
'cursive': to_script,
'fraktur': to_fraktur,
'monospace': to_monospace,
'doublestruck': to_double_struck,
}
reTable: Dict[str, FormatText] = { |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.