prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
self._test_create(params, no_image=True)
def test_create_instance_with_device_name_not_string(self):
    """A non-string device_name in the BDM must fail schema validation."""
    self.bdm[0]['device_name'] = 123
    real_create = compute_api.API.create

    def _fake_create(*args, **kwargs):
        # The BDM should reach the compute API unchanged.
        self.assertEqual(kwargs['block_device_mapping'], self.bdm)
        return real_create(*args, **kwargs)

    self.stubs.Set(compute_api.API, 'create', _fake_create)
    self.assertRaises(self.validation_error,
                      self._test_create,
                      {block_device_mapping.ATTRIBUTE_NAME: self.bdm},
                      no_image=True)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
    # A bare string instead of a list of BDM dicts must be rejected by
    # the request schema before compute_api.API.create is reached.
    self.params = {'block_device_mapping': '/dev/vdb'}
    self.assertRaises(self.validation_error,
                      self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
    """An empty device_name string must fail schema validation."""
    self.bdm[0]['device_name'] = ''
    real_create = compute_api.API.create

    def _fake_create(*args, **kwargs):
        # The BDM should reach the compute API unchanged.
        self.assertEqual(kwargs['block_device_mapping'], self.bdm)
        return real_create(*args, **kwargs)

    self.stubs.Set(compute_api.API, 'create', _fake_create)
    self.assertRaises(self.validation_error,
                      self._test_create,
                      {block_device_mapping.ATTRIBUTE_NAME: self.bdm},
                      no_image=True)
def test_create_instance_with_device_name_too_long(self):
    """A 256-character device_name exceeds the schema limit."""
    self.bdm[0]['device_name'] = 'a' * 256
    real_create = compute_api.API.create

    def _fake_create(*args, **kwargs):
        # The BDM should reach the compute API unchanged.
        self.assertEqual(kwargs['block_device_mapping'], self.bdm)
        return real_create(*args, **kwargs)

    self.stubs.Set(compute_api.API, 'create', _fake_create)
    self.assertRaises(self.validation_error,
                      self._test_create,
                      {block_device_mapping.ATTRIBUTE_NAME: self.bdm},
                      no_image=True)
def test_create_instance_with_space_in_device_name(self):
    """A device_name containing whitespace must fail validation."""
    self.bdm[0]['device_name'] = 'v da'
    real_create = compute_api.API.create

    def _fake_create(*args, **kwargs):
        # Legacy-style BDMs are flagged, and the BDM arrives unchanged.
        self.assertTrue(kwargs['legacy_bdm'])
        self.assertEqual(kwargs['block_device_mapping'], self.bdm)
        return real_create(*args, **kwargs)

    self.stubs.Set(compute_api.API, 'create', _fake_create)
    self.assertRaises(self.validation_error,
                      self._test_create,
                      {block_device_mapping.ATTRIBUTE_NAME: self.bdm},
                      no_image=True)
def test_create_instance_with_invalid_size(self):
    """A non-numeric volume_size must fail schema validation."""
    self.bdm[0]['volume_size'] = 'hello world'
    real_create = compute_api.API.create

    def _fake_create(*args, **kwargs):
        # The BDM should reach the compute API unchanged.
        self.assertEqual(kwargs['block_device_mapping'], self.bdm)
        return real_create(*args, **kwargs)

    self.stubs.Set(compute_api.API, 'create', _fake_create)
    self.assertRaises(self.validation_error,
                      self._test_create,
                      {block_device_mapping.ATTRIBUTE_NAME: self.bdm},
                      no_image=True)
def test_create_instance_bdm(self):
    """A v3 BDM passes through with 'uuid' translated to 'volume_id'."""
    bdm = [{'source_type': 'volume',
            'device_name': 'fake_dev',
            'uuid': 'fake_vol'}]
    bdm_expected = [{'source_type': 'volume',
                     'device_name': 'fake_dev',
                     'volume_id': 'fake_vol'}]
    real_create = compute_api.API.create

    def _fake_create(*args, **kwargs):
        # New-style BDMs must not be flagged as legacy.
        self.assertFalse(kwargs['legacy_bdm'])
        received = kwargs['block_device_mapping']
        for want, got in zip(bdm_expected, received):
            self.assertThat(block_device.BlockDeviceDict(want),
                            matchers.DictMatches(got))
        return real_create(*args, **kwargs)

    def _skip_validate_bdm(*args, **kwargs):
        # Compute-side BDM validation is not under test here.
        pass

    self.stubs.Set(compute_api.API, 'create', _fake_create)
    self.stubs.Set(compute_api.API, '_validate_bdm', _skip_validate_bdm)
    self._test_create({block_device_mapping.ATTRIBUTE_NAME: bdm},
                      no_image=True)
def test_create_instance_bdm_missing_device_name(self):
    """When device_name is omitted, create() must not receive None for it.

    Fixes copy/paste garbling in the original block (a broken ``def``
    line and a stray ``|`` token); the test logic is unchanged.
    """
    del self.bdm[0]['device_name']
    old_create = compute_api.API.create

    def create(*args, **kwargs):
        self.assertFalse(kwargs['legacy_bdm'])
        # NOTE(review): assertNotIn(None, <device_name>) checks None is not
        # an element of the received device_name -- confirm this is the
        # intended assertion upstream (vs. asserting the value is not None).
        self.assertNotIn(None,
                         kwargs['block_device_mapping'][0]['device_name'])
        return old_create(*args, **kwargs)

    def _validate_bdm(*args, **kwargs):
        pass

    self.stubs.Set(compute_api.API, 'create', create)
    self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
    params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
    self._test_create(params, no_image=True)
def test_create_instance_bdm_validation_error(self):
    """A BDM dict failing its own validation surfaces as HTTP 400."""
    def _broken_validate(*args, **kwargs):
        raise exception.InvalidBDMFormat(details='Wrong BDM')

    self.stubs.Set(block_device.BlockDeviceDict,
                   '_validate', _broken_validate)
    self.assertRaises(exc.HTTPBadRequest,
                      self._test_create,
                      {block_device_mapping.ATTRIBUTE_NAME: self.bdm},
                      no_image=True)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
    """A volume that is not bootable must turn into an HTTP 400."""
    fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
    request = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
    self.assertRaises(exc.HTTPBadRequest, self._test_create, request,
                      no_image=True)
def test_create_instance_bdm_api_validation_fails(self):
    """Each BDM validation failure from compute maps to HTTP 400 and
    triggers instance cleanup."""
    self.validation_fail_test_validate_called = False
    self.validation_fail_instance_destroy_called = False
    bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
                      (exception.InvalidBDMVolume, {'id': 'fake'}),
                      (exception.InvalidBDMImage, {'id': 'fake'}),
                      (exception.InvalidBDMBootSequence, {}),
                      (exception.InvalidBDMLocalsLimit, {}))
    ex_iter = iter(bdm_exceptions)

    def _validate_bdm(*args, **kwargs):
        self.validation_fail_test_validate_called = True
        # Each create attempt raises the next exception type in turn.
        ex, kargs = next(ex_iter)
        raise ex(**kargs)

    def _instance_destroy(*args, **kwargs):
        self.validation_fail_instance_destroy_called = True

    self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
    self.stubs.Set(objects.Instance, 'destroy', _instance_destroy)
    for _unused in range(len(bdm_exceptions)):
        params = {block_device_mapping.ATTRIBUTE_NAME:
                  [self.bdm[0].copy()]}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params)
        # Both the validation hook and instance cleanup must have run;
        # reset the flags for the next exception type.
        self.assertTrue(self.validation_fail_test_validate_called)
        self.assertTrue(self.validation_fail_instance_destroy_called)
        self.validation_fail_test_validate_called = False
        self.validation_fail_instance_destroy_called = False
class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
    # The v2 API reports request-schema problems as HTTPBadRequest rather
    # than a jsonschema validation error.
    validation_error = exc.HTTPBadRequest

    def _setup_controller(self):
        """Build two v2 controllers: one with the BDM-v2 boot extension
        enabled, and one with only os-volumes."""
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {'os-volumes': 'fake',
                                   'os-block-device-mapping-v2-boot': 'fake'}
        self.controller = servers_v2.Controller(self.ext_mgr)
        self.ext_mgr_bdm_v2 = extensions.ExtensionManager()
        self.ext_mgr_bdm_v2.extensions = {'os-volumes': 'fake'}
        self.no_bdm_v2_controller = servers_v2.Controller(
            self.ext_mgr_bdm_v2)
def test_create_instance_with_block_device_mapping_disabled(self):
bdm = [{'device_name': 'foo'}]
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['block_device_mapping'], None)
return old_cre |
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import contextlib
from gi.reposi | tory import GObject, Gtk
from quodlibet import _
from quodlibet import app
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import Icons
from quodlibet.qltk.seekbutton import TimeLabel
from quodlibet.qltk.tracker import TimeTracker
from quodlibet.qltk import Align
from quodlibet.util import connect_destroy
class SeekBar(Gtk.Box):
    """A horizontal seek scale with elapsed/remaining time labels.

    Fixes copy/paste garbling in the original (``self._remaining | _label``);
    behavior is otherwise unchanged.
    """

    def __init__(self, player, library):
        super(SeekBar, self).__init__()

        self._elapsed_label = TimeLabel()
        self._remaining_label = TimeLabel()

        scale = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL)
        scale.set_adjustment(Gtk.Adjustment.new(0, 0, 0, 3, -15, 0))
        scale.set_draw_value(False)
        self._scale = scale

        self.pack_start(Align(self._elapsed_label, border=6), False, True, 0)
        self.pack_start(scale, True, True, 0)
        self.pack_start(Align(self._remaining_label, border=6), False, True, 0)
        for child in self.get_children():
            child.show_all()

        # _id is blocked inside _inhibit() so programmatic scale updates
        # don't trigger a user seek.
        self._id = self._scale.connect(
            'value-changed', self._on_user_changed, player)
        self._scale.connect(
            'value-changed', self._on_scale_value_changed, player)

        self._tracker = TimeTracker(player)
        self._tracker.connect('tick', self._on_tick, player)

        connect_destroy(player, 'seek', self._on_player_seek)
        connect_destroy(player, 'song-started', self._on_song_started)
        connect_destroy(player, "notify::seekable", self._on_seekable_changed)
        connect_destroy(
            library, "changed", self._on_song_changed, player)

        self.connect("destroy", self._on_destroy)

        with self._inhibit():
            self._update(player)
        self._tracker.tick()

    def _on_destroy(self, *args):
        self._tracker.destroy()

    @contextlib.contextmanager
    def _inhibit(self):
        # Temporarily block the user-seek handler while we move the scale.
        with GObject.signal_handler_block(self._scale, self._id):
            yield

    def _on_user_changed(self, scale, player):
        if player.seekable:
            player.seek(scale.get_value() * 1000)

    def _on_scale_value_changed(self, scale, player):
        self._update(player)

    def _on_tick(self, tracker, player):
        position = player.get_position() // 1000
        with self._inhibit():
            self._scale.set_value(position)

    def _on_seekable_changed(self, player, *args):
        with self._inhibit():
            self._update(player)

    def _on_song_changed(self, library, songs, player):
        # Only react if the change affects the currently playing song.
        if player.info in songs:
            with self._inhibit():
                self._update(player)

    def _on_player_seek(self, player, song, ms):
        with self._inhibit():
            self._scale.set_value(ms // 1000)
            self._update(player)

    def _on_song_started(self, player, song):
        with self._inhibit():
            self._scale.set_value(0)
            self._update(player)

    def _update(self, player):
        """Sync scale range/labels with the current song and position."""
        if player.info:
            self._scale.set_range(0, player.info("~#length"))
        else:
            self._scale.set_range(0, 1)

        if not player.seekable:
            self._scale.set_value(0)

        value = self._scale.get_value()
        max_ = self._scale.get_adjustment().get_upper()
        # Remaining time is shown as a negative value.
        remaining = value - max_

        self._elapsed_label.set_time(value)
        self._remaining_label.set_time(remaining)

        self._remaining_label.set_disabled(not player.seekable)
        self._elapsed_label.set_disabled(not player.seekable)
        self.set_sensitive(player.seekable)
class SeekBarPlugin(EventPlugin):
    """Installs a full-width SeekBar into the main window."""

    PLUGIN_ID = "SeekBar"
    PLUGIN_NAME = _("Alternative Seek Bar")
    PLUGIN_DESC = _("Alternative seek bar which is always visible and spans "
                    "the whole window width.")
    PLUGIN_ICON = Icons.GO_JUMP

    def enabled(self):
        """Create the bar and hand it to the main window."""
        bar = SeekBar(app.player, app.librarian)
        bar.show()
        app.window.set_seekbar_widget(bar)
        self._bar = bar

    def disabled(self):
        """Detach and dispose of the bar."""
        app.window.set_seekbar_widget(None)
        self._bar.destroy()
        del self._bar
|
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2014-2016 Wojtek Porczyk <woju@invisiblethingslab.com>
# Copyright (C) 2016 Marek Marczykowski <marmarek@invisiblethingslab.com>)
# Copyright (C) 2016 Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
''' This module contains the TemplateVM implementation '''
import qubes
import qubes.config
import qubes.vm.qubesvm
import qubes.vm.mix.net
from qubes.config import defaults
from qubes.vm.qubesvm import QubesVM
class TemplateVM(QubesVM):
    '''Template for AppVM'''

    # Fixes copy/paste garbling in the original (stray ``|`` tokens in
    # __init__); behavior is unchanged.
    dir_path_prefix = qubes.config.system_path['qubes_templates_dir']

    @property
    def appvms(self):
        ''' Returns a generator containing all domains based on the current
            TemplateVM.
        '''
        for vm in self.app.domains:
            if hasattr(vm, 'template') and vm.template is self:
                yield vm

    netvm = qubes.VMProperty(
        'netvm', load_stage=4, allow_none=True,
        default=None,
        # pylint: disable=protected-access
        setter=qubes.vm.qubesvm.QubesVM.netvm._setter,
        doc='VM that provides network connection to this domain. When '
            '`None`, machine is disconnected.')

    def __init__(self, *args, **kwargs):
        # A template is the root of the template chain; it cannot itself
        # be based on another template.
        assert 'template' not in kwargs, \
            "A TemplateVM can not have a template"
        self.volume_config = {
            'root': {
                'name': 'root',
                'snap_on_start': False,
                'save_on_stop': True,
                'rw': True,
                'source': None,
                'size': defaults['root_img_size'],
            },
            'private': {
                'name': 'private',
                'snap_on_start': False,
                'save_on_stop': True,
                'rw': True,
                'source': None,
                'size': defaults['private_img_size'],
                'revisions_to_keep': 0,
            },
            'volatile': {
                'name': 'volatile',
                'size': defaults['root_img_size'],
                'snap_on_start': False,
                'save_on_stop': False,
                'rw': True,
            },
            'kernel': {
                'name': 'kernel',
                'snap_on_start': False,
                'save_on_stop': False,
                'rw': False
            }
        }
        super(TemplateVM, self).__init__(*args, **kwargs)

    @qubes.events.handler('property-set:default_user',
                          'property-set:kernel',
                          'property-set:kernelopts',
                          'property-set:vcpus',
                          'property-set:memory',
                          'property-set:maxmem',
                          'property-set:qrexec_timeout',
                          'property-set:shutdown_timeout',
                          'property-set:management_dispvm')
    def on_property_set_child(self, _event, name, newvalue, oldvalue=None):
        """Send event about default value change to child VMs
        (which use default inherited from the template).

        This handler is supposed to be set for properties using
        `_default_with_template()` function for the default value.
        """
        if newvalue == oldvalue:
            return
        for vm in self.appvms:
            if not vm.property_is_default(name):
                continue
            vm.fire_event('property-reset:' + name, name=name)
|
num_females=0):
pass
def __str__(self):
    """Human-readable summary of the per-video counters."""
    return ("Video({}): num_frames({}), selected_frames({}), "
            "num_detections({}), num_males({}), num_females({})").format(
        self.video_id, self.num_frames, self.selected_frames,
        self.num_detections, self.num_males, self.num_females)
def __add__(self, other):
    """Element-wise sum of counters; the result keeps self's video_id."""
    return VideoStats(
        self.video_id,
        self.num_frames + other.num_frames,
        self.selected_frames + other.selected_frames,
        self.num_detections + other.num_detections,
        self.num_males + other.num_males,
        self.num_females + other.num_females)
class Command(BaseCommand):
    """Django management command: detect/evaluate faces in videos."""
    help = 'Detect faces in videos'

    def add_arguments(self, parser):
        # Single positional sub-command selecting the action to run.
        parser.add_argument('command')
def bbox_area(self, bbox, video):
    """Area (in pixels) of a normalized bbox, given the video dimensions."""
    width_px = (bbox.x2 - bbox.x1) * video.width
    height_px = (bbox.y2 - bbox.y1) * video.height
    return width_px * height_px
def compute_iou(self, bbox1, bbox2, video):
    """Intersection-over-union of two normalized boxes, in pixel terms."""
    overlap_w = min(bbox1.x2, bbox2.x2) - max(bbox1.x1, bbox2.x1)
    overlap_h = min(bbox1.y2, bbox2.y2) - max(bbox1.y1, bbox2.y1)
    int_area = 0.0
    if overlap_w > 0 and overlap_h > 0:
        int_area = (overlap_w * video.width) * (overlap_h * video.height)
    union = (self.bbox_area(bbox1, video) +
             self.bbox_area(bbox2, video) - int_area)
    return int_area / union
def remove_duplicates(self, l):
    """Return l with duplicates removed, keeping first-seen order."""
    # dict preserves insertion order; keys are unique.
    return list(dict.fromkeys(l))
def fetch_ground_truth(self, video, label="Talking Heads"):
    """Return (frame_numbers, faces_by_frame) for hand-labeled frames.

    BUG FIX: the query previously hard-coded "Talking Heads" and ignored
    the ``label`` argument; the argument is now honored. The default
    value keeps existing call sites behaviorally unchanged.
    """
    g_labelset = video.handlabeled_labelset()  # ground truth
    g_faces = Face.objects.filter(
        frame__labelset=g_labelset,
        frame__labels__name=label).prefetch_related('frame').all()
    ground_truth_frames = []
    g_faces_dict = defaultdict(list)
    for g_face in g_faces:
        g_faces_dict[g_face.frame.number].append(g_face)
        ground_truth_frames.append(g_face.frame.number)
    ground_truth_frames = self.remove_duplicates(ground_truth_frames)
    return (ground_truth_frames, g_faces_dict)
def fetch_automatic_detections(self, video, label="Talking Heads"):
    """Return (frame_numbers, faces_by_frame) for detections that pass the
    confidence and size thresholds.

    NOTE(review): ``label`` is currently unused -- detections are filtered
    only by score and face size; confirm whether label filtering was
    intended here.
    """
    d_labelset = video.detected_labelset()  # prediction
    d_faces = Face.objects.filter(frame__labelset=d_labelset).prefetch_related('frame').all()
    detected_frames = []
    d_faces_dict = defaultdict(list)
    # Keep a detection only when its confidence is high and the face
    # covers at least 3% of the frame area.
    face_size_thres = 0.03
    det_score_thres = 0.95
    for d_face in d_faces:
        if d_face.bbox.score > det_score_thres and self.bbox_area(d_face.bbox, video) > (face_size_thres * video.width * video.height):
            d_faces_dict[d_face.frame.number].append(d_face)
            detected_frames.append(d_face.frame.number)
    detected_frames = self.remove_duplicates(detected_frames)
    return (detected_frames, d_faces_dict)
def eval_detection(self, video, frame_number, d_faces, g_faces, vstats):
    """Match detected faces against ground truth for one frame.

    Returns (vstats, gender_eval_list) where gender_eval_list holds
    (detected_gender, ground_truth_gender) pairs for true positives.

    BUG FIX: the empty-frame early return used to yield a bare 5-tuple
    of zeros, which the caller (eval_video) cannot unpack as
    (vstats, gender_eval_list); it now returns (vstats, []).
    """
    if not d_faces and not g_faces:
        return (vstats, [])
    iou_threshold = 0.5
    tp_detections = 0
    fp_detections = 0
    fn_detections = 0
    d_dict = defaultdict(int)
    g_dict = defaultdict(int)
    gender_eval_list = []
    for d_face in d_faces:
        for g_face in g_faces:
            iou = self.compute_iou(d_face.bbox, g_face.bbox, video)
            if iou > iou_threshold:
                if g_dict[g_face] != 0:
                    # Ground-truth face already claimed: duplicate detection.
                    fp_detections += 1
                else:
                    tp_detections += 1
                    gender_eval_list.append((d_face.gender, g_face.gender))
                g_dict[g_face] += 1
                d_dict[d_face] += 1
    # Detections matching no ground-truth face are false positives.
    for d_face in d_faces:
        if d_dict[d_face] == 0:
            fp_detections += 1
    # Ground-truth faces matched by no detection are false negatives.
    for g_face in g_faces:
        if g_dict[g_face] == 0:
            fn_detections += 1
    # Update detection stats.
    vstats.num_detections += len(d_faces)
    vstats.tp_detections += tp_detections
    vstats.fp_detections += fp_detections
    vstats.fn_detections += fn_detections
    if fp_detections != 0 or fn_detections != 0:
        vstats.mismatched_tp_frames += 1
    return (vstats, gender_eval_list)
def eval_frame_selection(self, g_frame_list, d_frame_list):
    """Split frames into true positives, false positives, false negatives.

    Output order (and any duplicates) of the input lists is preserved;
    membership tests now use sets, replacing the accidental O(n^2)
    list scans (frame numbers are hashable ints).
    """
    detected = set(d_frame_list)
    tp_frames = [x for x in g_frame_list if x in detected]
    tp_set = set(tp_frames)
    fp_frames = [x for x in d_frame_list if x not in tp_set]
    fn_frames = [x for x in g_frame_list if x not in tp_set]
    return (tp_frames, fp_frames, fn_frames)
def eval_gender(self, gender_eval_list, vstats):
    """Accumulate gender stats from (detected, ground_truth) pairs."""
    males = females = matches = male_errs = female_errs = 0
    for detected, truth in gender_eval_list:
        if detected == 'M':
            males += 1
            if truth == detected:
                matches += 1
            else:
                male_errs += 1
        else:
            females += 1
            if truth == detected:
                matches += 1
            else:
                female_errs += 1
    # Fold the per-frame tallies into the running video stats.
    vstats.num_males += males
    vstats.num_females += females
    vstats.gender_matches += matches
    vstats.male_mismatches += male_errs
    vstats.female_mismatches += female_errs
    return vstats
def eval_video(self, video):
    """Evaluate detection and gender accuracy for one video.

    Fixes copy/paste garbling in the original (stray ``|`` tokens inside
    two identifiers); the logic is unchanged.
    """
    (ground_truth_frames, g_faces_dict) = self.fetch_ground_truth(video)
    (detected_frames, d_faces_dict) = self.fetch_automatic_detections(video)
    (tp_frames, fp_frames, fn_frames) = self.eval_frame_selection(
        ground_truth_frames, detected_frames)
    vstats = VideoEvalStats(
        video_id=video.id,
        num_frames=int(video.num_frames / video.get_stride()),
        tp_frames=len(tp_frames),
        fp_frames=len(fp_frames),
        fn_frames=len(fn_frames))
    # Detection/gender metrics only make sense on frames that were both
    # selected and hand-labeled (true-positive frames).
    for frame_number in tp_frames:
        d_faces = d_faces_dict[frame_number]
        g_faces = g_faces_dict[frame_number]
        (vstats, gender_eval_list) = self.eval_detection(
            video, frame_number, d_faces, g_faces, vstats)
        vstats = self.eval_gender(gender_eval_list, vstats)
    return vstats
def eval_videos(self, start_video_id, end_video_id):
    """Evaluate each video id in [start_video_id, end_video_id)."""
    totals = VideoEvalStats(video_id=0)
    for vid in range(start_video_id, end_video_id):
        stats = self.eval_video(Video.objects.filter(id=vid).get())
        print(stats)
        totals = totals + stats
    print(totals)
def infer_videos(self, start_video_id, end_video_id):
vtotal_stats = VideoStats(video_id=0)
for video_id in range(start_video_id, end_video_id):
video = Video.objects.filter(id=video_id).get()
(detected_frames, d_faces_dict) = self.fetch_automatic_detections(video)
vstats = VideoStats(video_id=video.id, num_frames=int(video.num_frames/video.get_stride()), selected_frames=len(detected_frames))
#for frame_number in range(0, 1000, video.get_stride()):
for frame_n |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's Event Model (encapsulates wx.Event)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
# Initial implementation was inspired on PythonCard's event module, altought
# it was almost completely discarded and re-written from scratch to make it
# simpler and mimic web (html/javascript) event models
# References
# https://developer.mozilla.org/en-US/docs/Mozilla_event_reference
# http://wxpython.org/docs/api/wx.Event-class.html
import time
class Event:
    "Generic Event Object: holds actual event data (created by EventHandler)"

    # When True, default (generic) handlers are skipped for this event.
    cancel_default = False

    def __init__(self, name="", wx_event=None):
        self.wx_event = wx_event
        # Walk up the wx parent chain until we reach the wrapping gui
        # object (compound controls are built from plain wx children).
        widget = wx_event.GetEventObject()
        while widget and not hasattr(widget, "obj"):
            widget = widget.Parent
        self.target = widget.obj if widget else None
        # wx reports a timestamp only for mouse/keyboard events; fall back
        # to wall-clock time when it is missing (falsy).
        self.timestamp = wx_event.GetTimestamp() or time.time()
        self.name = name  # name (type), i.e.: "click"

    def prevent_default(self, cancel=True):
        # Skipping the wx event is the inverse of cancelling our default.
        self.wx_event.Skip(not cancel)
        self.cancel_default = cancel

    def stop_propagation(self):
        self.wx_event.StopPropagation()
class UIEvent(Event):
    "General -window- related events (detail can hold additional data)"

    names = ["load", "resize", "scroll", "paint", "unload"]

    def __init__(self, name, detail=None, wx_event=None):
        Event.__init__(self, name, wx_event)
        self.detail = detail
        # Climb the gui-object tree to find the top-level window.
        top = self.target
        while top and top.parent:
            top = top.get_parent()
        self.window = top

    def prevent_default(self):
        if self.name != 'unload':
            Event.prevent_default(self)  # call default implementation
            return
        # Closing a window may be vetoed only when wx allows it.
        if self.wx_event.CanVeto():
            self.wx_event.Veto()
        else:
            raise RuntimeError("Cannot Veto!")
class FocusEvent(Event):
    "Focus related events"
    # Mirrors the HTML focus/blur pair; no extra payload beyond Event.
    names = ["focus", "blur"]
class FormEvent(UIEvent):
    "Form HTML-like events "
    names = ["select", "change", "reset", "submit", "invalid"]
    cancel_default = True  # command events should not escalate
class SubmitEvent(FormEvent):
    "Form submission handler (includes HTML form data and field contents)"

    def __init__(self, name, wx_event=None):
        # Calls Event.__init__ directly (bypassing UIEvent): submit events
        # carry their own form payload instead of window/detail data.
        Event.__init__(self, name, wx_event)
        self.form = wx_event.form
        self.data = wx_event.data
class MouseEvent(Event):
    "Mouse related events (wrapper for wx.MouseEvent)"

    names = ["click", "dblclick", "mousedown", "mousemove",
             "mouseout", "mouseover", "mouseup", "mousewheel"]

    def __init__(self, name, wx_event=None):
        Event.__init__(self, name, wx_event)
        # Pointer position (window coordinates).
        self.x = wx_event.GetX()
        self.y = wx_event.GetY()
        # Modifier keys held when the event fired.
        self.alt_key = wx_event.AltDown()
        self.ctrl_key = wx_event.ControlDown()
        self.shift_key = wx_event.ShiftDown()
        self.meta_key = wx_event.MetaDown()
        # Mouse button state.
        self.left_button = wx_event.LeftIsDown()
        self.right_button = wx_event.RightIsDown()
        self.middle_button = wx_event.MiddleIsDown()
        # Wheel delta only exists on wheel events.
        if name == "mousewheel":
            self.wheel_delta = wx_event.GetWheelDelta()
class KeyEvent(Event):
    "Keyboard related event (wrapper for wx.KeyEvent)"
    # only sent to the widget that currently has the keyboard focus
    names = "onkeypress", "onkeydown", "onkeyup",

    def __init__(self, name, wx_event=None):
        Event.__init__(self, name, wx_event)
        self.ctrl_key = wx_event.ControlDown()
        self.shift_key = wx_event.ShiftDown()
        self.alt_key = wx_event.AltDown()
        self.meta_key = wx_event.MetaDown()
        self.key = wx_event.KeyCode  # virtual key code value
        # NOTE(review): unichr is Python 2 only -- this module appears to
        # target py2; switch to chr() if ever ported to Python 3.
        self.char = unichr(wx_event.GetUnicodeKey())  # Unicode character
class TimingEvent(Event):
    "Time interval events"
    names = ["idle", "timer"]

    def __init__(self, name, interval=None, wx_event=None):
        Event.__init__(self, name, wx_event)
        self.interval = interval  # timer period; None for idle events

    def request_more(self):
        # Ask wx to deliver another idle event after this one.
        self.wx_event.RequestMore(needMore=True)
class HtmlLinkEvent(UIEvent):
    "Html hyperlink click event (href and target)"

    def __init__(self, name, detail=None, wx_event=None):
        UIEvent.__init__(self, name, wx_event=wx_event,
                         detail=wx_event.GetLinkInfo().GetHtmlCell())
        self.href = wx_event.GetLinkInfo().GetHref()
        # NOTE: overwrites Event's gui-object `target` with the link's
        # HTML target string (window name), matching the DOM naming.
        self.target = wx_event.GetLinkInfo().GetTarget()
class HtmlCellEvent(MouseEvent):
    "Html Cell click / hover events"

    def __init__(self, name, detail=None, wx_event=None):
        # Unwrap the underlying wx.MouseEvent for position/button state.
        MouseEvent.__init__(self, name, wx_event.GetMouseEvent())
        self.detail = wx_event.GetCell()   # the HTML cell that was hit
        self.point = wx_event.GetPoint()
class HtmlCtrlClickEvent(UIEvent):
    "Html Control click "
    # (docstring de-garbled: the original contained a stray '|' token)

    def __init__(self, name, detail=None, wx_event=None):
        # detail is the gui control embedded in the HTML window.
        UIEvent.__init__(self, name, wx_event=wx_event,
                         detail=wx_event.ctrl)
class TreeEvent(UIEvent):
    "Tree Control events (detail has the selected/extended/collapsed item)"
    # (docstring de-garbled: the original contained a stray '|' token)

    def __init__(self, name, detail=None, wx_event=None):
        wx_tree = wx_event.GetEventObject()
        model = wx_tree.obj.items
        wx_item = wx_event.GetItem()
        # Some tree events carry no item; fall back to the selection.
        if not wx_item.IsOk():
            wx_item = wx_tree.GetSelection()
        UIEvent.__init__(self, name, wx_event=wx_event,
                         detail=model(wx_item))
class GridEvent(UIEvent):
    "Grid Control events (mouse, size, edit, etc.)"

    def __init__(self, name, detail=None, wx_event=None):
        wx_grid = wx_event.GetEventObject()
        model = wx_grid.obj.items
        # Not every grid event carries cell coordinates (e.g. size events),
        # so default the attributes instead of leaving them unset.
        self.row = self.col = self.position = None
        try:
            self.row = wx_event.GetRow()
            self.col = wx_event.GetCol()
            self.position = wx_event.GetPosition()
        except AttributeError:
            # BUG FIX: was a bare `except: pass` that left self.row/self.col
            # undefined, crashing on the detail lookup below.
            pass
        if self.row is not None and self.col is not None:
            detail = model[self.row][self.col]
        UIEvent.__init__(self, name, wx_event=wx_event, detail=detail)
WIDGET_EVENTS = MouseEvent, FocusEvent, TimingEvent
|
from pandas.io.formats.printing import pprint_thing
def frame_apply(obj, func, axis=0, broadcast=None,
                raw=False, reduce=None, result_type=None,
                ignore_failures=False,
                args=None, kwds=None):
    """Construct and return a row or column based frame apply object.

    (De-garbled: the original signature and docstring contained stray
    '|' tokens; the logic is unchanged.)
    """
    axis = obj._get_axis_number(axis)
    # axis 0 applies func to each column; axis 1 to each row.
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        klass = FrameColumnApply
    return klass(obj, func, broadcast=broadcast,
                 raw=raw, reduce=reduce, result_type=result_type,
                 ignore_failures=ignore_failures,
                 args=args, kwds=kwds)
class FrameApply(object):
def __init__(self, obj, func, broadcast, raw, reduce, result_type,
             ignore_failures, args, kwds):
    """Set up an apply operation, translating the deprecated
    ``broadcast``/``reduce`` flags into ``result_type``."""
    self.obj = obj
    self.raw = raw
    self.ignore_failures = ignore_failures
    self.args = args or ()
    self.kwds = kwds or {}

    if result_type not in [None, 'reduce', 'broadcast', 'expand']:
        raise ValueError("invalid value for result_type, must be one "
                         "of {None, 'reduce', 'broadcast', 'expand'}")

    # broadcast/reduce are deprecated aliases for result_type values.
    if broadcast is not None:
        warnings.warn("The broadcast argument is deprecated and will "
                      "be removed in a future version. You can specify "
                      "result_type='broadcast' to broadcast the result "
                      "to the original dimensions",
                      FutureWarning, stacklevel=4)
        if broadcast:
            result_type = 'broadcast'

    if reduce is not None:
        warnings.warn("The reduce argument is deprecated and will "
                      "be removed in a future version. You can specify "
                      "result_type='reduce' to try to reduce the result "
                      "to the original dimensions",
                      FutureWarning, stacklevel=4)
        if reduce:
            if result_type is not None:
                raise ValueError(
                    "cannot pass both reduce=True and result_type")
            result_type = 'reduce'

    self.result_type = result_type

    # curry if needed: bake args/kwds into f unless func is a ufunc or a
    # method name string (both handled specially in get_result).
    if ((kwds or args) and
            not isinstance(func, (np.ufunc, compat.string_types))):
        def f(x):
            return func(x, *args, **kwds)
    else:
        f = func
    self.f = f

    # results (filled in by the apply_* methods)
    self.result = None
    self.res_index = None
    self.res_columns = None
@property
def columns(self):
    """Column labels of the wrapped object."""
    return self.obj.columns

@property
def index(self):
    """Row index of the wrapped object."""
    return self.obj.index

@cache_readonly
def values(self):
    # Cached: materializing .values can be expensive.
    return self.obj.values

@cache_readonly
def dtypes(self):
    # Cached for the repeated extension-dtype checks in apply_standard.
    return self.obj.dtypes

@property
def agg_axis(self):
    """Labels that aggregation results are indexed by."""
    return self.obj._get_agg_axis(self.axis)
def get_result(self):
    """ compute the results """
    # dispatch to agg when f is a list/dict of functions
    if is_list_like(self.f) or is_dict_like(self.f):
        return self.obj.aggregate(self.f, axis=self.axis,
                                  *self.args, **self.kwds)

    # all empty: both axes have length 0
    if len(self.columns) == 0 and len(self.index) == 0:
        return self.apply_empty_result()

    # string dispatch
    if isinstance(self.f, compat.string_types):
        # Support for `frame.transform('method')`
        # Some methods (shift, etc.) require the axis argument, others
        # don't, so inspect and insert if necessary.
        func = getattr(self.obj, self.f)
        sig = compat.signature(func)
        if 'axis' in sig.args:
            self.kwds['axis'] = self.axis
        return func(*self.args, **self.kwds)

    # ufunc: applied to the whole values array at once
    elif isinstance(self.f, np.ufunc):
        with np.errstate(all='ignore'):
            results = self.f(self.values)
        return self.obj._constructor(data=results, index=self.index,
                                     columns=self.columns, copy=False)

    # broadcasting
    # NOTE(review): apply_broadcast is defined with a required `target`
    # argument below -- presumably subclasses override it to supply one;
    # confirm before relying on this path.
    if self.result_type == 'broadcast':
        return self.apply_broadcast()

    # one axis empty
    elif not all(self.obj.shape):
        return self.apply_empty_result()

    # raw: operate directly on the ndarray
    elif self.raw and not self.obj._is_mixed_type:
        return self.apply_raw()

    return self.apply_standard()
def apply_empty_result(self):
    """
    we have an empty result; at least 1 axis is 0

    we will try to apply the function to an empty
    series in order to see if this is a reduction function
    """

    # we are not asked to reduce or infer reduction
    # so just return a copy of the existing object
    if self.result_type not in ['reduce', None]:
        return self.obj.copy()

    # we may need to infer
    reduce = self.result_type == 'reduce'

    from pandas import Series
    if not reduce:
        # Probe f with an empty Series: if it does not return a Series,
        # treat it as a reduction.
        EMPTY_SERIES = Series([])
        try:
            r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
            reduce = not isinstance(r, Series)
        except Exception:
            # probing failed; keep the non-reduce default
            pass

    if reduce:
        return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
    else:
        return self.obj.copy()
def apply_raw(self):
    """ apply to the values as a numpy array """
    try:
        result = reduction.reduce(self.values, self.f, axis=self.axis)
    except Exception:
        # fall back to the slower numpy path when the C reducer fails
        result = np.apply_along_axis(self.f, self.axis, self.values)

    # TODO: mixed type case
    # 2-D result -> frame; 1-D result -> reduced series
    if result.ndim == 2:
        return self.obj._constructor(result,
                                     index=self.index,
                                     columns=self.columns)
    else:
        return self.obj._constructor_sliced(result,
                                            index=self.agg_axis)
def apply_broadcast(self, target):
    """Apply f column-by-column to *target*, broadcasting each result
    back to the column length so the output keeps target's shape."""
    result_values = np.empty_like(target.values)

    # axis which we want to compare compliance
    result_compare = target.shape[0]

    for i, col in enumerate(target.columns):
        res = self.f(target[col])
        ares = np.asarray(res).ndim

        # must be a scalar or 1d
        if ares > 1:
            raise ValueError("too many dims to broadcast")
        elif ares == 1:
            # must match return dim
            if result_compare != len(res):
                raise ValueError("cannot broadcast result")

        result_values[:, i] = res

    # we *always* preserve the original index / columns
    result = self.obj._constructor(result_values,
                                   index=target.index,
                                   columns=target.columns)
    return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (self.result_type in ['reduce', None] and
not self.dtypes.apply(is_extension_type).any()):
# Create a dummy Series from an empty array
from pandas import Series
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
result = reduction.reduce(values, self.f,
axis=self.axis,
dummy=dummy,
labels=labels)
return self.obj._constructor_sliced(result, index=labels)
except Exception:
|
)
measure_to_view_map._registered_views = {name: view}
test_result_1 = measure_to_view_map.register_view(
view=view, timestamp=timestamp)
self.assertIsNone(test_result_1)
self.assertIsNotNone(measure_to_view_map.
_measure_to_view_data_list_map[view.measure.name])
def test_register_view_with_exporter(self):
    """Registering a view records it plus its measure and keeps
    exported_views unset."""
    exporter = mock.Mock()
    measure = MeasureInt("measure", "description", "1")
    view = View(
        name="testView",
        description="testDescription",
        columns=mock.Mock(),
        measure=measure,
        aggregation=mock.Mock())
    timestamp = mock.Mock()

    mvm = measure_to_view_map_module.MeasureToViewMap()
    mvm.exporters.append(exporter)
    mvm._registered_views = {}
    mvm._registered_measures = {}
    mvm.register_view(view=view, timestamp=timestamp)

    self.assertIsNone(mvm.exported_views)
    self.assertEqual(mvm._registered_views[view.name], view)
    self.assertEqual(mvm._registered_measures[measure.name], measure)
    self.assertIsNotNone(
        mvm._measure_to_view_data_list_map[view.measure.name])
    def test_record(self):
        """Exercise record() across several registration/bucket states."""
        measure_name = "test_measure"
        measure_description = "test_description"
        measure = BaseMeasure(
            name=measure_name, description=measure_description)
        view_name = "test_view"
        view_description = "test_description"
        view_columns = ["testTag1", "testColumn2"]
        view_measure = measure
        view_aggregation = mock.Mock()
        # The View is constructed but deliberately never registered here.
        View(name=view_name,
             description=view_description,
             columns=view_columns,
             measure=view_measure,
             aggregation=view_aggregation)
        measure_value = 5
        tags = {"testTag1": "testTag1Value"}
        measurement_map = {measure: measure_value}
        timestamp = mock.Mock()
        measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
        # Case 1: measure not registered -> record() returns None.
        measure_to_view_map._registered_measures = {}
        record = measure_to_view_map.record(
            tags=tags,
            measurement_map=measurement_map,
            timestamp=timestamp,
            attachments=None)
        self.assertNotEqual(
            measure,
            measure_to_view_map._registered_measures.get(measure.name))
        self.assertIsNone(record)
        # Case 2: measure registered but no view-data bucket for it.
        measure_to_view_map._registered_measures = {measure.name: measure}
        measure_to_view_map._measure_to_view_data_list_map = {}
        record = measure_to_view_map.record(
            tags=tags,
            measurement_map=measurement_map,
            timestamp=timestamp,
            attachments=None)
        self.assertEqual(
            measure,
            measure_to_view_map._registered_measures.get(measure.name))
        self.assertIsNone(record)
        # Case 3: a view-data bucket exists for the measure.
        measure_to_view_map._measure_to_view_data_list_map = {
            measure.name: [mock.Mock()]
        }
        measure_to_view_map.record(
            tags=tags,
            measurement_map=measurement_map,
            timestamp=timestamp,
            attachments=None)
        self.assertEqual(
            measure,
            measure_to_view_map._registered_measures.get(measure.name))
        self.assertTrue(
            measure.name in measure_to_view_map._measure_to_view_data_list_map)
        # Case 4: buckets exist only for other measure names.
        measure_to_view_map._measure_to_view_data_list_map = {
            "testing": [mock.Mock()]
        }
        measure_to_view_map.record(
            tags=tags,
            measurement_map=measurement_map,
            timestamp=timestamp,
            attachments=None)
        self.assertTrue(measure.name not in measure_to_view_map.
                        _measure_to_view_data_list_map)
        # Case 5: a fully mocked map just records the delegated call.
        measure_to_view_map_mock = mock.Mock()
        measure_to_view_map = measure_to_view_map_mock
        measure_to_view_map._registered_measures = {measure.name: measure}
        measure_to_view_map._measure_to_view_data_list_map = mock.Mock()
        measure_to_view_map.record(
            tags=mock.Mock(), stats=mock.Mock(), timestamp=mock.Mock())
        self.assertEqual(
            measure,
            measure_to_view_map._registered_measures.get(measure.name))
        self.assertIsNotNone(measure_to_view_map.view_datas)
        self.assertTrue(measure_to_view_map_mock.record.called)
        # Case 6: an empty measurement map records nothing.
        tags = {"testTag1": "testTag1Value"}
        measurement_map = {}
        measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
        record = measure_to_view_map.record(
            tags=tags,
            measurement_map=measurement_map,
            timestamp=timestamp,
            attachments=None)
        self.assertIsNone(record)
def test_record_negative_value(self):
"""Check that we warn and drop negative measures at record time."""
measure = mock.Mock()
view_data = mock.Mock()
measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
measure_to_view_map._registered_measures = {measure.name: measure}
measure_to_view_map._measure_to_view_data_list_map = {
measure.name: [view_data]
}
with self.assertRaises(AssertionError):
measure_to_view_map.record(
tags=mock.Mock(),
measurement_map={measure: -1},
timestamp=mock.Mock())
view_data.record.assert_not_called()
    def test_record_with_exporter(self):
        """record() with an unregistered measure returns None, exports nothing."""
        exporter = mock.Mock()
        measure_name = "test_measure"
        measure_description = "test_description"
        measure = BaseMeasure(
            name=measure_name, description=measure_description)
        view_name = "test_view"
        view_description = "test_description"
        view_columns = ["testTag1", "testColumn2"]
        view_measure = measure
        view_aggregation = mock.Mock()
        # The View is constructed but deliberately never registered.
        View(
            name=view_name,
            description=view_description,
            columns=view_columns,
            measure=view_measure,
            aggregation=view_aggregation)
        measure_value = 5
        tags = {"testTag1": "testTag1Value"}
        measurement_map = {measure: measure_value}
        timestamp = mock.Mock()
        measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
        measure_to_view_map.exporters.append(exporter)
        # Empty registry: the recorded measure is unknown to the map.
        measure_to_view_map._registered_measures = {}
        record = measure_to_view_map.record(
            tags=tags, measurement_map=measurement_map, timestamp=timestamp)
        self.assertNotEqual(
            measure,
            measure_to_view_map._registered_measures.get(measure.name))
        self.assertIsNone(record)
    def test_export(self):
        """Smoke test: export() with an attached exporter must not raise."""
        exporter = mock.Mock()
        view_data = []
        measure_name = "test_measure"
        measure_description = "test_description"
        measure = BaseMeasure(
            name=measure_name, description=measure_description)
        view_name = "test_view"
        view_description = "test_description"
        view_columns = ["testTag1", "testColumn2"]
        view_measure = measure
        view_aggregation = mock.Mock()
        # The View is constructed but never registered with the map.
        View(
            name=view_name,
            description=view_description,
            columns=view_columns,
            measure=view_measure,
            aggregation=view_aggregation)
        measure_to_view_map = measure_to_view_map_module.MeasureToViewMap()
        measure_to_view_map.exporters.append(exporter)
        measure_to_view_map._registered_measures = {}
        measure_to_view_map.export(view_data)
        # NOTE(review): nothing is asserted beyond "does not raise" —
        # the trailing assertTrue(True) only marks smoke-test intent.
        self.assertTrue(True)
def test_export_duplicates_viewdata(self):
"""Check that we copy view data on export."""
mtvm = measure_to_view_map_module.MeasureToViewMap()
exporter = mock.Mock()
mtvm.exporters.a |
<span class="attr_name">Dernière mise à jour:</span>
<span class="attr_val">5.3 jours</span>
</td>
<td>
<span class="attr_name">Faux:</span>
<span class="attr_val">Aucun</span>
</td>
</tr>
</table>
<pre class="snippet">
Content
</pre>
</td>
</tr>
<tr>
<td class="idx">1</td>
<td>
<table class="torrent_name_tbl">
<tr>
<td class="torrent_name">
<a href="/url">Should be the title</a>
</td>
</tr>
</table>
<table class="torrent_name_tbl">
<tr>
<td class="ttth">
<a onclick="fclck(this.href)" href="magnet:?xt=urn:btih:magnet&dn=Test"
title="Télécharger des liens Magnet">[magnet]</a>
</td>
<td class="ttth">
<a href="https://btcloud.io/manager?cmd=add&info_hash=hash"
target="_blank" title="Ajouter à BTCloud">[cloud]</a>
</td>
<td>
<span class="attr_name">Taille:</span>
<span class="attr_val">1 GB</span>
</td>
<td>
<span class="attr_name">Fichiers:</span>
<span class="attr_val">710</span>
</td>
<td>
<span class="attr_name">Téléchargements:</span>
<span class="attr_val">3</span>
</td>
<td>
<span class="attr_name">Temps:</span>
<span class="attr_val">417.8 jours</span>
</td>
<td>
<span class="attr_name">Dernière mise à jour:</span>
<span class="attr_val">5.3 jours</span>
</td>
<td>
<span class="attr_name">Faux:</span>
<span class="attr_val">Aucun</span>
</td>
</tr>
</table>
<pre class="snippet">
Content
</pre>
</td>
</tr>
<tr>
<td class="idx">1</td>
<td>
<table class="torrent_name_tbl">
<tr>
<td class="torrent_name">
<a href="/url">Should be the title</a>
</td>
</tr>
</table>
<table class="torrent_name_tbl">
<tr>
<td class="ttth">
<a onclick="fclck(this.href)" href="magnet:?xt=urn:btih:magnet&dn=Test"
title="Télécharger des liens Magnet">[magnet]</a>
</td>
<td class="ttth">
<a href="https://btcloud.io/manager?cmd=add&info_hash=hash"
target="_blank" title="Ajouter à BTCloud">[cloud]</a>
</td>
<td>
<span class="attr_name">Taille:</span>
<span class="attr_val">1 TB</span>
</td>
<td>
<span class="attr_name">Fichiers:</span>
<span class="attr_val">710</span>
</td>
<td>
<span class="attr_name">Téléchargements:</span>
<span class="attr_val">2</span>
</td>
<td>
<span class="attr_name">Temps:</span>
<span class="attr_val">417.8 jours</span>
</td> |
<td>
<span class="attr_name">Dernière mise à jour:</span>
<span class="attr_val">5.3 jours</span>
</td>
<td>
<span class="attr_name">Faux:</span>
<span class="attr_val">Aucun</span>
</td>
| </tr>
</table>
<pre class="snippet">
Content
</pre>
</td>
</tr>
<tr>
<td class="idx">1</td>
<td>
<table class="torrent_name_tbl">
<tr>
<td class="torrent_name">
<a href="/url">Should be the title</a>
</td>
</tr>
</table>
<table class="torrent_name_tbl">
<tr>
<td class="ttth">
<a onclick="fclck(this.href)" href="magnet:?xt=urn:btih:magnet&dn=Test"
title="Télécharger des liens Magnet">[magnet]</a>
</td>
<td class="ttth">
<a href="https://btcloud.io/manager?cmd=add&info_hash=hash"
target="_blank" title="Ajouter à BTCloud">[cloud]</a>
</td>
<td>
<span class="attr_name">Taille:</span>
<span class="attr_val">a TB</span>
</td>
<td>
<span class="attr_name">Fichiers:</span>
<span class="attr_val">710</span>
</td>
<td>
<span class="attr_name">Téléchargements:</span>
<span class="attr_val">z</span>
</td>
<td>
<span class="attr_name">Temps:</span>
<span class="attr_val">417.8 jours</span>
</td>
<td>
<span class="attr_name">Dernière mise à jour:</span>
|
from django.contrib import admin
from django.contrib.auth import get_user_model
class SignUpAdmin(admin.ModelAdmin):
    """Admin options for the project's user model.

    The model is bound via ``admin.site.register`` below; ``ModelAdmin``
    does not read an inner ``Meta.model``, so no ``Meta`` class is
    declared here (the previous one was dead code).
    """
    pass
admin.site. | register(get_user_model(), SignUpAdmin) |
"""Copyright 2012 TechInvestLab.ru dot15926@gmail.com
Redistribution and use in source and binary forms, with or without
modifi | cation, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaim | er in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" |
from behave import then, when
from bs4 import Bea | utifulSoup
from bs4.element import Tag
from pageobjects.pages import About, Welcome
@when(u'I instantiate the Welcome page object')
def new_pageobject(context):
    """Create the Welcome page object and stash it on the behave context."""
    context.page = Welcome(context)
@then(u'it provides a valid Beautiful Soup document')
def pageobject_works(context):
    """The page object exposes response, matching request, and a parsed doc."""
    assert context.page.response.status_code == 200
    assert context.page.request == context.page.response.request
    assert isinstance(context.page.document, BeautifulSoup)
    # The fixture app serves a page titled 'Test App: behave-django'.
    assert 'Test App: behave-django' == context.page.document.title.string, \
        "unexpected title: %s" % context.page.document.title.string
@then(u'get_link() returns the link subdocument')
def getlink_subdocument(context):
    """get_link() must hand back a BeautifulSoup Tag element."""
    link = context.page.get_link('about')
    context.about_link = link
    actual_type = link.__class__.__name__
    assert isinstance(link, Tag), \
        "should be instance of %s (not %s)" % (Tag.__name__, actual_type)
@when('I call click() on the link')
def linkelement_click(context):
    """Follow the previously fetched link and keep the resulting page."""
    context.next_page = context.about_link.click()
@then('it loads a new PageObject')
def click_returns_pageobject(context):
    """click() must return a page object equal to the About page."""
    assert About(context) == context.next_page
|
#!/usr/bin/env python
"""
@package ion.agents.platform.util.node_configuration
@file ion/agents/platform/util/node_configuration.py
@author Mike Harrington
@brief read node configuration files
"""
__author__ = 'Mike Harrington'
__license__ = 'Apache 2.0'
from ooi.logging import log
from mi.platform.exceptions import NodeConfigurationFileException
from mi.platform.util.NodeYAML import NodeYAML
import yaml
import logging
class NodeConfiguration(object):
    """
    Various utilities for reading in node configuration yaml files.
    """

    def __init__(self):
        # Start with an empty NodeYAML so the properties below are safe
        # to read before openNode() has been called.
        self._node_yaml = NodeYAML.factory(None)

    @property
    def node_meta_data(self):
        return self._node_yaml.node_meta_data

    @property
    def node_streams(self):
        return self._node_yaml.node_streams

    @property
    def node_port_info(self):
        return self._node_yaml.node_port_info

    def openNode(self, platform_id, node_config_filename):
        """
        Opens up and parses the node configuration files.

        @param platform_id - id to associate with this set of Node Configuration Files
        @param node_config_filename - yaml file with information about the platform

        @raise NodeConfigurationFileException
        """
        self._platform_id = platform_id
        log.debug("%r: Open: %s", self._platform_id, node_config_filename)

        try:
            with open(node_config_filename, 'r') as node_config_file:
                try:
                    # safe_load refuses the arbitrary-object construction
                    # that yaml.load() allows; config files only need
                    # plain scalars/lists/dicts.
                    node_config = yaml.safe_load(node_config_file)
                except Exception as e:
                    raise NodeConfigurationFileException(msg="%s Cannot parse yaml node specific config file : %s" % (str(e),node_config_filename))
        except NodeConfigurationFileException:
            # Let the parse error through unchanged instead of re-wrapping
            # it as an "open" failure (the previous code masked it).
            raise
        except Exception as e:
            raise NodeConfigurationFileException(msg="%s Cannot open node specific config file : %s" % (str(e),node_config_filename))

        self._node_yaml = NodeYAML.factory(node_config)
        self._node_yaml.validate()

    def Print(self):
        """Log all meta data, port info and stream info for this node."""
        log.debug("%r Print Config File Information for: %s\n\n", self._platform_id, self.node_meta_data['node_id_name'])
        log.debug("%r Node Meta data", self._platform_id)
        # iteritems() keeps this compatible with the Python 2 runtime the
        # rest of this module targets.
        for meta_data_key, meta_data_item in sorted(self.node_meta_data.iteritems()):
            log.debug("%r %r = %r", self._platform_id, meta_data_key, meta_data_item)
        log.debug("%r Node Port Info", self._platform_id)
        for port_data_key, port_data_item in sorted(self.node_port_info.iteritems()):
            log.debug("%r %r = %r", self._platform_id, port_data_key, port_data_item)
        log.debug("%r Node stream Info", self._platform_id)
        for stream_data_key, stream_data_item in sorted(self.node_streams.iteritems()):
            log.debug("%r %r = %r", self._platform_id, stream_data_key, stream_data_item)
|
# Demo script exercising the webplot plotting API (Python 2 syntax).
from webplot import p
p.use_doc('webplot example')
import numpy as np
# NOTE(review): datetime and time appear unused in this script.
import datetime
import time
# Sample data: 100 points of sin/cos over x in [0, ~16.5].
x = np.arange(100) / 6.0
y = np.sin(x)
z = np.cos(x)
data_source = p.make_source(idx=range(100), x=x, y=y, z=z)
# Plot from raw arrays, then by column name via the shared data source.
p.plot(x, y, 'orange')
p.figure()
p.plot('x', 'y', color='blue', data_source=data_source, title='sincos')
p.plot('x', 'z', color='green')
p.figure()
p.plot('x', 'y', data_source=data_source)
p.figure()
p.plot('x', 'z', data_source=data_source)
p.figure()
# Tabular view plus scatter plots driven by the same source.
p.table(data_source, ['x', 'y', 'z'])
p.scatter('x', 'y', data_source=data_source)
p.figure()
p.scatter('x', 'z', data_source=data_source)
p.figure()
# NOTE(review): presumably hold(False) makes each call below replace the
# current plot instead of overlaying — confirm against the webplot API.
p.hold(False)
p.scatter('x', 'y', 'orange', data_source=data_source)
p.scatter('x', 'z', 'red', data_source=data_source)
p.plot('x', 'z', 'yellow', data_source=data_source)
p.plot('x', 'y', 'black', data_source=data_source)
print "click on the plots tab to see results"
|
ROLLBAR-VE',
'SCROLLBAR-VER',
'SCROLLBAR-VERT',
'SCROLLBAR-VERTI',
'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA',
'SCROLL-DELTA',
'SCROLLED-ROW-POSITION',
'SCROLLED-ROW-POS',
'SCROLLED-ROW-POSI',
'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI',
'SCROLLED-ROW-POSITIO',
'SCROLLING',
'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW',
'SCROLL-TO-ITEM',
'SCROLL-TO-I',
'SCROLL-TO-IT',
'SCROLL-TO-ITE',
'SCROLL-TO-SELECTED-ROW',
'SDBNAME',
'SEAL',
'SEAL-TIMESTAMP',
'SEARCH',
'SEARCH-SELF',
'SEARCH-TARGET',
'SECTION',
'SECURITY-POLICY',
'SEEK',
'SELECT',
'SELECTABLE',
'SELECT-ALL',
'SELECTED',
'SELECT-FOCUSED-ROW',
'SELECTION',
'SELECTION-END',
'SELECTION-LIST',
'SELECTION-START',
'SELECTION-TEXT',
'SELECT-NEXT-ROW',
'SELECT-PREV-ROW',
'SELECT-ROW',
'SELF',
'SEND',
'send-sql-statement',
'send-sql',
'SENSITIVE',
'SEPARATE-CONNECTION',
'SEPARATOR-FGCOLOR',
'SEPARATORS',
'SERVER',
'SERVER-CONNECTION-BOUND',
'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT',
'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE',
'SESSION',
'SESSION-ID',
'SET',
'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE',
'SET-ATTRIBUTE-NODE',
'SET-BLUE-VALUE',
'SET-BLUE',
'SET-BLUE-',
'SET-BLUE-V',
'SET-BLUE-VA',
'SET-BLUE-VAL',
'SET-BLUE-VALU',
'SET-BREAK',
'SET-BUFFERS',
'SET-CALLBACK',
'SET-CLIENT',
'SET-COMMIT',
'SET-CONTENTS',
'SET-CURRENT-VALUE',
'SET-DB-CLIENT',
'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION',
'SET-GREEN-VALUE',
'SET-GREEN',
'SET-GREEN-',
'SET-GREEN-V',
'SET-GREEN-VA',
'SET-GREEN-VAL',
'SET-GREEN-VALU',
'SET-INPUT-SOURCE',
'SET-OPTION',
'SET-OUTPUT-DESTINATION',
'SET-PARAMETER',
'SET-POINTER-VALUE',
'SET-PROPERTY',
'SET-RED-VALUE',
'SET-RED',
'SET-RED-',
'SET-RED-V',
'SET-RED-VA',
'SET-RED-VAL',
'SET-RED-VALU',
'SET-REPOSITIONED-ROW',
'SET-RGB-VALUE',
'SET-ROLLBACK',
'SET-SELECTION',
'SET-SIZE',
'SET-SORT-ARROW',
'SETUSERID',
'SETUSER',
'SETUSERI',
'SET-WAIT-STATE',
'SHA1-DIGEST',
'SHARED',
'SHARE-LOCK',
'SHARE',
'SHARE-',
'SHARE-L',
'SHARE-LO',
'SHARE-LOC',
'SHOW-IN-TASKBAR',
'SHOW-STATS',
'SHOW-STAT',
'SIDE-LABEL-HANDLE',
'SIDE-LABEL-H',
'SIDE-LABEL-HA',
'SIDE-LABEL-HAN',
'SIDE-LABEL-HAND',
'SIDE-LABEL-HANDL',
'SIDE-LABELS',
'SIDE-LAB',
'SIDE-LABE',
'SIDE-LABEL',
'SILENT',
'SIMPLE',
'SINGLE',
'SIZE',
'SIZE-CHARS',
'SIZE-C',
'SIZE-CH',
'SIZE-CHA',
'SIZE-CHAR',
'SIZE-PIXELS',
'SIZE-P',
'SIZE-PI',
'SIZE-PIX',
'SIZE-PIXE',
'SIZE-PIXEL',
'SKIP',
'SKIP-DELETED-RECORD',
'SLIDER',
'SMALL-ICON',
'SMALLINT',
'SMALL-TITLE',
'SOME',
'SORT',
'SORT-ASCENDING',
'SORT-NUMBER',
'SOURCE',
'SOURCE-PROCEDURE',
'SPACE',
'SQL',
'SQRT',
'SSL-SERVER-NAME',
'STANDALONE',
'START',
'START-DOCUMENT',
'START-ELEMENT',
'START-MOVE',
'START-RESIZE',
'START-ROW-RESIZE',
'STATE-DETAIL',
'STATIC',
'STATUS',
'STATUS-AREA',
'STATUS-AREA-FONT',
'STDCALL',
'STOP',
'STOP-PARSING',
'STOPPED',
'STOPPE',
'STORED-PROCEDURE',
'STORED-PROC',
'STORED-PROCE',
'STORED-PROCED',
'STORED-PROCEDU',
'STORED-PROCEDUR',
'STREAM',
'STREAM-HANDLE',
'STREAM-IO',
'STRETCH-TO-FIT',
'STRICT',
'STRING',
'STRING-VALUE',
'STRING-XREF',
'SUB-AVERAGE',
'SUB-AVE',
'SUB-AVER',
'SUB-AVERA',
'SUB-AVERAG',
'SUB-COUNT',
'SUB-MAXIMUM',
'SUM-MAX',
'SUM-MAXI',
'SUM-MAXIM',
'SUM-MAXIMU',
'SUB-MENU',
'SUBSUB-',
'SUB-MIN',
'SUBSCRIBE',
'SUBSTITUTE',
'SUBST',
'SUBSTI',
'SUBSTIT',
'SUBSTITU',
'SUBSTITUT',
'SUBSTRING',
'SUBSTR',
'SUBSTRI',
'SUBSTRIN',
'SUB-TOTAL',
'SUBTYPE',
'SUM',
'SUPER',
'SUPER-PROCEDURES',
'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-WARNINGS',
'SUPPRESS-W',
'SUPPRESS-WA',
'SUPPRESS-WAR',
'SUPPRESS-WARN',
'SUPPRESS-WARNI',
'SUPPRESS-WARNIN',
'SUPPRESS-WARNING',
'SYMMETRIC-ENCRYPTION-ALGORITHM',
'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY',
'SYMMETRIC-SUPPORT',
'SYSTEM-ALERT-BOXES',
'SYSTEM-ALERT',
'SYSTEM-ALERT-',
'SYSTEM-ALERT-B',
'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX',
'SYSTEM-ALERT-BOXE',
'SYSTEM-DIALOG',
'SYSTEM-HELP',
'SYSTEM-ID',
'TABLE',
'TABLE-HANDLE',
'TABLE-NUMBER',
'TAB-POSITION',
'TAB-STOP',
'TARGET',
'TARGET-PROCEDURE',
'TEMP-DIRECTORY',
'TEMP-DIR',
'TEMP-DIRE',
'TEMP-DIREC',
'TEMP-DIRECT',
'TEMP-DIRECTO',
'TEMP-DIRECTOR',
'TEMP-TABLE',
'TEMP-TABLE-PREPARE',
'TERM',
'TERMINAL',
'TERMI',
'TERMIN',
'TERMINA',
'TERMINATE',
'TEXT',
'TEXT-CURSOR',
'TEXT-SEG-GROW',
'TEXT-SELECTED',
'THEN',
'THIS-OBJECT',
'THIS-PROCEDURE',
'THREE-D',
'THROW',
'THROUGH',
'THRU',
'TIC-MARKS',
'TIME',
'TIME-SOURCE',
'TITLE',
'TITLE-BGCOLOR',
'TITLE-BGC',
'TITLE-BGCO',
'TITLE-BGCOL',
'TITLE-BGCOLO',
'TITLE-DCOLOR',
'TITLE-DC',
'TITLE-DCO',
'TITLE-DCOL',
'TITLE-DCOLO',
'TITLE-FGCOLOR',
'TITLE-FGC',
'TITLE-FGCO',
'TITLE-FGCOL',
'TITLE-FGCOLO',
'TITLE-FONT',
'TITLE-FO',
'TITLE-FON',
'TO',
'TODAY',
'TOGGLE-BOX',
'TOOLTIP',
'TOOLTIPS',
'TOPIC',
'TOP-NAV-QUERY',
'TOP-ONLY',
'TO-ROWID',
'TOTAL',
'TRAILING',
'TRANS',
'TRANSACTION',
'TRANSACTION-MODE',
'TRANS-INIT-PROCEDURE',
'TRANSPARENT',
'TRIGGER',
'TRIGGERS',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUNC',
'TRUNCA',
'TRUNCAT',
'TYPE',
'TYPE-OF',
'UNBOX',
'UNBUFFERED',
'UNBUFF',
'UNBUFFE',
'UNBUFFER',
'UNBUFFERE',
'UNDERLINE',
'UNDERL',
'UNDERLI',
'UNDERLIN',
'UNDO',
'UNFORMATTED',
'UNFORM',
'UNFORMA',
'UNFORMAT',
'UNFORMATT',
'UNFORMATTE',
'UNION',
'UNIQUE',
'UNIQUE-ID',
'UNIQUE-MATCH',
'UNIX',
'UNLESS-HIDDEN',
'UNLOAD',
'UNSIGNED-LONG',
'UNSUBSCRIBE',
'UP',
'UPDATE',
'UPDATE-ATTRIBUTE',
'URL',
'URL-DECODE',
'URL-ENCODE',
'URL-PASSWORD',
'URL-USERID',
'USE',
'USE-DICT-EXPS',
'USE-FILENAME',
'USE-INDEX',
'USER',
'USE-REVVIDEO',
'USERID',
'USER-ID',
'USE-TEXT',
'USE-UNDERLINE',
'USE-WIDGET-POOL',
'USING',
'V6DISPLAY',
'V6FRAME',
'VALIDATE',
'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE',
'VALIDATE-SEAL',
'VALIDATION-ENABLED',
'VALID-EVENT',
'VALID-HANDLE',
'VALID-OBJECT',
'VALUE',
'VALUE-CHANGED',
'VALUES',
'VARIABLE',
'VAR',
'VARI',
'VARIA',
'VARIAB',
'VARIABL',
'VERBOSE',
'VERSION',
'VERTICAL',
'VERT',
'VERTI',
'VERTIC',
'VERTICA',
'VIEW',
'VIEW-AS',
' | VIEW-FIRST-COLUMN-ON-REOPEN',
'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT',
'VIRTUAL-HEIGHT-',
'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH',
'VIRTUAL-HEIGHT-CHA',
'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-PIXELS',
'VIRTUAL-HEIGHT-P',
'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX',
'VIRTUAL-HEIGHT-PIXE',
'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-WIDTH-CHARS',
'VIRTUAL-WIDTH',
'VIRTUAL- | WIDTH-',
'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH',
'VIRTUAL-WIDTH-CHA',
'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-PIXELS',
'VIRTUAL-WIDTH-P',
'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX',
'VIRTUAL-WIDTH-PIXE',
'VIRTUAL-WIDTH-PIXEL',
'VISIBLE',
'VOID',
'WAIT',
'WAIT-FOR',
'WARNING',
'WEB-CONTEXT',
'WEEKDAY',
'W |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import zipfile
import config
import store
import common
from db import db_session, Source
import crypto_util
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
class TestStore(unittest.TestCase):

    """The set of tests for store.py."""

    def setUp(self):
        common.shared_setup()

    def tearDown(self):
        common.shared_teardown()

    def test_verify(self):
        """verify() must reject paths outside the store directory."""
        # Classic path traversal out of the store.
        with self.assertRaises(store.PathException):
            store.verify(os.path.join(config.STORE_DIR, '..', 'etc', 'passwd'))
        # Sibling directories that merely share the prefix are rejected too.
        with self.assertRaises(store.PathException):
            store.verify(config.STORE_DIR + "_backup")

    def test_get_zip(self):
        """Bulk archives must contain the submitted documents verbatim."""
        sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
        source = Source(sid, crypto_util.display_id())
        db_session.add(source)
        db_session.commit()
        files = ['1-abc1-msg.gpg', '2-abc2-msg.gpg']
        filenames = common.setup_test_docs(sid, files)

        archive = zipfile.ZipFile(store.get_bulk_archive(filenames))
        archivefile_contents = archive.namelist()

        for archived_file, actual_file in zip(archivefile_contents, filenames):
            # Read in binary mode so the comparison against the zipped
            # bytes is exact, and close the handle deterministically
            # (the previous code leaked the file object and used the
            # deprecated assertEquals alias).
            with open(actual_file, 'rb') as f:
                actual_file_content = f.read()
            zipped_file_content = archive.read(archived_file)
            self.assertEqual(zipped_file_content, actual_file_content)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
# http://lodev.org/cgtutor/filtering.html
# Demonstrates spatial filtering with cv2.filter2D: motion blur,
# sharpening, embossing, and morphological erosion/dilation.
import cv2
import numpy as np

#img = cv2.imread('../images/input_sharp_edges.jpg', cv2.IMREAD_GRAYSCALE)
img = cv2.imread('../images/input_tree.jpg')
rows, cols = img.shape[:2]
#cv2.imshow('Original', img)

###################
# Motion Blur
# Ones along the middle row average `size` horizontal neighbours,
# simulating horizontal camera motion.
size = 15
kernel_motion_blur = np.zeros((size, size))
kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
kernel_motion_blur = kernel_motion_blur / size
output = cv2.filter2D(img, -1, kernel_motion_blur)
#cv2.imshow('Motion Blur', output)

###################
# Sharpening
# Each kernel's coefficients sum to 1 (kernel_sharpen_3 after the /8.0),
# so overall image brightness is preserved.
kernel_sharpen_1 = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
kernel_sharpen_2 = np.array([[1,1,1], [1,-7,1], [1,1,1]])
kernel_sharpen_3 = np.array([[-1,-1,-1,-1,-1],
                             [-1,2,2,2,-1],
                             [-1,2,8,2,-1],
                             [-1,2,2,2,-1],
                             [-1,-1,-1,-1,-1]]) / 8.0
output_1 = cv2.filter2D(img, -1, kernel_sharpen_1)
output_2 = cv2.filter2D(img, -1, kernel_sharpen_2)
output_3 = cv2.filter2D(img, -1, kernel_sharpen_3)
#cv2.imshow('Sharpening', output_1)
#cv2.imshow('Excessive Sharpening', output_2)
#cv2.imshow('Edge Enhancement', output_3)

###################
# Embossing
# Each kernel differences pixels along one diagonal direction.
img_emboss_input = cv2.imread('../images/input_house.jpg')
kernel_emboss_1 = np.array([[0,-1,-1],
                            [1,0,-1],
                            [1,1,0]])
kernel_emboss_2 = np.array([[-1,-1,0],
                            [-1,0,1],
                            [0,1,1]])
kernel_emboss_3 = np.array([[1,0,0],
                            [0,0,0],
                            [0,0,-1]])
gray_img = cv2.cvtColor(img_emboss_input,cv2.COLOR_BGR2GRAY)
output_1 = cv2.filter2D(gray_img, -1, kernel_emboss_1)
output_2 = cv2.filter2D(gray_img, -1, kernel_emboss_2)
output_3 = cv2.filter2D(gray_img, -1, kernel_emboss_3)
cv2.imshow('Input', img_emboss_input)
# NOTE(review): `output + 128` on uint8 arrays wraps around for pixels
# above 127 (numpy modular arithmetic); cv2.add would saturate instead —
# confirm which behavior is intended here.
cv2.imshow('Embossing - South West', output_1 + 128)
cv2.imshow('Embossing - South East', output_2 + 128)
cv2.imshow('Embossing - North West', output_3 + 128)

###################
# Erosion and dilation
img = cv2.imread('../images/input_morphology.png',0)
kernel = np.ones((5,5), np.uint8)
img_erosion = cv2.erode(img, kernel, iterations=1)
img_dilation = cv2.dilate(img, kernel, iterations=1)
#cv2.imshow('Input', img)
#cv2.imshow('Erosion', img_erosion)
#cv2.imshow('Dilation', img_dilation)
cv2.waitKey()
|
'06 - Otras Deducciones Admitidas'),
('07', '07 - Gastos Financieros'),
('08', '08 - Gastos Extraordinarios'),
('09', '09 - Compras y Gastos que forman parte del Costo de Venta'),
('10', '10 - Adquisiciones de Activos'),
('11', '11 - Gastos de Seguro')
]
    def on_change_fiscal_position(self, cr, uid, ids, value):
        """Onchange: derive the reference type and NCF requirement from the
        selected fiscal position. 'informal' and 'minor' fiscal types do
        not require an NCF number."""
        fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, value).fiscal_type
        if fiscal_type in [u'informal', u'minor']:
            ncf_required = False
        else:
            ncf_required = True
        return {"value": {'reference_type': fiscal_type, 'ncf_required': ncf_required}}
def onchange_journal_id(self, cr, uid, ids, *args):
if args:
journal = self.pool.get("account.journal").browse(cr, uid, args[0])
ncf_required = True
if journal.ncf_special:
ncf_required = False
return {"value": {'ncf_required': ncf_required}}
else:
return {"value": {}}
def onchange_reference(self, cr, uid, ids, reference, ncf_required):
if not is_ncf(reference.encode("ascii")) and ncf_required:
raise except_osv(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
return False
    def action_date_assign(self, cr, uid, ids, *args):
        """Validate NCF data and recompute payment terms before date assignment."""
        for inv in self.browse(cr, uid, ids):
            # Expense ('gasto') and informal journals carry no reference.
            if inv.journal_id.ncf_special in ['gasto', 'informal']:
                self.write(cr, uid, [inv.id], {"reference": False})
            if inv.type in ['in_invoice', 'in_refund'] and inv.ncf_required:
                if inv.reference_type != 'none' and not is_ncf(inv.reference.encode("ascii")):
                    raise except_osv(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
            # TODO: if the warehouse entry for this order was received only
            # partially, warn the accountant to finish receiving the pending
            # products or cancel them; a draft credit note should be created
            # for partial receptions.
            res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
            if res and res['value']:
                self.write(cr, uid, [inv.id], res['value'])
        return True
_columns = {
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=False),
'reference': fields.char('Invoice Reference', size=19, help="The partner reference of this invoice."),
'ipf': fields.boolean("Impreso", readonly=True),
'ncf_required': fields.boolean(),
"pay_to": fields.many2one("res.partner", "Pagar a")
}
_sql_constraints = [
# ('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!')
('number_uniq', 'unique(company_id, partner_id, number, journal_id)', u'El NCF para este relacionado ya fue utilizado!'),
]
_defaults = {
"ncf_required": True
}
def _get_journal_id(self, fiscal_type, shop_id, refund):
if refund:
return shop_id.notas_credito_id.id
elif fiscal_type == "final" or fiscal_type is None:
return shop_id.final_id.id
elif fiscal_type == "fiscal":
return shop_id.fiscal_id.id
elif fiscal_type == "special":
return shop_id.especiales_id.id
elif fiscal_type == "gov":
return shop_id.gubernamentales_id.id
else:
return False
    def create(self, cr, uid, vals, context=None):
        """Route new invoices to the fiscal journal matching their origin.

        Depending on the invoice type and the creating model (POS order,
        incoming picking, sale order, refund wizard) this either picks the
        shop journal via _get_journal_id or derives the NCF reference type
        from the fiscal position before delegating to the standard create.
        """
        if not context:
            context = {}
        # POS-originated invoices/refunds: nothing to adjust here.
        if context.get('active_model', False) == 'pos.order' and vals.get('type', False) in ["out_invoice", 'out_refund']:
            pass
        # Refunds created from an incoming picking: nothing to adjust.
        elif context.get('active_model', False) == 'stock.picking.in' and vals.get('type', False) == "out_refund":
            pass
        elif vals.get('type', False) == "out_invoice":
            # Customer invoice: look up the sale order it originates from.
            order_obj = self.pool.get('sale.order')
            so_id = order_obj.search(cr, uid, [('name', '=', vals['origin'])])
            so = order_obj.browse(cr, uid, so_id, context)[0]
            # Fiscal position 2 is the fallback when none was supplied.
            if not vals['fiscal_position']: vals['fiscal_position'] = 2
            fiscal_type = so.partner_id.property_account_position.fiscal_type or 'final'
            vals['journal_id'] = self._get_journal_id(fiscal_type, so.shop_id, False)
        elif vals.get('type', False) == "out_refund":
            if vals.get('origin', False):
                # Refund tied to a sale order: use the credit-note journal.
                order_obj = self.pool.get('sale.order')
                so_id = order_obj.search(cr, uid, [('name', '=', vals.get('origin', None))])
                so = order_obj.browse(cr, uid, so_id, context)[0]
                if not vals['fiscal_position']:
                    vals['fiscal_position'] = 2
                vals['journal_id'] = self._get_journal_id(None, so.shop_id, True)
            else:
                # Refund without origin: reference the number of the
                # invoice being refunded (from the wizard's active_id).
                vals['reference'] = u""
                inv_obj = self.pool.get('account.invoice')
                origin = inv_obj.read(cr, uid, context['active_id'], ['number'])
                vals['origin'] = origin["number"]
        elif vals.get('type', False) == "in_invoice" and vals.get('fiscal_position', False):
            # Supplier invoice: the reference type mirrors the fiscal type.
            fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, vals['fiscal_position']).fiscal_type
            vals['reference_type'] = fiscal_type
        elif vals.get('type', False) == "in_refund" and vals.get('fiscal_position', False):
            vals['reference'] = vals.get('origin', "")
            fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, vals['fiscal_position']).fiscal_type
            vals['reference_type'] = fiscal_type
        inv = super(account_invoice, self).create(cr, uid, vals, context)
        return inv
    # go from canceled state to draft state
    def action_cancel_draft(self, cr, uid, ids, *args):
        """Reset cancelled invoices back to draft and restart their workflow."""
        self.write(cr, uid, ids, {'state': 'draft', 'internal_number': False})
        wf_service = netsvc.LocalService("workflow")
        for inv_id in ids:
            # Recreate the workflow instance from scratch for each invoice.
            wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
            wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
        return True
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""
For each invoice line.
If amount of days since invoice is greater than 30.
For each tax on each invoice line.
If the tax is included in the price.
The tax is replaced with the corresponding tax exempt tax.
If tax is not include in price, no tax will show up in the refund.
"""
result = super(account_invoice, self)._refund_cleanup_lines(cr, uid, lines, context=context)
# For each invoice_line
for x, y, line in result:
inv_obj = self.pool.get('account.invoice').browse(cr, uid, line['invoice_id'], context=context)
inv_date = datetime.strptime(inv_obj['date_invoice'], "%Y-%m-%d").date()
days_diff = datetime.today().date() - inv_date
# If amount of days since invoice is greater than 30:
if days_diff.days > 30:
taxes_ids = []
# For each tax on the invoice line:
for tax_id in line['invoice_line_tax_id'][0][2]:
tax_original = self.pool.get('account.tax').browse(cr, uid, tax_id, context=context)
# If the tax is included in the price:
if tax_original.price_include:
# Replace it with the corresponding tax exempt tax.
tax_replacement = self.pool.get('account.tax').search(cr, uid,
[('type_tax_use', '=', tax_original.type_tax_use),
('amount', '=', tax_original.amount),
|
name is equal to shard_id.
Shard state contains critical state to ensure the correctness of
shard e | xecution. It is the single source of truth about a shard's
progress. For example:
1. A slice is allowed to run only if its payload matches shard state's
expectation.
| 2. A slice is considered running only if it has acquired the shard's lock.
3. A slice is considered done only if it has successfully committed shard
state to db.
Properties about the shard:
active: if we have this shard still running as boolean.
counters_map: shard's counters map as CountersMap. All counters yielded
within mapreduce are stored here.
mapreduce_id: unique id of the mapreduce.
shard_id: unique id of this shard as string.
shard_number: ordered number for this shard.
retries: the number of times this shard has been retried.
result_status: If not None, the final status of this shard.
update_time: The last time this shard state was updated.
shard_description: A string description of the work this shard will do.
last_work_item: A string description of the last work item processed.
writer_state: writer state for this shard. The shard's output writer
instance can save in-memory output references to this field in its
"finalize" method.
Properties about slice management:
slice_id: slice id of current executing slice. A slice's task
will not run unless its slice_id matches this. Initial
value is 0. By the end of slice execution, this number is
incremented by 1.
slice_start_time: a slice updates this to now at the beginning of
execution. If the transaction succeeds, the current task holds
a lease of slice duration + some grace period. During this time, no
other task with the same slice_id will execute. Upon slice failure,
the task should try to unset this value to allow retries to carry on
ASAP.
slice_request_id: the request id that holds/held the lease. When lease has
expired, new request needs to verify that said request has indeed
ended according to logs API. Do this only when lease has expired
because logs API is expensive. This field should always be set/unset
with slice_start_time. It is possible Logs API doesn't log a request
at all or doesn't log the end of a request. So a new request can
proceed after a long conservative timeout.
slice_retries: the number of times a slice has been retried due to
processing data when lock is held. Taskqueue/datastore errors
related to slice/shard management are not counted. This count is
only a lower bound and is used to determined when to fail a slice
completely.
acquired_once: whether the lock for this slice has been acquired at
least once. When this is True, duplicates in outputs are possible.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
# Shard can be in aborted state when user issued abort, or controller
# issued abort because some other shard failed.
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Maximum number of shard states to hold in memory at any time.
_MAX_STATES_IN_MEMORY = 10
# Functional properties.
mapreduce_id = db.StringProperty(required=True)
active = db.BooleanProperty(default=True, indexed=False)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
result_status = db.StringProperty(choices=_RESULTS, indexed=False)
retries = db.IntegerProperty(default=0, indexed=False)
writer_state = json_util.JsonProperty(dict, indexed=False)
slice_id = db.IntegerProperty(default=0, indexed=False)
slice_start_time = db.DateTimeProperty(indexed=False)
slice_request_id = db.ByteStringProperty(indexed=False)
slice_retries = db.IntegerProperty(default=0, indexed=False)
acquired_once = db.BooleanProperty(default=False, indexed=False)
# For UI purposes only.
update_time = db.DateTimeProperty(auto_now=True, indexed=False)
shard_description = db.TextProperty(default="")
last_work_item = db.TextProperty(default="")
def __str__(self):
    """Render a compact, sorted "key:value" summary of this shard state.

    Optional fields are included only when they are set/truthy.
    """
    kv = {"active": self.active,
          "slice_id": self.slice_id,
          "last_work_item": self.last_work_item,
          "update_time": self.update_time}
    optional_fields = (("result_status", self.result_status),
                       ("retries", self.retries),
                       ("slice_start_time", self.slice_start_time),
                       ("slice_retries", self.slice_retries),
                       ("slice_request_id", self.slice_request_id),
                       ("acquired_once", self.acquired_once))
    for field_name, value in optional_fields:
        if value:
            kv[field_name] = value
    parts = ["ShardState is {"]
    for field_name in sorted(kv):
        parts.append(field_name + ":" + str(kv[field_name]) + ",")
    parts.append("}")
    return "".join(parts)
def reset_for_retry(self):
    """Reset self for shard retry."""
    # Count the retry, then put every piece of per-attempt state back to
    # its initial value so the shard starts over from slice 0.
    self.retries += 1
    self.active = True
    self.result_status = None
    self.last_work_item = ""
    self.counters_map = CountersMap()
    self.slice_id = 0
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
def advance_for_next_slice(self, recovery_slice=False):
    """Advance self for next slice.

    Args:
      recovery_slice: True if this slice is running recovery logic.
        See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
        for more info.
    """
    # Clear all per-slice bookkeeping before moving on.
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
    # A recovery slice consumes two slice ids instead of one.
    self.slice_id += 2 if recovery_slice else 1
def set_for_failure(self):
    """Mark this shard as finished with a failed result."""
    self.active = False
    self.result_status = self.RESULT_FAILED
def set_for_abort(self):
    """Mark this shard as finished with an aborted result."""
    self.active = False
    self.result_status = self.RESULT_ABORTED
def set_for_success(self):
    """Mark this shard as finished successfully and clear slice state."""
    self.active = False
    self.result_status = self.RESULT_SUCCESS
    # Per-slice bookkeeping is meaningless once the shard is done.
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
def copy_from(self, other_state):
    """Copy data from another shard state entity to self."""
    # Mirror every datastore property value, one by one.
    for prop_name in (prop.name for prop in self.properties().values()):
        setattr(self, prop_name, getattr(other_state, prop_name))
def __eq__(self, other):
    """Value equality: two shard states are equal when every datastore
    property holds an equal value.

    Fix: the previous implementation compared ``self.properties() ==
    other.properties()``, which compares the *property descriptor*
    dictionaries. For two instances of the same model class those are
    always identical, so any two ShardState entities compared equal
    regardless of their data.
    """
    if not isinstance(other, self.__class__):
        return False
    for prop in self.properties().values():
        if getattr(self, prop.name) != getattr(other, prop.name):
            return False
    return True
def get_shard_number(self):
    """Gets the shard number from the key name.

    Key names have the form "<mapreduce_id>-<shard_number>" (see
    shard_id_from_number), so the number is the last dash-separated part.
    """
    return int(self.key().name().split("-")[-1])

shard_number = property(get_shard_number)
def get_shard_id(self):
    """Returns the shard ID (the datastore key name of this entity)."""
    return self.key().name()

shard_id = property(get_shard_id)
@classmethod
def kind(cls):
    """Returns entity kind used for this model in the datastore."""
    return "_AE_MR_ShardState"
@classmethod
def shard_id_from_number(cls, mapreduce_id, shard_number):
    """Get shard id by mapreduce id and shard number.

    Args:
      mapreduce_id: mapreduce id as string.
      shard_number: shard number to compute id for as int.

    Returns:
      shard id as string, in the form "<mapreduce_id>-<shard_number>".
    """
    return "{0}-{1:d}".format(mapreduce_id, shard_number)
@classmethod
def get_key_by_shard_id(cls, shard_id):
    """Retrieves the Key for this ShardState.

    Args:
      shard_id: The shard ID to fetch.

    Returns:
      The Datastore key to use to retrieve this ShardState.
    """
    return db.Key.from_path(cls.kind(), shard_id)
@classmethod
def get_by_shard_id(cls, shard_id):
    """Get shard state from datastore by shard_id.

    Args:
      shard_id: shard id as string (also the entity's key name).

    Returns:
      ShardState for given shard id or None if it's not found.
    """
    return cls.get_by_key_name(shard_id)
@classmethod
def find_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Deprecated. Use find_all_by_mapreduce_state.
This will be removed after 1.8.9 release.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of |
"""
This little helper takes Lebedev point and weight data from [1] and produces JSON files.
[1]
https://people.sc.fsu.edu/~jburkardt/datasets/sphere_lebedev_rule/sphere_lebedev_rule.html
"""
import os
import re
import numpy as np
def read(filename):
    """Read a Burkardt-format Lebedev data file.

    Returns:
        (azimuthal_polar, weights): the azimuthal/polar angle columns
        rescaled from degrees to units of pi, and the weight column.
    """
    raw = np.loadtxt(filename)
    angles = raw[:, :2] / 180.0
    return angles, raw[:, 2]
def chunk_data(weights):
    """Partition indices into runs of consecutive (nearly) equal weights.

    Lebedev tables list each symmetry orbit as a consecutive run of
    points sharing one weight; runs come in sizes 6, 8, 12, 24 or 48.
    """
    tol = 1.0e-12
    groups = []
    ref_weight = 0.0
    for idx in range(len(weights)):
        if groups and abs(weights[idx] - ref_weight) < tol:
            # Same weight as the current run: extend it.
            groups[-1].append(idx)
        else:
            # Weight changed: open a new run and remember its weight.
            groups.append([idx])
            ref_weight = weights[idx]
    return groups
def sort_into_symmetry_classes(weights, azimuthal_polar, chunks=None):
    """Group chunks of equal-weight points into Lebedev symmetry classes.

    Args:
        weights: array of quadrature weights.
        azimuthal_polar: (n, 2) array of azimuthal/polar angles in units
            of pi (i.e. degrees already divided by 180).
        chunks: optional list of index chunks as produced by
            ``chunk_data(weights)``. Fix: this function previously read a
            module-level ``chunks`` global that happened to be set by the
            caller; it now computes the chunks from *weights* when the
            argument is omitted, which matches the old call site exactly.

    Returns:
        dict mapping symmetry-class names ("a1", "a2", "a3", "pq0",
        "llm", "rsw") to lists of characterizing parameter lists.
    """
    if chunks is None:
        chunks = chunk_data(weights)
    data = {"a1": [], "a2": [], "a3": [], "pq0": [], "llm": [], "rsw": []}
    for c in chunks:
        if len(c) == 6:
            data["a1"].append([weights[c[0]]])
        elif len(c) == 12:
            data["a2"].append([weights[c[0]]])
        elif len(c) == 8:
            data["a3"].append([weights[c[0]]])
        elif len(c) == 24:
            if any(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12):
                # polar == pi/2 => X == [p, q, 0].
                # Find the smallest positive phi that's paired with
                # `polar == pi/2`; the symmetry is fully characterized by
                # that phi.
                k = np.where(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12)[0]
                assert len(k) == 8
                k2 = np.where(azimuthal_polar[c, 0][k] > 0.0)[0]
                azimuthal_min = np.min(azimuthal_polar[c, 0][k][k2])
                data["pq0"].append([weights[c[0]], azimuthal_min])
            else:
                # X = [l, l, m].
                # In this case, there must be exactly two phi with the
                # value pi/4. Take the value of the smaller corresponding
                # `polar`; all points are characterized by it.
                k = np.where(abs(azimuthal_polar[c, 0] - 0.25) < 1.0e-12)[0]
                assert len(k) == 2
                k2 = np.where(azimuthal_polar[c, 1][k] > 0.0)[0]
                polar_min = np.min(azimuthal_polar[c, 1][k][k2])
                data["llm"].append([weights[c[0]], polar_min])
        else:
            assert len(c) == 48
            # This most general symmetry is characterized by two angles;
            # one could take any two here. To make things easier later
            # on, out of the 6 smallest polar angles, take the one with
            # the smallest positive phi.
            min_polar = np.min(azimuthal_polar[c, 1])
            k = np.where(abs(azimuthal_polar[c, 1] - min_polar) < 1.0e-12)[0]
            k2 = np.where(azimuthal_polar[c, 0][k] > 0.0)[0]
            min_azimuthal = np.min(azimuthal_polar[c, 0][k][k2])
            data["rsw"].append([weights[c[0]], min_azimuthal, min_polar])
    return data
def write_json(filename, d):
    """Serialize *d* to *filename* with all floats in scientific notation.

    Getting floats in scientific notation in python.json is almost
    impossible, so do some work here. Compare with
    <https://stackoverflow.com/a/1733105/353337>.
    """
    class PrettyFloat(float):
        # repr() in 16-significant-digit scientific notation; picked up
        # by the manual serialization below.
        def __repr__(self):
            return f"{self:.16e}"

    def pretty_floats(obj):
        # Recursively wrap every float so repr() emits scientific
        # notation; tuples are flattened into lists on the way.
        if isinstance(obj, float):
            return PrettyFloat(obj)
        elif isinstance(obj, dict):
            return {k: pretty_floats(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return list(map(pretty_floats, obj))
        return obj

    with open(filename, "w") as f:
        # Poor-man's pretty printer: repr() the wrapped structure and
        # massage the python-literal text into JSON via string replaces.
        # NOTE(review): this is fragile -- it assumes no quotes/braces
        # appear inside keys or values; holds for the data written here.
        string = (
            pretty_floats(d)
            .__repr__()
            .replace("'", '"')
            .replace("{", "{\n ")
            .replace("[[", "[\n [")
            .replace("], [", "],\n [")
            .replace(']], "', ']\n ],\n "')
            .replace("}", "\n}")
            .replace("]]", "]\n ]")
        )
        f.write(string)
    return
if __name__ == "__main__":
    # Convert every lebedev_<degree>.txt table in data/ to a JSON file.
    directory = "data/"
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        m = re.match("lebedev_([0-9]+)\\.txt", filename)
        if m is None:
            # Fix: skip entries that don't match (e.g. previously
            # generated JSON output); m.group(1) on None used to crash.
            continue
        degree = int(m.group(1))
        azimuthal_polar, weights = read(os.path.join("data", filename))
        chunks = chunk_data(weights)
        data = sort_into_symmetry_classes(weights, azimuthal_polar)
        # Drop symmetry classes that are empty for this degree.
        data = {key: val for key, val in data.items() if val}
        data["degree"] = degree
        write_json(f"lebedev_{degree:03d}.json", data)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust display metadata (help_text/verbose_name) on three Question
    fields; no column types or names change.
    """

    dependencies = [
        ('applications', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='has_option_other',
            field=models.BooleanField(default=False, help_text=b"Used only with 'Choices' question type", verbose_name=b"Allow for 'Other' answer?"),
        ),
        migrations.AlterField(
            model_name='question',
            name='is_multiple_choice',
            field=models.BooleanField(default=False, help_text=b"Used only with 'Choices' question type", verbose_name=b'Are there multiple choices allowed?'),
        ),
        migrations.AlterField(
            model_name='question',
            name='is_required',
            field=models.BooleanField(default=True, verbose_name=b'Is the answer to the question required?'),
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
"""
generate_keys.py
Generate CSRF and Session keys, output to secret_keys.py file
Usage:
generate_keys.py [-f]
Outputs secret_keys.py file in current folder
By default, an existing secret_keys file will not be replaced.
Use the '-f' flag to force the new keys to be written to the file
"""
import string
import os.path
from optparse import OptionParser
from random import choice
from string import Template
|
# File settings
file_name = 'secret_keys.py'
# Template for the generated secret_keys.py module ($-placeholders are
# substituted in generate_keyfile).
file_template = Template('''# CSRF- and Session keys
CSRF_SECRET_KEY = '$csrf_key'
SESSION_KEY = '$session_key'
# Facebook Keys
FACEBOOK_APP_ID = '$facebook_app_id'
FACEBOOK_APP_SECRET = '$facebook_app_secret'
''')

# Get options from command line
parser = OptionParser()
parser.add_option("-f", "--force", dest="force",
                  help="force overwrite of existing secret_keys file",
                  action="store_true")
# Fix: parse -r as an int. optparse returns option values as strings by
# default, so "-r 32" used to hand a str to generate_randomkey, where
# range(length) would raise TypeError. The default 24 stays an int.
parser.add_option("-r", "--randomness", dest="randomness", type="int",
                  help="length (randomness) of generated key; default = 24",
                  default=24)
(options, args) = parser.parse_args()
def generate_randomkey(length):
    """Return a random alphanumeric key of *length* characters.

    Fixes:
    - use random.SystemRandom (OS entropy) instead of the default
      Mersenne-Twister `choice`; these keys are used as CSRF and session
      secrets, so they must not be predictable.
    - fall back to string.ascii_letters when the Python-2-only
      string.letters attribute is absent, keeping the script usable on
      both interpreters.
    """
    from random import SystemRandom
    chars = getattr(string, 'letters', string.ascii_letters) + string.digits
    rng = SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
def write_file(contents):
    """Write *contents* to the configured secret_keys output file."""
    # Binary mode matches the original behavior (Python 2 script).
    with open(file_name, 'wb') as out:
        out.write(contents)
def generate_keyfile(csrf_key, session_key):
    """Generate random keys for CSRF- and session key.

    Writes the filled-in template to secret_keys.py; an existing file is
    only overwritten when the -f/--force flag was given.
    """
    # Facebook credentials are left as placeholders for the developer.
    output = file_template.safe_substitute(dict(
        csrf_key=csrf_key, session_key=session_key,
        facebook_app_id='FILL ME IN', facebook_app_secret='FILL ME IN'
    ))
    if os.path.exists(file_name):
        if options.force is None:
            # Python 2 print statement -- this script targets Python 2.
            print "Warning: secret_keys.py file exists. Use '-f' flag to force overwrite."
        else:
            write_file(output)
    else:
        write_file(output)
def main():
    """Generate CSRF and session keys and write them to secret_keys.py."""
    # Fix: optparse hands back -r values as strings unless type="int" is
    # set, so coerce defensively here; int(24) is a no-op for the default.
    r = int(options.randomness)
    csrf_key = generate_randomkey(r)
    session_key = generate_randomkey(r)
    generate_keyfile(csrf_key, session_key)
if __name__ == "__main__":
    # Script entry point.
    main()
|
'''
Definitions of the configuration for correctness testing.
'''
import brian2
import os
import shutil
import sys
im | port brian2genn
from brian2.tests.features import (Configuration, DefaultConfiguration,
run_feature_tests, run_single_feature_test)
__all__ = ['GeNNConfiguration',
'GeNNConfigurationCPU',
'GeNNConfigurationOptimized']
class GeNNConfiguration(Configuration):
    """Correctness-test configuration running on the 'genn' device with
    default options (presumably the GPU path -- compare the CPU variant
    below, which passes use_GPU=False)."""
    name = 'GeNN'

    def before_run(self):
        # Clear extra compile args, persist preferences, then activate GeNN.
        brian2.prefs.codegen.cpp.extra_compile_args = []
        brian2.prefs._backup()
        brian2.set_device('genn')
class GeNNConfigurationCPU(Configuration):
    """Correctness-test configuration running GeNN with use_GPU=False."""
    name = 'GeNN_CPU'

    def before_run(self):
        brian2.prefs.codegen.cpp.extra_compile_args = []
        brian2.prefs._backup()
        brian2.set_device('genn', use_GPU=False)
class GeNNConfigurationOptimized(Configuration):
    """GeNN configuration with default brian2 preferences (i.e. whatever
    default compiler optimizations apply), rather than cleared
    extra_compile_args."""
    name = 'GeNN_optimized'

    def before_run(self):
        brian2.prefs.reset_to_defaults()
        brian2.prefs._backup()
        brian2.set_device('genn')
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Björn Larsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import logging
import signal
from robot.application import App
import robot.utils.config as config
from twisted.python import log
__logger = logging.getLogger(__name__)
def setupLogging(args):
    """Wire twisted's log system into stdlib logging and attach an
    INFO-level stream handler to the root logger.

    :param args: parsed command line arguments (currently unused)
    """
    # Connect the twisted log with the python log
    log.PythonLoggingObserver().start()
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logging.getLogger('').addHandler(handler)
def main(args):
    """
    Entry point for the application

    Takes a list of command line arguments as parameter
    :param args: command line argument list (sys.argv including the
        program name, per the __main__ guard below)
    :return: return code
    """
    setupLogging(args)
    config.init()
    app = App()

    def SIGINT_handler(num, frame):
        # Shared handler for SIGINT and SIGHUP: tear the app down.
        __logger.info("Signal handler triggered, purging application")
        app.purge()

    signal.signal(signal.SIGINT, SIGINT_handler)
    # SIGHUP only exists on POSIX; this entry point is unix-specific.
    signal.signal(signal.SIGHUP, SIGINT_handler)
    app.setup(args)
    app.run()
if __name__=='__main__':
    # Pass the full argv (including the program name) to main().
    main(sys.argv)
# -*- coding:UTF-8 -*-
#用于产生吞吐量、可用带宽、抖动、丢包率
import csv
# Parse whitespace-separated iperf report lines from iperf_13.csv and
# append selected columns (throughput, bandwidth, jitter, loss) to
# iperf_133.csv. Python 2 script (print statement, binary csv mode).
f = open('iperf_13.csv')
c = f.readlines()
# 'ab': append in binary mode, as Python 2's csv module expects.
csvfile1 = open('iperf_133.csv', 'ab')
writer1 = csv.writer(csvfile1)
for i in c:
    t = i.split()
    ll = []
    if len(t)==14:
        # 14-field report line: bandwidth value is field 5.
        a= t[5]
        e=float(a)
        if e > 10:
            # Values above 10 are rescaled by 0.001 -- presumably a
            # Kbits->Mbits unit conversion; TODO confirm against the
            # iperf output format.
            h = e * 0.001
            k = str(h)
            ll.append(k)
        else:
            a = t[5]
            ll.append(a)
        b = t[7]
        # NOTE(review): this rebinds `c` (the list being iterated) to a
        # field value; iteration continues over the original list object,
        # but the name reuse is confusing.
        c = t[-5]
        d = t[-1]
        ll.append(b)
        ll.append(c)
        # Loss field like "(12%)" -> take the digits inside the parens.
        if len(d) == 6:
            t = d[1:4]
            ll.append(t)
            # NOTE(review): unlike the 13-field branch below, this branch
            # never calls writer1.writerow(ll), so 14-field rows are
            # parsed but not written -- confirm whether that is intended.
        else:
            t = d[1]
            ll.append(t)
    elif len(t) == 13:
        # 13-field report line: bandwidth is field 4, jitter field 6.
        a = t[4]
        b = t[6]
        c = t[-5]
        d = t[-1]
        e = float(a)
        if e > 10:
            # Same presumed unit conversion as above.
            h = e * 0.001
            k = str(h)
            ll.append(k)
        else:
            a = t[4]
            ll.append(a)
        ll.append(b)
        ll.append(c)
        if len(d) == 6:
            t = d[1:4]
            ll.append(t)
            writer1.writerow(ll)
        else:
            t = d[1]
            ll.append(t)
            writer1.writerow(ll)
    else:
        # Lines with any other field count are ignored.
        continue
print "all complted"
csvfile1.close()
|
# $Id$
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the BuildComposite functionality
"""
import io
import os
import unittest
from rdkit import RDConfig
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import BuildComposite
import pickle
class TestCase(unittest.TestCase):
    """Regression tests for BuildComposite.

    Each test loads a pickled reference composite from the test-data
    directory, rebuilds a composite with matching parameters against the
    test database, and compares the two model-by-model.
    """

    def setUp(self):
        self.baseDir = os.path.join(RDConfig.RDCodeDir, 'ML', 'test_data')
        self.dbName = RDConfig.RDTestDatabase
        self.details = BuildComposite.SetDefaults()
        self.details.dbName = self.dbName
        self.details.dbUser = RDConfig.defaultDBUser
        self.details.dbPassword = RDConfig.defaultDBPassword

    def _loadRefCompos(self, refComposName):
        """Load a reference composite pickle from the test-data dir.

        The reference pickles are stored as text; read them as text,
        normalize Windows line endings, and unpickle from the re-encoded
        bytes. (Extracted: this five-line dance was duplicated verbatim
        in all seven tests; the redundant close() inside the old `with`
        blocks is gone too.)
        """
        with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
            buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
        with io.BytesIO(buf) as pklF:
            return pickle.load(pklF)

    def _init(self, refCompos, copyBounds=0):
        """Copy run parameters from *refCompos* into self.details and
        sanity-check that the database columns match the reference
        composite's descriptors."""
        BuildComposite._verbose = 0
        conn = DbConnect(self.details.dbName, self.details.tableName)
        cols = [x.upper() for x in conn.GetColumnNames()]
        cDescs = [x.upper() for x in refCompos.GetDescriptorNames()]
        self.assertEqual(cols, cDescs)
        self.details.nModels = 10
        self.details.lockRandom = 1
        self.details.randomSeed = refCompos._randomSeed
        self.details.splitFrac = refCompos._splitFrac
        if self.details.splitFrac:
            self.details.splitRun = 1
        else:
            self.details.splitRun = 0
        if not copyBounds:
            self.details.qBounds = [0] * len(cols)
        else:
            self.details.qBounds = refCompos.GetQuantBounds()[0]

    def compare(self, compos, refCompos):
        """Assert two composites agree: same size, and matching count and
        error (to 4 places) for each model after sorting."""
        self.assertEqual(len(compos), len(refCompos))
        cs = [compos[i] for i in range(len(compos))]
        rcs = [refCompos[i] for i in range(len(refCompos))]
        # NOTE(review): both key elements are x[2]; (x[1], x[2]) may have
        # been intended. Kept as-is to preserve the reference ordering.
        cs.sort(key=lambda x: (x[2], x[2]))
        rcs.sort(key=lambda x: (x[2], x[2]))
        for (_, count, err), (_, refCount, refErr) in zip(cs, rcs):
            self.assertEqual(count, refCount)
            self.assertAlmostEqual(err, refErr, 4)

    def test1_basics(self):
        self.details.tableName = 'ferro_quant'
        refCompos = self._loadRefCompos('ferromag_quant_10.pkl')
        self._init(refCompos)
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)

    def test2_depth_limit(self):
        self.details.tableName = 'ferro_quant'
        refCompos = self._loadRefCompos('ferromag_quant_10_3.pkl')
        self._init(refCompos)
        self.details.limitDepth = 3
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)

    def test3_depth_limit_less_greedy(self):
        self.details.tableName = 'ferro_quant'
        refCompos = self._loadRefCompos('ferromag_quant_10_3_lessgreedy.pkl')
        self._init(refCompos)
        self.details.limitDepth = 3
        self.details.lessGreedy = 1
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)

    def test4_more_trees(self):
        self.details.tableName = 'ferro_quant'
        refCompos = self._loadRefCompos('ferromag_quant_50_3.pkl')
        self._init(refCompos)
        self.details.limitDepth = 3
        self.details.nModels = 50
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)

    def test5_auto_bounds(self):
        self.details.tableName = 'ferro_noquant'
        refCompos = self._loadRefCompos('ferromag_auto_10_3.pkl')
        self._init(refCompos, copyBounds=1)
        self.details.limitDepth = 3
        self.details.nModels = 10
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)

    def test6_auto_bounds_real_activity(self):
        # auto bounds with a real valued activity
        self.details.tableName = 'ferro_noquant_realact'
        refCompos = self._loadRefCompos('ferromag_auto_10_3.pkl')
        self._init(refCompos, copyBounds=1)
        self.details.limitDepth = 3
        self.details.nModels = 10
        self.details.activityBounds = [0.5]
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)

    def test7_composite_naiveBayes(self):
        # composite of naive bayes models instead of trees
        self.details.tableName = 'ferro_noquant'
        refCompos = self._loadRefCompos('ferromag_NaiveBayes.pkl')
        self._init(refCompos, copyBounds=1)
        self.details.useTrees = 0
        self.details.useNaiveBayes = 1
        self.details.mEstimateVal = 20.0
        self.details.qBounds = [0] + [2] * 6 + [0]
        compos = BuildComposite.RunIt(self.details, saveIt=0)
        self.compare(compos, refCompos)
if __name__ == '__main__':  # pragma: nocover
    # Run the test suite when invoked directly.
    unittest.main()
|
'''
Created on 13 Jul 2017
@author: T
https://www.youtube.com/watch?v=6isuF_bBiXs
Tkinter:
+ standard lib
+ lightweight
+ good enough
- limited widgets
- strange import
- ugly
Python 3x
- ttk module (theme for better)
* run in transparent loop
* widgets are with parents
Geometry:
: absolute (avoid this)
: pa | ck
+ simple
- not flexible
: grid
'''
from tkinter import *
from tkinter.ttk import *
class WithoutTtk():
    """Demo window: a label flanked by two quit buttons, laid out with
    grid(), plus a File->Quit menu."""

    def __init__(self, root):
        self.frame = Frame(root)
        self.build_window()
        self.frame.pack(fill='both')
        # Attach a File menu with a Quit entry to the toplevel window.
        menubar = Menu(root)
        root['menu'] = menubar
        menu_file = Menu(menubar)
        menu_file.add_command(label="Quit", command=self.quit)
        menubar.add_cascade(menu=menu_file, label="File")

    def build_window(self):
        # One row, three columns: [Terrible] [label] [not bad].
        # The commented-out pack()/sticky calls are kept as alternatives
        # discussed in the tutorial this file follows.
        label = Label(self.frame, text="How do I look?")
        # label.pack(side="top")
        label.grid(row=0, column=1)
        button_bad = Button(self.frame, text="Terrible", command=self.quit)
        # button_bad.pack(side="left")
        button_bad.grid(row=0, column=0)
        # button_bad.grid(row=0, column=0, sticky="E")
        button_good = Button(self.frame, text="not bad", command=self.quit)
        # button_good.pack(side="right")
        button_good.grid(row=0, column=2)
        # button_good.grid(row=0, column=2, sticky="W")
        self.frame.rowconfigure(0, weight=2)  # row 0 is the one which will grow
        self.frame.columnconfigure(1, weight=2)

    def quit(self):
        # Leave the mainloop; both buttons and the menu entry end here.
        self.frame.quit()
if __name__ == '__main__':
    # Build the demo UI and enter the Tk event loop.
    root = Tk()
    myApp = WithoutTtk(root)
    root.mainloop()
ge you don't need to call this method directly - it
is called automatically at object initialization.
:param path:
File path from which to load the previous config. If `None`,
config is loaded from the default location. If `path` is
specified, subsequent `save()` calls will write to the same
path.
"""
self.path = path or self.path
with open(self.path) as f:
| self._prev_dict = json.load(f)
def changed(self, key):
    """Return True if the current value for this key is different from
    the previous value.
    """
    # Without a previously-saved config, every key counts as changed.
    if self._prev_dict is None:
        return True
    return self.get(key) != self.previous(key)
def previous(self, key):
    """Return previous value for this key, or None if there
    is no previous value.
    """
    if not self._prev_dict:
        return None
    return self._prev_dict.get(key)
def save(self):
    """Save this config to disk.

    If the charm is using the :mod:`Services Framework <services.base>`
    or :meth:'@hook <Hooks.hook>' decorator, this
    is called automatically at the end of successful hook execution.
    Otherwise, it should be called directly by user code.

    To disable automatic saves, set ``implicit_save=False`` on this
    instance.
    """
    # Carry forward previous keys missing from the current config, then
    # persist the merged mapping as JSON (python 2: dict.iteritems).
    if self._prev_dict:
        for key, value in self._prev_dict.iteritems():
            if key not in self:
                self[key] = value
    with open(self.path, 'w') as f:
        json.dump(self, f)
@cached
def config(scope=None):
    """Juju charm configuration.

    With *scope*, returns just that config item; otherwise returns the
    whole configuration wrapped in a Config instance. Returns None when
    the command output cannot be parsed as JSON.
    """
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        data = json.loads(subprocess.check_output(cmd))
        return data if scope is not None else Config(data)
    except ValueError:
        return None
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information.

    :param attribute: setting to fetch; '-' (the default) fetches all.
    :param unit: remote unit to query; defaults to the context unit.
    :param rid: relation id to query; defaults to the current relation.
    :return: decoded JSON, or None when the output is not valid JSON or
        when relation-get exits with status 2 (no such setting).
    """
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        return None
    # Fix: "except CalledProcessError, e" is Python-2-only syntax (a
    # SyntaxError on Python 3); "as e" is valid on Python >= 2.6 too.
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit"""
    settings = relation_settings if relation_settings else {}
    cmd = ['relation-set']
    if relation_id is not None:
        cmd.extend(('-r', relation_id))
    # Explicit settings first, then keyword arguments, in that order;
    # a None value emits "key=" which clears the setting.
    for key, value in list(settings.items()) + list(kwargs.items()):
        if value is None:
            cmd.append('{}='.format(key))
        else:
            cmd.append('{}={}'.format(key, value))
    subprocess.check_call(cmd)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
@cached
def relation_ids(reltype=None):
    """A list of relation_ids for *reltype* (defaulting to the relation
    type of the currently-executing hook); [] when the type is unknown
    or there are no relations."""
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
        # 'relation-ids' prints JSON null when there are no relations.
        return json.loads(subprocess.check_output(relid_cmd_line)) or []
    return []
@cached
def related_units(relid=None):
    """A list of related units"""
    rel = relid or relation_id()
    cmd = ['relation-list', '--format=json']
    if rel is not None:
        cmd.extend(('-r', rel))
    # 'relation-list' prints JSON null when there are no related units.
    return json.loads(subprocess.check_output(cmd)) or []
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the JSON representation of a unit's relation data."""
    unit = unit or remote_unit()
    data = relation_get(unit=unit, rid=rid)
    # '-list' attributes hold whitespace-separated values; expand them.
    for key in data:
        if key.endswith('-list'):
            data[key] = data[key].split()
    data['__unit__'] = unit
    return data
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    relation_data = []
    # NOTE(review): relation_ids() returns a *list*, while the code below
    # treats relid as a single relation id -- the default path looks
    # suspect; confirm that callers always pass an explicit relid.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type"""
    reltype = reltype or relation_type()
    result = []
    # Flatten all relations across every relation id of this type,
    # tagging each entry with the relation id it came from.
    for relid in relation_ids(reltype):
        for rel in relations_for_id(relid):
            rel['__relid__'] = relid
            result.append(rel)
    return result
@cached
def relation_types():
    """Get a list of relation types supported by this charm.

    Reads the provides/requires/peers sections of the charm's
    metadata.yaml.
    """
    charmdir = os.environ.get('CHARM_DIR', '')
    # Fix: use a context manager so the file is closed even when
    # yaml.safe_load raises; the old open()/close() pair leaked the
    # handle on parse errors.
    with open(os.path.join(charmdir, 'metadata.yaml')) as mdf:
        md = yaml.safe_load(mdf)
    rel_types = []
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    return rel_types
@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            # Start with our own unit's settings, then add each remote
            # unit's data for this relation id.
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                units[unit] = relation_get(unit=unit, rid=relid)
            relids[relid] = units
        rels[reltype] = relids
    return rels
@cached
def is_relation_made(relation, keys='private-address'):
    """Determine whether a relation is established by checking for
    presence of key(s). If a list of keys is provided, they
    must all be present for the relation to be identified as made.
    """
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            values = [relation_get(k, rid=r_id, unit=unit) for k in keys]
            # Every requested key must be present on at least one unit.
            if all(v is not None for v in values):
                return True
    return False
def open_port(port, protocol="TCP"):
    """Open a service network port"""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
def close_port(port, protocol="TCP"):
    """Close a service network port"""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
@cached
def unit_get(attribute):
    """Fetch *attribute* of the local unit via ``unit-get``.

    Returns None when the command output is not valid JSON. (The old
    docstring said this queried the remote unit; ``unit-get`` reports on
    the local unit.)
    """
    try:
        return json.loads(
            subprocess.check_output(['unit-get', '--format=json', attribute]))
    except ValueError:
        return None
def unit_private_ip():
    """Get this unit's private IP address"""
    # Thin convenience wrapper around unit_get.
    return unit_get('private-address')
class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called"""
    pass
class Hooks(object):
"""A convenient handler for hook functions.
Example::
hooks = Hooks()
# register a hook, taking its name from the function name
@hooks.hook()
def install():
pass # your code here
# register a hook, providing a custom hook name
@hooks.hook("config-changed")
def config_changed():
pass # your code here
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self):
    # Start with an empty hook-name -> callable registry.
    super(Hooks, self).__init__()
    self._hooks = {}
def register(self, name, function):
    """Register a hook callable under *name* (later names overwrite)."""
    self._hooks[name] = function
def execute(self, args):
"""Execute a registered hook based on arg |
"""
To start UNO for both Calc and Writer:
(Note that if you use the current_document command, it will open the Calc's current document since it's the first switch passed)
libreoffice "--accept=socket,host=localhost,port=18100;urp;StarOffice.ServiceManager" --norestore --nofirststartwizard --nologo --calc --writer
To start UNO without opening a libreoffice instance, use the --headless switch:
(Note that this doesn't allow to use the current_document command)
libreoffice --headless "--accept=socket,host=localhost,port=18100;urp;StarOffice.ServiceManager" --norestore --nofirststartwizard --nologo --calc --writer
"""
from uno import getComponentContext
from com.sun.star.connection import ConnectionSetupException
from com.sun.star.awt.FontWeight import BOLD
import sys
# For saving the file
from com.sun.star.beans import PropertyValue
from uno import systemPathToFileUrl
class Message(object):
    """User-facing message strings used by this script."""
    connection_setup_exception = "Error: Please start the uno bridge first."
# Connect to libreoffice using UNO
UNO_PORT = 18100
try:
    localContext = getComponentContext()
    resolver = localContext.ServiceManager.createInstanceWithContext(
        "com.sun.star.bridge.UnoUrlResolver", localContext)
    context = resolver.resolve(
        "uno:socket,host=localhost,port=%d;urp;StarOffice.ComponentContext" % UNO_PORT)
except ConnectionSetupException:
    # Nothing works without a running UNO bridge; bail out immediately.
    print("%s\n" % Message.connection_setup_exception)
    sys.exit(1)
# Get the desktop service
desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
class Interface(object):
    """Static helpers wrapping the UNO desktop/spreadsheet API.

    The one-line docstrings on the methods show the command syntax this
    script exposes to its caller.
    """
    # NOTE: class-level (shared) dict, not per-instance state.
    variables = {}

    @staticmethod
    def current_document():
        """current_document()"""
        return desktop.getCurrentComponent()

    @staticmethod
    def load_document(path):
        """load_document(['path'])"""
        url = systemPathToFileUrl(path)
        return desktop.loadComponentFromURL(url ,"_blank", 0, ())

    @staticmethod
    def new_document():
        """new_document()"""
        # Opens a new, empty Calc spreadsheet.
        return desktop.loadComponentFromURL("private:factory/scalc","_blank", 0, ())

    @staticmethod
    def current_sheet(document):
        """[document].current_sheet()"""
        return document.getCurrentController().getActiveSheet()

    @staticmethod
    def save_as(document, path):
        """[document].save_as(['path'])"""
        url = systemPathToFileUrl(path)
        # Set file to overwrite
        # NOTE(review): UNO's Overwrite property is a boolean; the string
        # 'overwrite' is merely truthy here -- confirm against the
        # com.sun.star.document.MediaDescriptor documentation.
        property_value = PropertyValue()
        property_value.Name = 'Overwrite'
        property_value.Value = 'overwrite'
        properties = (property_value,)
        # Save to file
        document.storeAsURL(url, properties)
        return True

    @staticmethod
    def fetch_cell(sheet, cell_range):
        """[sheet].fetch_cell(['A1'])"""
        return sheet.getCellRangeByName(cell_range)

    @staticmethod
    def set_text(cell, string):
        """[cell].set_text(['string'])"""
        # Strip one pair of matching surrounding quotes, if present.
        if (string.startswith('"') and string.endswith('"')) or \
           (string.startswith("'") and string.endswith("'")):
            string = string[1:-1]
        cell.setString(string)
        return True

    @staticmethod
    def get_text(cell):
        """[cell].get_text()"""
        return cell.getString()

    @staticmethod
    def weight(cell, bold):
        """[cell].weight(['bold'])"""
        # Accept the word bold with or without surrounding quotes.
        if bold.strip("'").strip('"') == "bold":
            cell.CharWeight = BOLD
            return True
        else:
            return False
|
"""
Records for SMART Reference EMR
Ben Adida & Josh Mandel
"""
from base import *
from django.utils import simplejson
from django.conf import settings
from smart.common.rdf_tools.rdf_ontology import ontology
from smart.common.rdf_tools.util import rdf, foaf, vcard, sp, serialize_rdf, parse_rdf, bound_graph, URIRef, Namespace
from smart.lib import utils
from smart.models.apps import *
from smart.models.accounts import *
from smart.triplestore import *
from string import Template
import re
import datetime
class Record(Object):
    """A patient record in the SMART reference EMR (Python 2 codebase)."""
    Meta = BaseMeta()
    # Display name of the record's subject.
    full_name = models.CharField(max_length=150, null=False)

    def __unicode__(self):
        return 'Record %s' % self.id

    def generate_direct_access_token(self, account, token_secret=None):
        """Create and persist a direct-access token tying *account* to
        this record."""
        u = RecordDirectAccessToken.objects.create(
            record=self,
            account=account,
            token_secret=token_secret
        )
        u.save()
        return u

    @classmethod
    def search_records(cls, query):
        """Run a SPARQL *query* against the triplestore and return the
        contexts of the matching Demographics subjects.

        NOTE(review): any exception is swallowed and None returned,
        which hides triplestore errors from callers; the bare
        "except Exception, e" is also Python-2-only syntax.
        """
        try:
            c = TripleStore()
            ids = parse_rdf(c.sparql(query))
        except Exception, e:
            return None
        from smart.models.record_object import RecordObject
        # NOTE(review): `demographics` is computed but never used below.
        demographics = RecordObject[sp.Demographics]
        subjects = [p[0] for p in ids.triples((None, rdf['type'],
                                               sp.Demographics))]
        ret = c.get_contexts(subjects)
        return ret

    @classmethod
    def rdf_to_objects(cls, res):
        """Parse an RDF demographics result into a list of Record
        instances (id, name, dob, gender, zipcode populated)."""
        if res is None:
            return None
        m = parse_rdf(res)
        record_list = []
        q = """
            PREFIX sp:<http://smartplatforms.org/terms#>
            PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX dcterms:<http://purl.org/dc/terms/>
            PREFIX v:<http://www.w3.org/2006/vcard/ns#>
            PREFIX foaf:<http://xmlns.com/foaf/0.1/>
            SELECT ?gn ?fn ?dob ?gender ?zipcode ?d
            WHERE {
              ?d rdf:type sp:Demographics.
              ?d v:n ?n.
              ?n v:given-name ?gn.
              ?n v:family-name ?fn.
              optional{?d foaf:gender ?gender.}
              optional{?d v:bday ?dob.}
              optional{
                ?d v:adr ?a.
                ?a rdf:type v:Pref.
                ?a v:postal-code ?zipcode.
              }
              optional{
                ?d v:adr ?a.
                ?a v:postal-code ?zipcode.
              }
            }"""
        people = list(m.query(q))
        for p in people:
            record = Record()
            # The record id is embedded in the demographics URI:
            # .../records/<id>/demographics
            record.id = re.search(
                "\/records\/(.*?)\/demographics", str(p[5])).group(1)
            record.fn, record.ln, record.dob, record.gender, record.zipcode = p[:5]
            record_list.append(record)
        return record_list
class AccountApp(Object):
    """Join table: which apps (PHAs) an account has enabled."""
    account = models.ForeignKey(Account)
    app = models.ForeignKey(PHA)
    # uniqueness
    class Meta:
        app_label = APP_LABEL
        unique_together = (('account', 'app'),)
# Not an OAuth token, but an opaque token that can be used to support
# auto-login via a direct link to a smart_ui_server.
class RecordDirectAccessToken(Object):
    """Opaque, expiring token granting *account* direct access to *record*."""
    record = models.ForeignKey(
        Record, related_name='direct_access_tokens', null=False)
    account = models.ForeignKey(
        Account, related_name='direct_record_shares', null=False)
    token = models.CharField(max_length=40, unique=True)
    token_secret = models.CharField(max_length=60, null=True)
    expires_at = models.DateTimeField(null=False)
    def save(self, *args, **kwargs):
        """Fill in a random token and a default expiry on first save."""
        if not self.token:
            self.token = utils.random_string(30)
        if self.expires_at is None:
            minutes_to_expire = 30
            try:
                minutes_to_expire = settings.MINUTES_TO_EXPIRE_DIRECT_ACCESS
            except AttributeError:
                # Setting not configured; keep the 30-minute default. The
                # previous bare 'except:' also hid unrelated errors.
                pass
            self.expires_at = datetime.datetime.utcnow(
            ) + datetime.timedelta(minutes=minutes_to_expire)
        super(RecordDirectAccessToken, self).save(*args, **kwargs)
    class Meta:
        app_label = APP_LABEL
class RecordAlert(Object):
    """An alert raised against a record by an app, optionally acknowledged."""
    record = models.ForeignKey(Record)
    alert_text = models.TextField(null=False)
    alert_time = models.DateTimeField(auto_now_add=True, null=False)
    triggering_app = models.ForeignKey(
        'OAuthApp', null=False, related_name='alerts')
    acknowledged_by = models.ForeignKey('Account', null=True)
    acknowledged_at = models.DateTimeField(null=True)
    # uniqueness
    class Meta:
        app_label = APP_LABEL
    @classmethod
    def from_rdf(cls, rdfstring, record, app):
        """Parse exactly one sp:Alert from *rdfstring*, validate its severity
        code, and persist it as a RecordAlert for *record*/*app*."""
        s = parse_rdf(rdfstring)
        q = """
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX sp: <http://smartplatforms.org/terms#>
            SELECT ?notes ?severity
            WHERE {
              ?a rdf:type sp:Alert.
              ?a sp:notes ?notes.
              ?a sp:severity ?scv.
              ?scv sp:code ?severity.
            }"""
        r = list(s.query(q))
        assert len(r) == 1, "Expected one alert in post, found %s" % len(r)
        (notes, severity) = r[0]
        # NOTE(review): 'Literal' is not among the names imported at the top
        # of this file - confirm it is re-exported by rdf_tools.util.
        assert type(notes) == Literal
        spcodes = Namespace("http://smartplatforms.org/terms/code/alertLevel#")
        assert severity in [spcodes.information, spcodes.warning,
                            spcodes.critical]
        a = RecordAlert(
            record=record,
            alert_text=str(notes),
            triggering_app=app
        )
        a.save()
        return a
    def acknowledge(self, account):
        """Mark this alert as acknowledged by *account* right now."""
        self.acknowledged_by = account
        self.acknowledged_at = datetime.datetime.now()
        self.save()
class LimitedAccount(Account):
    """An account restricted to an explicit set of records."""
    records = models.ManyToManyField(Record, related_name="+")
|
#
# Copyright (c) 2013 Markus Eliasson, http://www.quarterapp.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import storage
import tornado.database
class QuarterSettings(object):
    """
    Application settings contains the settings specific to the application,
    not the running server. I.e. port numbers and such should not be kept here
    but in the application configuration file (quarterapp.conf).
    These settings might be updated at runtime
    """
    def __init__(self, db):
        """
        Constructs the application settings and try to update the settings
        from database
        @param db The Tornado database object used to access the database
        """
        self.db = db
        self.settings = {}
        self.update()
    def update(self):
        """
        Update the settings from the database, if cannot read from database the
        old settings remain active
        """
        logging.info("Updating settings")
        settings = storage.get_settings(self.db)
        if settings:
            for row in settings:
                self.settings[row.name] = row.value
        else:
            logging.warn("Could not find any settings in database - everything setup ok?")
    def get_value(self, key):
        """
        Get the setting value for the given key, if no setting exist for this key
        None is returned
        """
        # dict.get returns None for missing keys; dict.has_key is deprecated
        # (removed in Python 3) and did a redundant second lookup.
        return self.settings.get(key)
    def put_value(self, key, value):
        """
        Updates the value for the given key. If this key does not exist to begin with
        this function will not insert the value. I.e. this function will only update
        existing values.
        @param key The settings key to update value for
        @param value The new value
        """
        if key in self.settings:
            storage.put_setting(self.db, key, value)
            self.settings[key] = value
        else:
            logging.warning("Trying to update a settings key that does not exists! (%s)", key)
            raise Exception("Trying to update a settings key that does not exists!")
def create_default_config(path):
    """Create a quarterapp.conf file from the example config file"""
    import shutil, os.path
    destination = os.path.join(path, 'quarterapp.conf')
    if not os.path.exists(destination):
        example = os.path.join(os.path.dirname(__file__), 'resources', 'quarterapp.example.conf')
        shutil.copyfile(example, destination)
    else:
        # Never clobber an existing configuration.
        print('Cowardly refusing to overwrite configuration file')
from | __future__ import print_function
i | mport sys
def main():
    """Echo stdin lines tagged #STRUCT_SIZER, with the marker and '#'s removed."""
    for raw in sys.stdin:
        text = raw.strip()
        if text.startswith("#STRUCT_SIZER"):
            # Drop the 13-char marker plus its separator, then strip any
            # remaining '#' characters before printing.
            print(text[14:].replace("#", ""))
if __name__ == "__main__":
main()
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.networks.distributions."""
from absl.testing import absltest
from absl.testing import parameterized
from acme.tf.networks import distributions
import numpy as np
from numpy import testing as npt
class DiscreteValuedDistributionTest(parameterized.TestCase):
  """Shape and slicing checks for DiscreteValuedDistribution."""

  @parameterized.parameters(
      ((), (), 5),
      ((2,), (), 5),
      ((), (3, 4), 5),
      ((2,), (3, 4), 5),
      ((2, 6), (3, 4), 5),
  )
  def test_constructor(self, batch_shape, event_shape, num_values):
    # Logits carry one trailing axis of size num_values per event element.
    logits_shape = batch_shape + event_shape + (num_values,)
    logits_size = np.prod(logits_shape)
    logits = np.arange(logits_size, dtype=float).reshape(logits_shape)
    # Support values evenly spaced in [-1, 1] along the last axis.
    values = np.linspace(start=-np.ones(event_shape, dtype=float),
                         stop=np.ones(event_shape, dtype=float),
                         num=num_values,
                         axis=-1)
    distribution = distributions.DiscreteValuedDistribution(values=values,
                                                            logits=logits)
    # Check batch and event shapes.
    self.assertEqual(distribution.batch_shape, batch_shape)
    self.assertEqual(distribution.event_shape, event_shape)
    self.assertEqual(distribution.logits_parameter().shape.as_list(),
                     list(logits.shape))
    self.assertEqual(distribution.logits_parameter().shape.as_list()[-1],
                     logits.shape[-1])
    # Test slicing
    if len(batch_shape) == 1:
      slice_0_logits = distribution[1:3].logits_parameter().numpy()
      expected_slice_0_logits = distribution.logits_parameter().numpy()[1:3]
      npt.assert_allclose(slice_0_logits, expected_slice_0_logits)
    elif len(batch_shape) == 2:
      slice_logits = distribution[0, 1:3].logits_parameter().numpy()
      expected_slice_logits = distribution.logits_parameter().numpy()[0, 1:3]
      npt.assert_allclose(slice_logits, expected_slice_logits)
    else:
      # Scalar batch: nothing to slice.
      assert not batch_shape
if __name__ == '__main__':
absltest.main()
|
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_volume_folder(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScVolumeFolder/GetList',
12345,
self.configuration.dell_sc_volume_folder)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=SCSERVERS)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_init_volume(self,
                         mock_post,
                         mock_get_json,
                         mock_map_volume,
                         mock_unmap_volume,
                         mock_close_connection,
                         mock_open_connection,
                         mock_init):
        """_init_volume with a healthy server list maps then unmaps the volume."""
        self.scapi._init_volume(self.VOLUME)
        mock_map_volume.assert_called()
        mock_unmap_volume.assert_called()
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_204)
    def test_init_volume_failure(self,
                                 mock_post,
                                 mock_close_connection,
                                 mock_open_connection,
                                 mock_init):
        # Test case where ScServer list fails (204: no server list returned);
        # _init_volume should still only issue the POST and not raise.
        self.scapi._init_volume(self.VOLUME)
        mock_post.assert_called()
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=SCSERVERS_DOWN)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_init_volume_servers_down(self,
                                      mock_post,
                                      mock_get_json,
                                      mock_map_volume,
                                      mock_unmap_volume,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        # Test case where ScServer Status = Down
        self.scapi._init_volume(self.VOLUME)
        mock_map_volume.assert_called()
        mock_unmap_volume.assert_called()
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_volume_folder',
                       return_value=FLDR)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_201)
    def test_create_volume(self,
                           mock_post,
                           mock_find_volume_folder,
                           mock_get_json,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """create_volume with an existing folder returns the created ScVolume."""
        res = self.scapi.create_volume(
            self.volume_name,
            1,
            12345,
            self.configuration.dell_sc_volume_folder)
        mock_post.assert_called()
        mock_get_json.assert_called()
        mock_find_volume_folder.assert_called_once_with(
            12345, self.configuration.dell_sc_volume_folder)
        self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_create_volume_folder_path',
                       return_value=FLDR)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_volume_folder',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_201)
    def test_create_vol_and_folder(self,
                                   mock_post,
                                   mock_find_volume_folder,
                                   mock_create_vol_folder_path,
                                   mock_get_json,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        # Test calling create_volume where volume folder has to be created
        res = self.scapi.create_volume(
            self.volume_name,
            1,
            12345,
            self.configuration.dell_sc_volume_folder)
        mock_post.assert_called()
        mock_get_json.assert_called()
        mock_create_vol_folder_path.assert_called_once_with(
            12345,
            self.configuration.dell_sc_volume_folder)
        mock_find_volume_folder.assert_called_once_with(
            12345, self.configuration.dell_sc_volume_folder)
        self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_create_volume_folder_path',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_volume_folder',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_201)
    def test_create_vol_folder_fail(self,
                                    mock_post,
                                    mock_find_volume_folder,
                                    mock_create_vol_folder_path,
                                    mock_get_json,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        # Test calling create_volume where volume folder does not exist and
        # fails to be created; creation proceeds without a folder.
        res = self.scapi.create_volume(
            self.volume_name,
            1,
            12345,
            self.configuration.dell_sc_volume_folder)
        mock_post.assert_called()
        mock_get_json.assert_called()
        mock_create_vol_folder_path.assert_called_once_with(
            12345,
            self.configuration.dell_sc_volume_folder)
        mock_find_volume_folder.assert_called_once_with(
            12345, self.configuration.dell_sc_volume_folder)
        self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_ |
# -*- coding: utf-8 -*-
# python std lib
import asyncio
import os
import sys
import json
# rediscluster imports
from aredis import StrictRedisCluster, StrictRedis
# 3rd party imports
import pytest
from distutils.version import StrictVersion
# put our path in front so we can be sure we are testing locally no | t against the global package
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(1, basepath)
_REDIS_VERSIONS = {}
def get_versions(**kwargs):
    """Return {node: redis_version} for the cluster, memoized per kwargs."""
    cache_key = json.dumps(kwargs)
    if cache_key not in _REDIS_VERSIONS:
        client = _get_client(**kwargs)
        info = asyncio.get_event_loop().run_until_complete(client.info())
        _REDIS_VERSIONS[cache_key] = {
            node: node_info['redis_version'] for node, node_info in info.items()
        }
    return _REDIS_VERSIONS[cache_key]
def _get_client(cls=None, **kwargs):
    """Build a client (StrictRedisCluster by default) for the local test
    cluster on 127.0.0.1:7000; kwargs override the defaults."""
    client_cls = cls if cls else StrictRedisCluster
    params = dict(
        startup_nodes=[{
            'host': '127.0.0.1', 'port': 7000
        }],
        stream_timeout=10,
    )
    params.update(kwargs)
    return client_cls(**params)
def _init_mgt_client(request, cls=None, **kwargs):
    """
    Build a client and, when a pytest *request* is given, register a
    finalizer that disconnects its connection pool.
    """
    client = _get_client(cls=cls, **kwargs)
    if request:
        request.addfinalizer(lambda: client.connection_pool.disconnect())
    return client
def skip_if_not_password_protected_nodes():
    """
    Skip marker: active unless TEST_PASSWORD_PROTECTED is set in the environment.
    """
    not_protected = 'TEST_PASSWORD_PROTECTED' not in os.environ
    return pytest.mark.skipif(not_protected, reason="")
def skip_if_server_version_lt(min_version):
    """
    Skip marker: active when any cluster node runs Redis older than *min_version*.
    """
    required = StrictVersion(min_version)
    too_old = any(StrictVersion(v) < required
                  for v in get_versions().values())
    return pytest.mark.skipif(too_old, reason="")
def skip_if_redis_py_version_lt(min_version):
    """
    Skip marker: active when the installed aredis is older than *min_version*.
    """
    import aredis
    too_old = StrictVersion(aredis.__version__) < StrictVersion(min_version)
    return pytest.mark.skipif(too_old, reason="")
@pytest.fixture()
def o(request, *args, **kwargs):
    """
    Create a StrictRedisCluster instance with decode_responses set to True.
    """
    # kwargs may override the default decode_responses=True.
    params = {'decode_responses': True}
    params.update(kwargs)
    return _get_client(cls=StrictRedisCluster, **params)
@pytest.fixture()
def r(request, *args, **kwargs):
    """
    Create a StrictRedisCluster instance with default settings.
    """
    return _get_client(cls=StrictRedisCluster, **kwargs)
@pytest.fixture()
def ro(request, *args, **kwargs):
    """
    Create a StrictRedisCluster instance with readonly mode
    """
    # kwargs may override the default readonly=True.
    params = {'readonly': True}
    params.update(kwargs)
    return _get_client(cls=StrictRedisCluster, **params)
@pytest.fixture()
def s(*args, **kwargs):
    """
    Create a StrictRedisCluster instance with 'init_slot_cache' set to false
    """
    s = _get_client(**kwargs)
    # Sanity-check that no slot/node discovery has happened yet.
    assert s.connection_pool.nodes.slots == {}
    assert s.connection_pool.nodes.nodes == {}
    return s
@pytest.fixture()
def t(*args, **kwargs):
    """
    Create a regular StrictRedis object instance
    """
    return StrictRedis(*args, **kwargs)
@pytest.fixture()
def sr(request, *args, **kwargs):
    """
    Returns a instance of StrictRedisCluster
    """
    # reinitialize_steps=1 forces slot-table refresh on every MOVED error.
    return _get_client(reinitialize_steps=1, cls=StrictRedisCluster, **kwargs)
|
'''
Written by Dmitry Chirikov <dmitry@chirikov.ru>
This file is part of Luna, cluster provisioning tool
https://github.com/dchirikov/luna
This file is part of Luna.
Luna is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Luna is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Luna. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import socket
from binascii import hexlify, unhexlify
import logging
log = logging.getLogger(__name__)
af = {
4: socket.AF_INET,
6: socket.AF_INET6,
}
hex_format = {
4: '08x',
6: '032x'
}
def ntoa(num_ip, ver=4):
    """
    Convert the IP numip from the binary notation
    into the IPv4 numbers-and-dots form
    """
    try:
        # Render the integer as fixed-width hex, turn it into raw bytes,
        # then let inet_ntop format it for the requested address family.
        ip = socket.inet_ntop(
            af[ver],
            unhexlify(format(num_ip, hex_format[ver]))
        )
        return ip
    except:
        # Any failure (unknown version, out-of-range number) becomes a
        # RuntimeError; Python 2 raise syntax - this module is Python 2 only.
        err_msg = ("Cannot convert '{}' from C"
                   " to IPv{} format".format(num_ip, ver))
        log.error(err_msg)
        raise RuntimeError, err_msg
def aton(ip, ver=4):
    """
    Convert the IP ip from the IPv4 numbers-and-dots
    notation into binary form (in network byte order)
    """
    try:
        # inet_pton validates the textual address for the requested family.
        absnum = int(hexlify(socket.inet_pton(af[ver], ip)), 16)
        return long(absnum)
    except:
        err_msg = "Cannot convert IP '{}' to C format".format(ip)
        log.error(err_msg)
        raise RuntimeError, err_msg
def reltoa(num_net, rel_ip, ver):
    """
    Convert a relative ip (a number relative to the base of the
    network obtained using 'get_num_subnet') into an IPv4 address
    """
    absolute_ip = int(num_net) + int(rel_ip)
    return ntoa(absolute_ip, ver)
def atorel(ip, num_net, prefix, ver=4):
    """
    Convert an IPv4 address into a number relative to the base of
    the network obtained using 'get_num_subnet'
    """
    num_ip = aton(ip, ver)
    # Check if the ip address actually belongs to num_net/prefix
    if not ip_in_net(ip, num_net, prefix, ver):
        err_msg = ("Network '{}/{}' does not contain '{}'"
                   .format(ntoa(num_net, ver), prefix, ip))
        log.error(err_msg)
        raise RuntimeError, err_msg
    relative_num = long(num_ip - num_net)
    return relative_num
def get_num_subnet(ip, prefix, ver=4):
    """
    Get the address of the subnet to which ip belongs in binary form
    """
    maxbits = 32
    if ver == 6:
        maxbits = 128
    try:
        prefix = int(prefix)
    except:
        err_msg = "Prefix '{}' is invalid, must be 'int'".format(prefix)
        log.error(err_msg)
        raise RuntimeError, err_msg
    # Accepted prefixes exclude the host-only extremes (/31+/32, /127+/128).
    if ver == 4 and prefix not in range(1, 31):
        err_msg = "Prefix should be in the range [1..30]"
        log.error(err_msg)
        raise RuntimeError, err_msg
    if ver == 6 and prefix not in range(1, 127):
        err_msg = "Prefix should be in the range [1..126]"
        log.error(err_msg)
        raise RuntimeError, err_msg
    # Accept either an already-converted integer or a textual address.
    if type(ip) is long or type(ip) is int:
        num_ip = ip
    else:
        try:
            num_ip = aton(ip, ver)
        except socket.error:
            err_msg = "'{}' is not a valid IP".format(ip)
            log.error(err_msg)
            raise RuntimeError, err_msg
    # Shift precedence makes this 1 << (maxbits - prefix); the whole
    # expression builds the conventional netmask for the prefix.
    num_mask = (((1 << maxbits) - 1)
                ^ ((1 << (maxbits+1 - prefix) - 1) - 1))
    num_subnet = long(num_ip & num_mask)
    return num_subnet
def ip_in_net(ip, num_net, prefix, ver=4):
    """
    Check if an address (either in binary or IPv4 form) belongs to
    num_net/prefix
    """
    if type(ip) is long or type(ip) is int:
        num_ip = ip
    else:
        num_ip = aton(ip, ver)
    # Same subnet base => same network.
    num_subnet1 = get_num_subnet(num_net, prefix, ver)
    num_subnet2 = get_num_subnet(num_ip, prefix, ver)
    return num_subnet1 == num_subnet2
def guess_ns_hostname():
    """
    Try to guess the hostname to use for the nameserver
    it supports hosts of the format host-N, hostN for HA
    configurations. Returns the current hostname otherwise
    """
    ns_hostname = socket.gethostname().split('.')[0]
    if ns_hostname[-1:].isdigit():
        # Strip the trailing digits (and a separating dash, if any).
        guessed_name = re.match('(.*)[0-9]+$', ns_hostname).group(1)
        if guessed_name[-1] == '-':
            guessed_name = guessed_name[:-1]
        try:
            guessed_ip = socket.gethostbyname(guessed_name)
        except socket.error:
            # Guessed name does not resolve; the bare 'except:' used before
            # also hid unrelated errors.
            guessed_ip = None
        if guessed_ip:
            log.info(("Guessed that NS server should be '%s', "
                      "instead of '%s'. "
                      "Please update if this is not correct.") %
                     (guessed_name, ns_hostname))
            return guessed_name
    # Return the current host's hostname if the guessed name could not
    # be resolved
    return ns_hostname
def get_ip_version(ip):
    """Return 4 or 6 depending on which family parses *ip*; None if neither."""
    for candidate in (4, 6):
        try:
            int(hexlify(socket.inet_pton(af[candidate], ip)), 16)
            return candidate
        except Exception:
            pass
    return None
def ipv6_unwrap(ip):
    """
    Returns IPv6 ip address in full form:
    fe80:1:: => fe80:0001:0000:0000:0000:0000:0000:0000
    2001:db8::ff00:42:8329 => 2001:0db8:0000:0000:0000:ff00:0042:8329
    """
    # Round-trip through the binary form to normalize (and validate) first.
    ip = ntoa(aton(ip, 6), 6)
    out = [''] * 8
    if '::' in ip:
        start, end = ip.split('::')
        start_splited = start.split(':')
        out[:len(start_splited)] = start_splited
        i = 1
        for elem in reversed(end.split(':')):
            out[-i] = elem
            i += 1
    else:
        # inet_ntop only emits '::' when there is a run of zero groups; an
        # address like 2001:db8:1:2:3:4:5:6 has all eight groups explicit.
        # The previous code assumed '::' was always present and raised a
        # ValueError here for such addresses.
        out = ip.split(':')
    for i in range(len(out)):
        out[i] = '{:0>4}'.format(out[i])
    return ":".join(out)
|
Tests for projectq.ops._command."""
import math
import sys
from copy import deepcopy
import pytest
from projectq import MainEngine
from projectq.cengines import DummyEngine
from projectq.meta import ComputeTag, canonical_ctrl_state
from projectq.ops import BasicGate, CtrlAll, NotMergeable, Rx, _command
from projectq.types import Qubit, Qureg, WeakQubitRef
@pytest.fixture
def main_engine():
    """MainEngine wired entirely to DummyEngines (no real backend)."""
    return MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
def test_command_init(main_engine):
    """Command stores gate/tags, converts qubits to WeakQubitRef, and sorts
    interchangeable quregs."""
    qureg0 = Qureg([Qubit(main_engine, 0)])
    qureg1 = Qureg([Qubit(main_engine, 1)])
    qureg2 = Qureg([Qubit(main_engine, 2)])
    # qureg3 = Qureg([Qubit(main_engine, 3)])
    # qureg4 = Qureg([Qubit(main_engine, 4)])
    gate = BasicGate()
    cmd = _command.Command(main_engine, gate, (qureg0, qureg1, qureg2))
    assert cmd.gate == gate
    assert cmd.tags == []
    expected_tuple = (qureg0, qureg1, qureg2)
    for cmd_qureg, expected_qureg in zip(cmd.qubits, expected_tuple):
        assert cmd_qureg[0].id == expected_qureg[0].id
        # Testing that Qubits are now WeakQubitRef objects
        assert type(cmd_qureg[0]) == WeakQubitRef
    assert cmd._engine == main_engine
    # Test that quregs are ordered if gate has interchangeable qubits:
    symmetric_gate = BasicGate()
    symmetric_gate.interchangeable_qubit_indices = [[0, 1]]
    symmetric_cmd = _command.Command(main_engine, symmetric_gate, (qureg2, qureg1, qureg0))
    assert cmd.gate == gate
    assert cmd.tags == []
    expected_ordered_tuple = (qureg1, qureg2, qureg0)
    for cmd_qureg, expected_qureg in zip(symmetric_cmd.qubits, expected_ordered_tuple):
        assert cmd_qureg[0].id == expected_qureg[0].id
    assert symmetric_cmd._engine == main_engine
def test_command_deepcopy(main_engine):
    """deepcopy duplicates gate/tags/qubits but keeps a reference to the engine."""
    qureg0 = Qureg([Qubit(main_engine, 0)])
    qureg1 = Qureg([Qubit(main_engine, 1)])
    gate = BasicGate()
    cmd = _command.Command(main_engine, gate, (qureg0,))
    cmd.add_control_qubits(qureg1)
    cmd.tags.append("MyTestTag")
    copied_cmd = deepcopy(cmd)
    # Test that deepcopy gives same cmd
    assert copied_cmd.gate == gate
    assert copied_cmd.tags == ["MyTestTag"]
    assert len(copied_cmd.qubits) == 1
    assert copied_cmd.qubits[0][0].id == qureg0[0].id
    assert len(copied_cmd.control_qubits) == 1
    assert copied_cmd.control_qubits[0].id == qureg1[0].id
    # Engine should not be deepcopied but a reference:
    assert id(copied_cmd.engine) == id(main_engine)
    # Test that deepcopy is actually a deepcopy
    cmd.tags = ["ChangedTag"]
    assert copied_cmd.tags == ["MyTestTag"]
    # This was a no-op '==' comparison before; assign so the copy's
    # independence from the original's control qubits is actually exercised.
    cmd.control_qubits[0].id = 10
    assert copied_cmd.control_qubits[0].id == qureg1[0].id
    cmd.gate = "ChangedGate"
    assert copied_cmd.gate == gate
def test_command_get_inverse(main_engine):
    """get_inverse inverts the gate and deep-copies qubits/controls/tags."""
    qubit = main_engine.allocate_qubit()
    ctrl_qubit = main_engine.allocate_qubit()
    cmd = _command.Command(main_engine, Rx(0.5), (qubit,))
    cmd.add_control_qubits(ctrl_qubit)
    cmd.tags = [ComputeTag()]
    inverse_cmd = cmd.get_inverse()
    # Rx angles are normalized into [0, 4pi).
    assert inverse_cmd.gate == Rx(-0.5 + 4 * math.pi)
    assert len(cmd.qubits) == len(inverse_cmd.qubits)
    assert cmd.qubits[0][0].id == inverse_cmd.qubits[0][0].id
    assert id(cmd.qubits[0][0]) != id(inverse_cmd.qubits[0][0])
    assert len(cmd.control_qubits) == len(inverse_cmd.control_qubits)
    assert cmd.control_qubits[0].id == inverse_cmd.control_qubits[0].id
    assert id(cmd.control_qubits[0]) != id(inverse_cmd.control_qubits[0])
    assert cmd.tags == inverse_cmd.tags
    assert id(cmd.tags[0]) != id(inverse_cmd.tags[0])
    assert id(cmd.engine) == id(inverse_cmd.engine)
def test_command_get_merged(main_engine):
    """get_merged combines equal-context commands; mismatched controls or
    tags raise NotMergeable."""
    qubit = main_engine.allocate_qubit()
    ctrl_qubit = main_engine.allocate_qubit()
    cmd = _command.Command(main_engine, Rx(0.5), (qubit,))
    cmd.tags = ["TestTag"]
    cmd.add_control_qubits(ctrl_qubit)
    # Merge two commands
    cmd2 = _command.Command(main_engine, Rx(0.5), (qubit,))
    cmd2.add_control_qubits(ctrl_qubit)
    cmd2.tags = ["TestTag"]
    merged_cmd = cmd.get_merged(cmd2)
    expected_cmd = _command.Command(main_engine, Rx(1.0), (qubit,))
    expected_cmd.add_control_qubits(ctrl_qubit)
    expected_cmd.tags = ["TestTag"]
    assert merged_cmd == expected_cmd
    # Don't merge commands as different control qubits
    cmd3 = _command.Command(main_engine, Rx(0.5), (qubit,))
    cmd3.tags = ["TestTag"]
    with pytest.raises(NotMergeable):
        cmd.get_merged(cmd3)
    # Don't merge commands as different tags
    cmd4 = _command.Command(main_engine, Rx(0.5), (qubit,))
    cmd4.add_control_qubits(ctrl_qubit)
    with pytest.raises(NotMergeable):
        cmd.get_merged(cmd4)
def test_command_is_identity(main_engine):
    """Rx(0) is the identity (as is its inverse); Rx(0.5) is not."""
    qubit = main_engine.allocate_qubit()
    qubit2 = main_engine.allocate_qubit()
    cmd = _command.Command(main_engine, Rx(0.0), (qubit,))
    cmd2 = _command.Command(main_engine, Rx(0.5), (qubit2,))
    inverse_cmd = cmd.get_inverse()
    inverse_cmd2 = cmd2.get_inverse()
    assert inverse_cmd.gate.is_identity()
    assert cmd.gate.is_identity()
    assert not inverse_cmd2.gate.is_identity()
    assert not cmd2.gate.is_identity()
def test_command_order_qubits(main_engine):
    """Quregs within each interchangeable group are sorted deterministically."""
    qubit0 = Qureg([Qubit(main_engine, 0)])
    qubit1 = Qureg([Qubit(main_engine, 1)])
    qubit2 = Qureg([Qubit(main_engine, 2)])
    qubit3 = Qureg([Qubit(main_engine, 3)])
    qubit4 = Qureg([Qubit(main_engine, 4)])
    qubit5 = Qureg([Qubit(main_engine, 5)])
    gate = BasicGate()
    # Positions {0,4,5} may be permuted among themselves, as may {1,2}.
    gate.interchangeable_qubit_indices = [[0, 4, 5], [1, 2]]
    input_tuple = (qubit4, qubit5, qubit3, qubit2, qubit1, qubit0)
    expected_tuple = (qubit0, qubit3, qubit5, qubit2, qubit1, qubit4)
    cmd = _command.Command(main_engine, gate, input_tuple)
    for ordered_qubit, expected_qubit in zip(cmd.qubits, expected_tuple):
        assert ordered_qubit[0].id == expected_qubit[0].id
def test_command_interchangeable_qubit_indices(main_engine):
    """The command exposes the gate's interchangeable groups (order-agnostic)."""
    gate = BasicGate()
    gate.interchangeable_qubit_indices = [[0, 4, 5], [1, 2]]
    qubit0 = Qureg([Qubit(main_engine, 0)])
    qubit1 = Qureg([Qubit(main_engine, 1)])
    qubit2 = Qureg([Qubit(main_engine, 2)])
    qubit3 = Qureg([Qubit(main_engine, 3)])
    qubit4 = Qureg([Qubit(main_engine, 4)])
    qubit5 = Qureg([Qubit(main_engine, 5)])
    input_tuple = (qubit4, qubit5, qubit3, qubit2, qubit1, qubit0)
    cmd = _command.Command(main_engine, gate, input_tuple)
    # Group order is unspecified, so accept either arrangement.
    assert (
        cmd.interchangeable_qubit_indices
        == [
            [0, 4, 5],
            [1, 2],
        ]
        or cmd.interchangeable_qubit_indices == [[1, 2], [0, 4, 5]]
    )
@pytest.mark.parametrize(
    'state',
    [0, 1, '0', '1', CtrlAll.One, CtrlAll.Zero],
    ids=['int(0)', 'int(1)', 'str(0)', 'str(1)', 'CtrlAll.One', 'CtrlAll.Zero'],
)
def test_commmand_add_control_qubits_one(main_engine, state):
    """Adding one control qubit records its id and canonical control state;
    adding a bare Qubit (not a Qureg) raises ValueError."""
    qubit0 = Qureg([Qubit(main_engine, 0)])
    qubit1 = Qureg([Qubit(main_engine, 1)])
    cmd = _command.Command(main_engine, Rx(0.5), (qubit0,))
    cmd.add_control_qubits(qubit1, state=state)
    assert cmd.control_qubits[0].id == 1
    assert cmd.control_state == canonical_ctrl_state(state, 1)
    with pytest.raises(ValueError):
        cmd.add_control_qubits(qubit0[0])
|
@pytest.mark.parametrize(
    'state',
    [0, 1, 2, 3, '00', '01', '10', '11', CtrlAll.One, CtrlAll.Zero],
    ids=[
        'int(0)',
        'int(1)',
        'int(2)',
        'int(3)',
        'str(00)',
        'str(01)',
        'str(10)',
        # Fixed pytest id: this entry labels the '11' parameter and
        # previously read 'str(1)'.
        'str(11)',
        'CtrlAll.One',
        'CtrlAll.Zero',
    ],
)
def test_commmand_add_control_qubits_two(main_engine, state):
    """Appending two control qubits extends the canonical control state
    after the pre-existing single control's '1'."""
    qubit0 = Qureg([Qubit(main_engine, 0)])
    qubit1 = Qureg([Qubit(main_engine, 1)])
    qubit2 = Qureg([Qubit(main_engine, 2)])
    qubit3 = Qureg([Qubit(main_engine, 3)])
    cmd = _command.Command(main_engine, Rx(0.5), (qubit0,), qubit1)
    cmd.add_control_qubits(qubit2 + qubit3, state)
    assert cmd.control_qubits[0].id == 1
    assert cmd.control_state == '1' + canonical_ctrl_state(state, 2)
def test_command_all_qubits(main_engine):
qubit0 = Qureg([Qubit(main_engine, 0)])
qubit1 = Qureg([Qubit(ma |
import numpy
import bob.bio. | spear
# The a | uthors of CQCC features recommend to use only first 20 features, plus deltas and delta-deltas
# feature vector is 60 in this case
# Keep CQCC coefficients 0-19 plus their deltas (indices 30-49) and
# delta-deltas (indices 60-79): a 60-dimensional feature vector.
cqcc20 = bob.bio.spear.extractor.CQCCFeatures(
    features_mask=numpy.r_[
        numpy.arange(0, 20), numpy.arange(30, 50), numpy.arange(60, 80)
    ]
)
|
from SimpleCV import Camera, Image, Color, TemporalColorTracker, ROI, Display
import matplotlib.pyplot as plt
# Demo script: train a TemporalColorTracker on a small central ROI,
# plot the learned channel signals, then run live recognition.
cam = Camera(1)
tct = TemporalColorTracker()
img = cam.getImage()
# Central 10%-of-frame region used for training and recognition.
roi = ROI(img.width*0.45,img.height*0.45,img.width*0.1,img.height*0.1,img)
tct.train(cam,roi=roi,maxFrames=250,pkWndw=20)
# Matplot Lib example plotting
plotc = {'r':'r','g':'g','b':'b','i':'m','h':'y'}
for key in tct.data.keys():
    plt.plot(tct.data[key],plotc[key])
    # Mark detected peaks (red) and valleys (blue).
    for pt in tct.peaks[key]:
        plt.plot(pt[0],pt[1],'r*')
    for pt in tct.valleys[key]:
        plt.plot(pt[0],pt[1],'b*')
    plt.grid()
plt.show()
disp = Display((800,600))
while disp.isNotDone():
    img = cam.getImage()
    result = tct.recognize(img)
    # Render the live signal to a PNG and overlay it on the camera frame.
    plt.plot(tct._rtData,'r-')
    plt.grid()
    plt.savefig('temp.png')
    plt.clf()
    plotImg = Image('temp.png')
    roi = ROI(img.width*0.45,img.height*0.45,img.width*0.1,img.height*0.1,img)
    roi.draw(width=3)
    img.drawText(str(result),20,20,color=Color.RED,fontsize=32)
    img = img.applyLayers()
    img = img.blit(plotImg.resize(w=img.width,h=img.height),pos=(0,0),alpha=0.5)
    img.save(disp)
|
#! /usr/bin/env python
import argparse
import sys
from yamltempl import yamlutils, vtl
def main():
    """Parse CLI arguments, merge the YAML data into the VTL template, and
    write the merged result (UTF-8) to the output file."""
    parser = argparse.ArgumentParser(
        description="Merge yaml data into a Velocity Template Language template")
    parser.add_argument('yamlfile',
                        metavar='filename.yaml',
                        type=argparse.FileType('r'),
                        help='the yaml file containing the data')
    parser.add_argument('-t', '--template',
                        metavar='file',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='the template file. If omitted, the template '
                        'is read from standard input')
    parser.add_argument('-o', '--output',
                        metavar='file',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='the output file, where the result should be '
                        'written. Standard output is used if omitted')
    args = parser.parse_args()
    yamldata = yamlutils.ordered_load(args.yamlfile)
    args.yamlfile.close()
    # .decode()/.encode() on raw file data: this script is Python 2 only.
    templ = args.template.read().decode('utf8')
    args.template.close()
    result = vtl.merge(yamldata, templ)
    args.output.write(result.encode('utf8'))
    args.output.close()
if __name__ == '__main__':
main()
|
ards
from NossiPack.User import Userlist
from NossiPack.VampireCharacter import VampireCharacter
from NossiPack.krypta import DescriptiveError
from NossiSite.base import app as defaultapp, log
from NossiSite.helpers import checklogin
def register(app=None):
if app is None:
app = defaultapp
@app.route("/setfromsource/")
def setfromsource():
checklogin()
source = request.args.get("source")
ul = Userlist()
u = ul.loaduserbyname(session.get("user"))
try:
new = VampireCharacter()
if new.setfromdalines(source[-7:]):
u.sheetid = u.savesheet(new)
ul.saveuserlist()
flash("character has been overwritten with provided Dalines sheet!")
else:
flash("problem with " + source)
except Exception:
log.exception("setfromsource:")
flash(
"Sorry "
+ session.get("user").capitalize()
+ ", I can not let you do that."
)
return redirect(url_for("charsheet"))
@app.route("/timetest")
def timetest():
return str(time.time())
@app.route("/boardgame<int:size>_<seed>.json")
@app.route("/boardgame<int:size>_.json")
def boardgamemap(size, seed=""):
if size > 100:
size = 100
rx = random.Random()
if seed:
rx.seed(str(size) + str(seed))
def r(a=4):
for _ in range(a):
yield rx.randint(1, 10)
def e(inp, dif):
for i in inp:
yield 2 if i == 10 else (1 if i >= dif else 0)
def fpik(inp, pref="FPIK"):
vals = list(inp)
vals = [(v if v != 2 else (2 if sum(vals) < 4 else 1)) for v in vals]
for i, p in enumerate(pref):
yield '"' + p + '": ' + str(vals[i])
def cell(): # i, j):
difficulty = 8
"""6 + (
(9 if i == j else
8)
if i in [0, size - 1] and j in [0, size - 1] else
(7 if j in [0, size - 1] else
(6 if j % 2 == 1 and (i in [0, size - 1] or j in [0, size - 1]) else
(5 if 0 < i < size - 1 else 8))))"""
for li in fpik(e(r(), difficulty)):
yield li
first = True
def notfirst():
nonlocal first
if first:
first = False
return True
return False
def resetfirst():
nonlocal first
first = True
def generate():
yield '{"board": ['
for x in range(size):
| yield ("," if not f | irst else "") + "["
resetfirst()
for y in range(size):
yield ("" if notfirst() else ",") + '{ "x":%d, "y":%d, ' % (
x,
y,
) + ",".join(
cell(
# x, y
)
) + "}"
yield "]"
yield "]}"
return Response(generate(), mimetype="text/json")
@app.route("/gameboard/<int:size>/")
@app.route("/gameboard/<int:size>/<seed>")
def gameboard(size, seed=""):
if size > 20:
size = 20
return render_template("gameboard.html", size=size, seed=seed)
@app.route("/chargen/standard")
def standardchar():
return redirect(
url_for("chargen", a=3, b=5, c=7, abia=5, abib=9, abic=13, shuffle=1)
)
@app.route("/cards/", methods=["GET"])
@app.route("/cards/<command>", methods=["POST", "GET"])
def cards(command: str = None):
checklogin()
deck = Cards.getdeck(session["user"])
try:
if request.method == "GET":
if command is None:
return deck.serialized_parts
elif request.method == "POST":
par = request.get_json()["parameter"]
if command == "draw":
return {"result": list(deck.draw(par))}
elif command == "spend":
return {"result": list(deck.spend(par))}
elif command == "returnfun":
return {"result": list(deck.pilereturn(par))}
elif command == "dedicate":
if ":" not in par:
par += ":"
return {"result": list(deck.dedicate(*par.split(":", 1)))}
elif command == "remove":
return {"result": list(deck.remove(par))}
elif command == "free":
message = deck.undedicate(par)
for m in message:
flash("Affected Dedication: " + m)
return {"result": "ok", "messages": list(message)}
elif command == "free":
affected, message = deck.free(par)
for m in message:
flash("Affected Dedication: " + m)
return {
"result": list(affected),
"messages": message,
}
else:
return {"result": "error", "error": f"invalid command {command}"}
return render_template("cards.html", cards=deck)
except DescriptiveError as e:
return {"result": "error", "error": e.args[0]}
except TypeError:
return {"result": "error", "error": "Parameter is not in a valid Format"}
finally:
Cards.savedeck(session["user"], deck)
@app.route("/chargen", methods=["GET", "POST"])
def chargen_menu():
if request.method == "POST":
f = dict(request.form)
if not f.get("vampire", None):
return redirect(
url_for(
"chargen",
a=f["a"],
b=f["b"],
c=f["c"],
abia=f["abia"],
abib=f["abib"],
abic=f["abic"],
shuffle=1 if f.get("shuffle", 0) else 0,
)
)
return redirect(
url_for(
"chargen",
a=f["a"],
b=f["b"],
c=f["c"],
abia=f["abia"],
abib=f["abib"],
abic=f["abic"],
shuffle=1 if f["shuffle"] else 0,
vamp=f["discipline"],
back=f["back"],
)
)
return render_template("generate_dialog.html")
@app.route("/chargen/<a>,<b>,<c>,<abia>,<abib>,<abic>,<shuffle>")
@app.route("/chargen/<a>,<b>,<c>,<abia>,<abib>,<abic>,<shuffle>,<vamp>,<back>")
def chargen(a, b, c, abia, abib, abic, shuffle, vamp=None, back=None):
"""
Redirects to the charactersheet/ editor(if logged in) of a randomly
generated character
:param a: points to be allocated in the first attribute group
:param b: points to be allocated in the second attribute group
:param c: points to be allocated in the third attribute group
:param abia: points to be allocated in the first ability group
:param abib: points to be allocated in the second ability group
:param abic: points to be allocated in the third ability group
:param shuffle: if the first/second/third groups should be shuffled (each)
:param vamp: if not None, character will be a vampire, int(vamp)
is the amount of discipline points
:param back: background points
"""
try:
char = VampireCharacter.makerandom(
1,
5,
int(a),
int(b),
int(c),
int(abia),
int(abib),
int(abic),
int(shuffle),
|
'<group/>'
'<project>API_client_development</project>'
'<description>async testing</description>'
'</job>'
'<job id="78f491e7-714f-44c6-bddb-8b3b3a961ace">'
'<name>test_job_1</name>'
'<group/>'
'<project>API_client_development</project>'
'<description/>'
'</job>'
'</jobs>')
xml_tree = etree.fromstring(xml_str)
xmlp.parse(xml_tree, 'list', self.parser.jobs_parse_table)
    def test_execution_creates_single_execution_correctly(self):
        """Parsing execution.xml yields the complete expected execution dict."""
        # widen the diff output for this large dict comparison
        nt.assert_equal.__self__.maxDiff = 1000
        test_data_file = path.join(config.rundeck_test_data_dir,
                                   'execution.xml')
        with open(test_data_file) as ex_fl:
            xml_str = ex_fl.read()
        expected = {
            'id': '117',
            'href': 'http://192.168.50.2:4440/execution/follow/117',
            'status': 'succeeded',
            'project': 'API_client_development',
            'user': 'admin',
            'date-started': {
                'unixtime': '1437474661504',
                'time': '2015-07-21T10:31:01Z'
            },
            'date-ended': {
                'unixtime': '1437474662344',
                'time': '2015-07-21T10:31:02Z'
            },
            'job': {
                'id': '78f491e7-714f-44c6-bddb-8b3b3a961ace',
                'averageDuration': '2716',
                'name': 'test_job_1',
                'group': '',
                'project': 'API_client_development',
                'description': '',
            },
            'description': 'echo "Hello"',
            'argstring': '',
            'successfulNodes': {
                'list': [
                    {'name': 'localhost'}
                ]
            }
        }
        xml_tree = etree.fromstring(xml_str)
        nt.assert_equal(expected,
                        xmlp.parse(xml_tree, 'composite',
                                   self.parser.execution_parse_table))
    @raises(ParseError)
    def test_execution_raises_if_given_wrong_tag(self):
        """A non-execution tag in 'composite' mode must raise ParseError."""
        xmlp.parse(self.bogus_xml, 'composite',
                   self.parser.execution_parse_table)
    def test_date_creates_dates_correctly(self):
        """Both date-started and date-ended parse to unixtime/time dicts."""
        start_str = ('<date-started unixtime="1437474661504">'
                     '2015-07-21T10:31:01Z'
                     '</date-started>')
        end_str = ('<date-ended unixtime="1437474662344">'
                   '2015-07-21T10:31:02Z'
                   '</date-ended>')
        start_tree = etree.fromstring(start_str)
        end_tree = etree.fromstring(end_str)
        start_expected = {
            'unixtime': '1437474661504',
            'time': '2015-07-21T10:31:01Z'
        }
        nt.assert_equal(start_expected,
                        xmlp.parse(start_tree, 'attribute text',
                                   self.parser.start_date_parse_table))
        end_expected = {
            'unixtime': '1437474662344',
            'time': '2015-07-21T10:31:02Z'
        }
        # NOTE(review): table names are inconsistent (start_date_parse_table
        # vs date_ended_parse_table) — defined in the parser module; confirm.
        nt.assert_equal(end_expected,
                        xmlp.parse(end_tree, 'attribute text',
                                   self.parser.date_ended_parse_table))
    @raises(ParseError)
    def test_date_raises_if_given_wrong_tag(self):
        """A non-date tag in 'attribute text' mode must raise ParseError."""
        xmlp.parse(self.bogus_xml, 'attribute text',
                   self.parser.start_date_parse_table)
    def test_node_creates_node_correctly(self):
        """A <node> element parses to its name attribute."""
        xml_str = '<node name="localhost"/>'
        xml_tree = etree.fromstring(xml_str)
        expected = {'name': 'localhost'}
        nt.assert_equal(expected, xmlp.parse(xml_tree, 'attribute',
                                             self.parser.node_parse_table))
    @raises(ParseError)
    def test_node_raises_if_given_wrong_tag(self):
        """A non-node tag must raise ParseError (parsed in 'text' mode here)."""
        xmlp.parse(self.bogus_xml, 'text', self.parser.node_parse_table)
    def test_nodes_create_node_list(self):
        """<successfulNodes> parses to a list of node-name dicts."""
        xml_str = ('<successfulNodes><node name="localhost"/>'
                   '<node name="otherhost"/></successfulNodes>')
        xml_tree = etree.fromstring(xml_str)
        expected = {'list': [{'name': 'localhost'}, {'name': 'otherhost'}]}
        nt.assert_equal(expected,
                        xmlp.parse(xml_tree, 'list',
                                   self.parser.successful_nodes_parse_table))
    @raises(ParseError)
    def test_nodes_raises_if_given_wrong_tag(self):
        """A non-successfulNodes tag in 'list' mode must raise ParseError."""
        xmlp.parse(self.bogus_xml, 'list',
                   self.parser.successful_nodes_parse_table)
    def test_option_creates_option_correctly(self):
        """An <option> element parses to its name/value attributes."""
        xml_str = '<option name="arg1" value="foo"/>'
        xml_tree = etree.fromstring(xml_str)
        expected = {'name': 'arg1', 'value': 'foo'}
        nt.assert_equal(expected,
                        xmlp.parse(xml_tree, 'attribute',
                                   self.parser.option_parse_table))
    @raises(ParseError)
    def test_option_raises_if_given_wrong_tag(self):
        """A non-option tag in 'attribute' mode must raise ParseError."""
        xmlp.parse(self.bogus_xml, 'attribute', self.parser.option_parse_table)
    def test_options_creates_option_list_correctly(self):
        """<options> parses to a list of name/value option dicts."""
        xml_str = ('<options>'
                   '<option name="arg1" value="foo"/>'
                   '<option name="arg2" value="bar"/>'
                   '</options>')
        xml_tree = etree.fromstring(xml_str)
        expected = {
            'list': [
                {'name': 'arg1', 'value': 'foo'},
                {'name': 'arg2', 'value': 'bar'}
            ]
        }
        nt.assert_equal(expected, xmlp.parse(xml_tree, 'list',
                                             self.parser.options_parse_table))
    @raises(ParseError)
    def test_options_raises_if_given_wrong_tag(self):
        """A non-options tag in 'list' mode must raise ParseError."""
        xmlp.parse(self.bogus_xml, 'list', self.parser.options_parse_table)
def test_executions_create_executions_array_correctly(self):
nt.assert_equal.__self__.maxDiff = 1000
test_data_file = path.join(config.rundeck_test_data_dir,
'executions.xml')
with open(test_data_file) as ex_fl:
xml_str = ex_fl.read()
xml_tree = etree.fromstring(xml_str)
expected = {
'count': 5,
'list': [
{
'argstring': '-arg1 foo',
'date-ended': {
'time': '2015-05-28T10:44:04Z',
'unixtime': '1432809844967'
},
'date-started': {
'time': '2015-05-28T10:44:04Z',
'unixtime': '1432809844290'
},
'description': 'echo $RD_OPTION_ARG1',
'href': 'http://192.168.50.2:4440/execution/follow/53',
'id': '53',
'job': {
'averageDuration': '1022',
'description': '',
'group': '',
'id': '3b8a86d5-4fc3-4cc1-95a2-8b51421c2069',
'name': 'job_with_args',
'options': {
'list': [{'name': 'arg1', 'value': 'foo'}]
},
'project': 'API_client_development'
},
'project': 'API_client_development',
'status': 'succeeded',
'successfulNodes': {'list': [{'name': 'localhost'}]},
'user': 'admin'
},
{
'argstring': '-arg1 foo',
'date-ended': {
'time': '2015-05-28T10:43:32Z',
'unixtime': '1432809812305'
},
'date-started': {
'time': '2015-05-28T10:43:31Z',
'unixtime': '1432809811697'
},
'description': 'echo $RD_OPTION_ARG1',
'href': 'http://192.168.50.2:4440/execution/follow/52',
'id': '52',
|
import unittest
from django.utils import inspect
class Person:
    """Fixture class exposing one method per parameter-signature shape."""

    def no_arguments(self):
        """Take nothing beyond self; always give back None."""
        return

    def one_argument(self, something):
        """Echo the single positional argument unchanged."""
        return something

    def just_args(self, *args):
        """Collect only variadic positionals and return them as a tuple."""
        return args

    def all_kinds(self, name, address='home', age=25, *args, **kwargs):
        """Accept every parameter kind; hand back only the keyword extras."""
        return kwargs
class | TestInspectMethods(unittest.TestCase):
def test_get_func_full_args_no_arguments(self):
self.assertEqual(inspect.get_func_full_args(Person.no_arguments), [])
def test_get_func_full_args_one_argument(self):
self.assertEqual(inspect.get_func_full_args(Person.one_argument), [('something',)])
def test_get_func_full_args_all_arguments(self):
arguments = [('name',), ('address', 'home'), ('age', 25), ('*args',), ('**kwargs',)]
self | .assertEqual(inspect.get_func_full_args(Person.all_kinds), arguments)
def test_func_accepts_var_args_has_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.just_args), True)
def test_func_accepts_var_args_no_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.one_argument), False)
def test_method_has_no_args(self):
self.assertIs(inspect.method_has_no_args(Person.no_arguments), True)
self.assertIs(inspect.method_has_no_args(Person.one_argument), False)
self.assertIs(inspect.method_has_no_args(Person().no_arguments), True)
self.assertIs(inspect.method_has_no_args(Person().one_argument), False)
|
from __f | uture__ import (absolute_import, print_function, division)
class NotImplementedException(Exception):
    """Project-specific marker exception for unimplemented functionality.

    NOTE(review): Python's builtin NotImplementedError serves the same
    purpose; confirm whether callers depend on this distinct type.
    """
    pass
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation | (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI | CENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import (
Connection, DagModel, DagRun, DagTag, Pool, RenderedTaskInstanceFields, SlaMiss, TaskInstance, Variable,
errors,
)
from airflow.models.dagcode import DagCode
from airflow.utils.db import add_default_pool_if_not_exists, create_default_connections, \
create_session
def clear_db_runs():
    """Delete every DagRun and TaskInstance row."""
    with create_session() as session:
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
def clear_db_dags():
    """Delete every DagTag and DagModel row (tags first)."""
    with create_session() as session:
        session.query(DagTag).delete()
        session.query(DagModel).delete()
def clear_db_sla_miss():
    """Delete every SlaMiss row."""
    with create_session() as session:
        session.query(SlaMiss).delete()
def clear_db_errors():
    """Delete every recorded DAG ImportError row."""
    with create_session() as session:
        session.query(errors.ImportError).delete()
def clear_db_pools():
    """Delete every Pool row, then recreate the default pool."""
    with create_session() as session:
        session.query(Pool).delete()
        add_default_pool_if_not_exists(session)
def clear_db_connections():
    """Delete every Connection row, then recreate the default connections."""
    with create_session() as session:
        session.query(Connection).delete()
        create_default_connections(session)
def clear_db_variables():
    """Delete every Variable row."""
    with create_session() as session:
        session.query(Variable).delete()
def clear_db_dag_code():
    """Delete every DagCode row."""
    with create_session() as session:
        session.query(DagCode).delete()
def set_default_pool_slots(slots):
    """Set the default pool's slot count.

    Persisted when create_session() commits on exit — confirm that the
    session context manager commits, as this relies on it.
    """
    with create_session() as session:
        default_pool = Pool.get_default_pool(session)
        default_pool.slots = slots
def clear_rendered_ti_fields():
    """Delete every RenderedTaskInstanceFields row."""
    with create_session() as session:
        session.query(RenderedTaskInstanceFields).delete()
|
import threading
# lock for each project or domain
# treat it as a singleton
# all file operation should be in lock region
class Locker(object):
    """Registry of named locks, one per project or domain.

    Treated as a singleton: the lock table is a class attribute shared by
    all instances. All file operations are expected to happen between
    acquire(name) and release(name).
    """

    _lock = {}
    # Guards creation/removal of _lock entries. The original checked
    # `name not in self._lock` and then inserted without synchronization,
    # so two threads could race and install different Lock objects for
    # the same name.
    _registry_guard = threading.Lock()

    def acquire(self, name):
        """Acquire the lock for `name`, creating it on first use."""
        with Locker._registry_guard:
            lock = self._lock.setdefault(name, threading.Lock())
        lock.acquire()

    def release(self, name):
        """Release the lock for `name`; a no-op for unknown names."""
        lock = self._lock.get(name)
        if lock is None:
            # lock hasn't been created
            return
        try:
            lock.release()
        except Exception:
            # best-effort: releasing an unlocked lock raises; preserve the
            # original swallow-and-continue behavior (was a bare except:)
            pass

    def remove(self, name):
        """Drop `name` from the registry and release its lock.

        Caller must hold the lock. The entry is removed before releasing
        so a concurrent acquire() creates a fresh lock.
        """
        with Locker._registry_guard:
            lock = self._lock.pop(name, None)
        if lock is None:
            return
        try:
            lock.release()
        except Exception:
            pass
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either versio | n 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# alo | ng with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry definition for fixture datasets."""
from flask_registry import RegistryProxy
from invenio_ext.registry import ModuleAutoDiscoveryRegistry
from invenio_utils.datastructures import LazyDict
fixtures_proxy = RegistryProxy(
'fixtures', ModuleAutoDiscoveryRegistry, 'fixtures')
def fixtures_loader():
    """Collect every ``*Data`` class exported by registered fixture modules.

    First registration wins when two modules export the same name.
    """
    datasets = {}
    for module in fixtures_proxy:
        exported = getattr(module, '__all__', dir(module))
        for name in exported:
            if name.endswith('Data') and name not in datasets:
                datasets[name] = getattr(module, name)
    return datasets
fixtures = LazyDict(fixtures_loader)
|
###
# Copyright (c) 2008, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# d | ocumentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior | written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from hashlib import md5
from hashlib import sha1 as sha
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
.config.identity.admin_password,
cls.config.identity.admin_tenant_name).tenant_id
cls.keypairs = {}
cls.security_groups = {}
cls.network = []
cls.servers = []
cls.floating_ips = []
    def setUp(self):
        """Per-test setup: verify the OpenStack clients are usable."""
        super(TestVcenter, self).setUp()
        self.check_clients_state()
def tearDown(self):
super(TestVcenter, self).tearDown()
if self.manager.clients_initialized:
if self.servers:
for server in self.servers:
try:
self._delete_server(server)
self.s | ervers.remove(server)
except Exception:
LOG.debug(traceback.format_exc())
LOG.debug("Server was already deleted.")
    def test_1_vcenter_create_servers(self):
        """vCenter: Launch instance
        Target component: Nova
        Scenario:
            1. Create a new security group (if it doesn`t exist yet).
            2. Create an instance using the new security group.
            3. Delete instance.
        Duration: 200 s.
        Available since release: 2014.2-6.1
        Deployment tags: use_vcenter
        """
        self.check_image_exists()
        # Step 1: security group, cached per tenant across tests
        if not self.security_groups:
            self.security_groups[self.tenant_id] = self.verify(
                25,
                self._create_security_group,
                1,
                "Security group can not be created.",
                'security group creation',
                self.compute_client)
        name = rand_name('ost1_test-server-smoke-')
        security_groups = [self.security_groups[self.tenant_id].name]
        img_name = 'TestVM-VMDK'
        # Step 2: boot the instance from the vCenter test image
        server = self.verify(
            200,
            self._create_server,
            2,
            "Creating instance using the new security group has failed.",
            'image creation',
            self.compute_client, name, security_groups, None, None, img_name
        )
        # Step 3: clean up the instance
        self.verify(30, self._delete_server, 3,
                    "Server can not be deleted.",
                    "server deletion", server)
    def test_3_vcenter_check_public_instance_connectivity_from_instance(self):
        """vCenter: Check network connectivity from instance via floating IP
        Target component: Nova
        Scenario:
            1. Create a new security group (if it doesn`t exist yet).
            2. Create an instance using the new security group.
            3. Create a new floating IP
            4. Assign the new floating IP to the instance.
            5. Check connectivity to the floating IP using ping command.
            6. Check that public IP 8.8.8.8 can be pinged from instance.
            7. Disassociate server floating ip.
            8. Delete floating ip
            9. Delete server.
        Duration: 300 s.
        Available since release: 2014.2-6.1
        Deployment tags: nova_network, use_vcenter
        """
        self.check_image_exists()
        # Step 1: security group, cached per tenant across tests
        if not self.security_groups:
            self.security_groups[self.tenant_id] = self.verify(
                25, self._create_security_group, 1,
                "Security group can not be created.",
                'security group creation',
                self.compute_client)
        name = rand_name('ost1_test-server-smoke-')
        security_groups = [self.security_groups[self.tenant_id].name]
        img_name = 'TestVM-VMDK'
        # Step 2: boot the instance
        server = self.verify(250, self._create_server, 2,
                             "Server can not be created.",
                             "server creation",
                             self.compute_client, name, security_groups, None,
                             None, img_name)
        # Steps 3-4: create a floating IP and attach it to the instance
        floating_ip = self.verify(
            20,
            self._create_floating_ip,
            3,
            "Floating IP can not be created.",
            'floating IP creation')
        self.verify(20, self._assign_floating_ip_to_instance,
                    4, "Floating IP can not be assigned.",
                    'floating IP assignment',
                    self.compute_client, server, floating_ip)
        # track for teardown in case the test aborts before step 8
        self.floating_ips.append(floating_ip)
        ip_address = floating_ip.ip
        LOG.info('is address is {0}'.format(ip_address))
        LOG.debug(ip_address)
        # Steps 5-6: ping the floating IP, then ping out from the VM
        self.verify(600, self._check_vm_connectivity, 5,
                    "VM connectivity doesn`t function properly.",
                    'VM connectivity checking', ip_address,
                    30, (6, 60))
        self.verify(600, self._check_connectivity_from_vm,
                    6, ("Connectivity to 8.8.8.8 from the VM doesn`t "
                        "function properly."),
                    'public connectivity checking from VM', ip_address,
                    30, (6, 60))
        # Steps 7-9: detach and delete the floating IP, delete the server
        self.verify(10, self.compute_client.servers.remove_floating_ip,
                    7, "Floating IP cannot be removed.",
                    "removing floating IP", server, floating_ip)
        self.verify(10, self.compute_client.floating_ips.delete,
                    8, "Floating IP cannot be deleted.",
                    "floating IP deletion", floating_ip)
        if self.floating_ips:
            self.floating_ips.remove(floating_ip)
        self.verify(30, self._delete_server, 9,
                    "Server can not be deleted. ",
                    "server deletion", server)
def test_2_vcenter_check_internet_connectivity_without_floatingIP(self):
"""vCenter: Check network connectivity from instance without floating \
IP
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
(if it doesn`t exist yet).
3. Check that public IP 8.8.8.8 can be pinged from instance.
4. Delete server.
Duration: 300 s.
Available since release: 2014.2-6.1
Deployment tags: nova_network, use_vcenter
"""
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25, self._create_security_group, 1,
"Security group can not be created.",
'security group creation', self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
img_name = 'TestVM-VMDK'
compute = None
server = self.verify(
250, self._create_server, 2,
"Server can not be created.",
'server creation',
self.compute_client, name, security_groups, None, None, img_name)
try:
for addr in server.addresses:
if addr.startswith('novanetwork'):
instance_ip = server.addresses[addr][0]['addr']
except Exception:
LOG.debug(traceback.format_exc())
self.fail("Step 3 failed: cannot get instance details. "
"Please refer to OpenStack logs for more details.")
self.verify(400, self._check_connectivity_from_vm,
3, ("Connectivity to 8.8.8.8 from the VM doesn`t "
"function properly."),
'public connectivity checking from VM',
instance_ip, 30, (6, 30), compute)
self.verify(30, self._delete_server, 4,
"Server can not be deleted. ",
"server deletion", server)
class TestVcenterImageAction(nmanager.SmokeChecksTest):
"""Test class verifies the following:
- verify that image can be created;
- verify that instance can be booted from created image;
- verify that snapshot can be created from an instance;
- verify that instance can be booted from a snapshot.
"""
@classmethod
def setUpClass(cls):
super(TestVcenterImageAction, cls).setUpClass()
if cls.manager.clients_initialized:
cls.micro_flavors = |
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import QuantileTransformer
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import quantile_transform
from .data import power_transform
from .data import PowerTransformer
from .data import PolynomialFeatures
from ._encoders import OneHotEncoder
from ._encoders import OrdinalEncoder
from .label import label_binarize
from .label import LabelBinarizer
fr | om .label import LabelEncoder
from .label import MultiLabelBinarizer
from ._discretization import KBinsDiscretizer
from .imputation import Imputer
# stub, remove in version 0.21
from .data import CategoricalEncoder # noqa
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KBinsDiscretizer',
'KernelCen | terer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'QuantileTransformer',
'Normalizer',
'OneHotEncoder',
'OrdinalEncoder',
'PowerTransformer',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
'quantile_transform',
'power_transform',
]
|
et, field, self._augmented_sources_map):
return super(FetcherDataPortal, self).get_spot_value(
asset, field, dt, data_frequency)
# otherwise just return a fixed value
return int(asset)
# XXX: These aren't actually the methods that are used by the superclasses,
# so these don't do anything, and this class will likely produce unexpected
# results for history().
def _get_daily_window_for_sid(self, asset, field, days_in_window,
extra_slot=True):
return np.arange(days_in_window, dtype=np.float64)
def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
    """Create a temporary assets sqlite database.

    This is meant to be used as a context manager.

    Parameters
    ----------
    url : string
        The URL for the database connection.
    **frames
        The frames to pass to the AssetDBWriter.
        By default this maps equities:
        ('A', 'B', 'C') -> map(ord, 'ABC')

    See Also
    --------
    empty_assets_db
    tmp_asset_finder
    """
    # sentinel so callers may explicitly pass equities=None
    _default_equities = sentinel('_default_equities')

    def __init__(self,
                 url='sqlite:///:memory:',
                 equities=_default_equities,
                 **frames):
        self._url = url
        if equities is self._default_equities:
            # No explicit equities: synthesize three toy equities A/B/C
            # keyed by their character ordinals.
            equities = make_simple_equity_info(
                list(map(ord, 'ABC')),
                pd.Timestamp(0),
                pd.Timestamp('2015'),
            )
        frames['equities'] = equities
        self._frames = frames
        # FIX: the original assigned self._eng = None twice in __init__;
        # a single assignment suffices (set in __enter__, cleared in __exit__).
        self._eng = None

    def __enter__(self):
        self._eng = eng = create_engine(self._url)
        AssetDBWriter(eng).write(**self._frames)
        return eng

    def __exit__(self, *excinfo):
        assert self._eng is not None, '_eng was not set in __enter__'
        self._eng.dispose()
        self._eng = None
def empty_assets_db():
    """Context manager yielding an assets db that contains no equities.

    See Also
    --------
    tmp_assets_db
    """
    return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
    """Create a temporary asset finder using an in memory sqlite db.
    Parameters
    ----------
    url : string
        The URL for the database connection.
    finder_cls : type, optional
        The type of asset finder to create from the assets db.
    **frames
        Forwarded to ``tmp_assets_db``.
    See Also
    --------
    tmp_assets_db
    """
    def __init__(self,
                 url='sqlite:///:memory:',
                 finder_cls=AssetFinder,
                 **frames):
        self._finder_cls = finder_cls
        super(tmp_asset_finder, self).__init__(url=url, **frames)
    def __enter__(self):
        # wrap the engine produced by tmp_assets_db in the finder class
        return self._finder_cls(super(tmp_asset_finder, self).__enter__())
def empty_asset_finder():
    """Context manager yielding an asset finder over an empty assets db.

    See Also
    --------
    empty_assets_db
    tmp_assets_db
    tmp_asset_finder
    """
    return tmp_asset_finder(equities=None)
class tmp_trading_env(tmp_asset_finder):
    """Create a temporary trading environment.
    Parameters
    ----------
    load : callable, optional
        Function that returns benchmark returns and treasury curves.
    finder_cls : type, optional
        The type of asset finder to create from the assets db.
    **frames
        Forwarded to ``tmp_assets_db``.
    See Also
    --------
    empty_trading_env
    tmp_asset_finder
    """
    def __init__(self, load=None, *args, **kwargs):
        super(tmp_trading_env, self).__init__(*args, **kwargs)
        self._load = load
    def __enter__(self):
        # NOTE(review): tmp_asset_finder.__enter__ returns the finder; its
        # .engine attribute is handed to TradingEnvironment as the asset db
        # path — confirm the finder class exposes `engine`.
        return TradingEnvironment(
            load=self._load,
            asset_db_path=super(tmp_trading_env, self).__enter__().engine,
        )
def empty_trading_env():
    """Create a temporary trading environment backed by no equities."""
    return tmp_trading_env(equities=None)
class SubTestFailures(AssertionError):
    """Aggregate error raised by ``subtest`` when any sub-case fails.

    Each failure is a ``(scope, exception)`` pair, where ``scope`` maps
    parameter names to the values that triggered the failure.
    """
    def __init__(self, *failures):
        self.failures = failures

    def __str__(self):
        # BUG FIX: the original had a stray trailing comma after
        # ``self.failures``, which made the bare generator expression in the
        # join() call a SyntaxError ("Generator expression must be
        # parenthesized"). The comma is removed here.
        return 'failures:\n %s' % '\n '.join(
            '\n '.join((
                ', '.join('%s=%r' % item for item in scope.items()),
                '%s: %s' % (type(exc).__name__, exc),
            )) for scope, exc in self.failures
        )
@nottest
def subtest(iterator, *_names):
    """
    Construct a subtest in a unittest.
    Consider using ``catalyst.testing.parameter_space`` when subtests
    are constructed over a single input or over the cross-product of multiple
    inputs.
    ``subtest`` works by decorating a function as a subtest. The decorated
    function will be run by iterating over the ``iterator`` and *unpacking the
    values into the function. If any of the runs fail, the result will be put
    into a set and the rest of the tests will be run. Finally, if any failed,
    all of the results will be dumped as one failure.
    Parameters
    ----------
    iterator : iterable[iterable]
        The iterator of arguments to pass to the function.
    *name : iterator[str]
        The names to use for each element of ``iterator``. These will be used
        to print the scope when a test fails. If not provided, it will use the
        integer index of the value as the name.
    Examples
    --------
    ::
        class MyTest(TestCase):
            def test_thing(self):
                # Example usage inside another test.
                @subtest(([n] for n in range(100000)), 'n')
                def subtest(n):
                    self.assertEqual(n % 2, 0, 'n was not even')
                subtest()
            @subtest(([n] for n in range(100000)), 'n')
            def test_decorated_function(self, n):
                # Example usage to parameterize an entire function.
                self.assertEqual(n % 2, 1, 'n was not odd')
    Notes
    -----
    We use this when we:
    * Will never want to run each parameter individually.
    * Have a large parameter space we are testing
      (see tests/utils/test_events.py).
    ``nose_parameterized.expand`` will create a test for each parameter
    combination which bloats the test output and makes the travis pages slow.
    We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
    nose2 do not support ``addSubTest``.
    See Also
    --------
    catalyst.testing.parameter_space
    """
    def dec(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            names = _names
            failures = []
            for scope in iterator:
                scope = tuple(scope)
                try:
                    f(*args + scope, **kwargs)
                except Exception as e:
                    # BUG FIX: the original lazily created one shared
                    # itertools.count() and zip() consumed it across
                    # failures, so the second and later failures were
                    # labelled with non-zero-based indices. Use fresh
                    # zero-based indices per failure instead.
                    keys = names if names else range(len(scope))
                    failures.append((dict(zip(keys, scope)), e))
            if failures:
                raise SubTestFailures(*failures)
        return wrapped
    return dec
class MockDailyBarReader(object):
    """Minimal daily-bar reader stub: every query answers a constant price."""

    def get_value(self, col, sid, dt):
        """Ignore all arguments and return the constant price 100."""
        constant_price = 100
        return constant_price
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isi | nstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def create_mock_adjustments(tempdir, days, splits=None, dividends=None,
                            mergers=None):
    """Write mock adjustment data into a SQLite db under *tempdir*.

    Returns the path of the written database file.
    """
    db_path = tempdir.getpath("test_adjustments.db")
    adjustments = create_mock_adjustment_data(splits, dividends, mergers)
    writer = SQLiteAdjustmentWriter(db_path, MockDailyBarReader(), days)
    writer.write(*adjustments)
    return db_path
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
"""
|
e graph is ready to start a new frame.
'''
self.wait_graph_event(self.eventStartNewFrame)
def open(self, dummyCamera = False, dummyPath = None, callback=None):
'''
Open up the Temca C++ DLL.
If dummyCamera is True, create a dummy TEMCA image source using...
either a real camera, image, directory, or movie according to dummyPath which MUST be specified
as no default path is provided. If dummyPath is an integer string, then an OpenCV camera will be used
corresponding to that index.
'''
if callback == None:
callback = self.statusCallback
# prevent the callback from being garbage collected !!!
self.callback = STATUSCALLBACKFUNC(callback)
self.dummyPath = dummyPath
t = time.clock()
if not TemcaGraphDLL.open(dummyCamera, self.dummyPath, self.callback):
raise EnvironmentError('Cannot open TemcaGraphDLL. Possiblities: camera, is offline, not installed, or already in use')
logging.info("TemcaGraph DLL initialized in %s seconds" % (time.clock() - t))
self.eventInitCompleted.wait()
# get info about frame dimensions
fi = self.get_camera_info()
self.image_width = fi['width']
self.image_height = fi['height']
self.pixel_depth = fi['pixel_depth'] # 16 ALWAYS
self.camera_bpp = fi['camera_bpp'] # 12 for Ximea (upshift to full 16 bpp)
self.camera_model = fi['camera_model']
self.camera_id = fi['camera_id']
# if this is changed dynamically, reallocate preview frames
self.set_parameter('preview_decimation_factor', self.preview_decimation_factor)
    def close(self):
        '''
        Close down all graphs and release the underlying Temca C++ DLL.

        Counterpart to :meth:`open`; call once when finished with the graph.
        '''
        TemcaGraphDLL.close()
    def set_mode(self, graphType):
        '''
        Sets the overall mode of operation for the Temca graph.
        Each mode activates a subset of the overall graph.::
            graphType   SYNC        ASYNC
            -----------------------------------------------------
            temca :     ximea, postCap,     QC Stitch
                                            Focus
                                            FileWriter
            raw :       ximea, postCap,     FileWriter
            preview :   ximea, postCap,     QC
                                            Focus

        Returns whatever status value the DLL reports for the mode change.
        '''
        return TemcaGraphDLL.set_mode(graphType)
    def set_parameter(self, parameter, value):
        '''
        General purpose way to set random parameters on the graph.
        'value' must be an int.  Valid parameters are::
            'exposure' for Ximea, this is in microseconds
            'gain' for Ximea, this is in dB * 1000
            'preview_decimation_factor' (2, 4, 8, ...)
        '''
        # Thin pass-through to the DLL; no return value.
        TemcaGraphDLL.set_parameter(parameter, value)
    def get_parameter(self, parameter):
        '''
        General purpose way to get random parameters on the graph.
        Return value is an int. Valid parameter names are given under
        :meth:`set_parameter`.
        '''
        return TemcaGraphDLL.get_parameter(parameter)
def get_camera_info(self):
'''
Returns a dictionary with details of the capture format including width, height, bytes per pixel, and the camera model and serial number.
'''
info = TemcaGraphDLL.get_camera_info()
return {'width' : info.width, 'height' : info.height,
'pixel_depth' : info.pixel_depth, 'camera_bpp' : info.camera_bpp,
'camera_model' : info.camera_model, 'camera_id' : info.camera_id}
    def get_focus_info(self):
        '''
        Return a dict with focus and astigmatism measurements:
        focus_score, astig_score, astig_angle, and the raw astig_profile.
        Some values are calculated in CUDA, some in python.
        '''
        info = TemcaGraphDLL.get_focus_info()
        # Fit a sine to the angular astigmatism profile; offset and wave are
        # returned by fit_sin but unused here.
        astig_amp, astig_angle, offset, wave = fit_sin(info.astig_profile)
        # Normalize the fitted amplitude by the profile's peak-to-peak range.
        astig_score = astig_amp/np.ptp(info.astig_profile)
        # Reinterpret the ctypes float array as a numpy float32 view.
        array_type = c_float*len(info.astig_profile)
        astig_profile_pointer = cast(info.astig_profile, POINTER(array_type))
        astig_numpy = np.frombuffer(astig_profile_pointer.contents, dtype=np.float32)
        # return the profile?
        return {'focus_score': info.focus_score, 'astig_score': astig_score, 'astig_angle' : astig_angle,
                'astig_profile' : astig_numpy,}
def set_fft_size(self, dimension, start_freq, end_freq):
''' Set the dimension of the FFT (which must be a power of 2) and the start and end frequency for focus/astig measurement.
Both start and end frequencies must be less than dimension.
'''
TemcaGraphDLL.set_fft_size(dimension, start_freq, end_freq);
    def get_qc_info(self):
        ''' Return the min, max, mean, and histogram of the last image acquired. '''
        info = TemcaGraphDLL.get_qc_info()
        # Reinterpret the ctypes int array as a numpy int32 view over the
        # same buffer.
        array_type = c_int*len(info.histogram)
        hist_profile_pointer = cast(info.histogram, POINTER(array_type))
        hist_numpy = np.frombuffer(hist_profile_pointer.contents, dtype=np.int32)
        return {'min':info.min_value, 'max': info.max_value, 'mean':info.mean_value, 'histogram':hist_numpy}
    def grab_matcher_template(self, x, y, width, height):
        ''' Set the ROI (x, y, width, height) to use as the matcher template
        on the next image acquired. '''
        TemcaGraphDLL.grab_matcher_template(x, y, width, height)
def get_matcher_info(self):
''' Return Match status from the matcher. If "good_matches" is 0, then the match operation failed'''
info = TemcaGraphDLL.get_matcher_info()
return {'dX': info.dX, 'dY': info.dY, 'distance': info.distance, 'rotation': info.rotation, 'good_matches': info.good_matches}
    def get_status(self):
        ''' Return the raw status value reported by the graph DLL. '''
        return TemcaGraphDLL.get_status()
    def grab_frame(self, filename = "none", roiX = 0, roiY = 0):
        '''
        Trigger capture of a frame. This function does not wait for completion
        of anything; use grab_frame_wait_completion() for a blocking capture.
        '''
        TemcaGraphDLL.grab_frame(filename, roiX, roiY)
    def grab_frame_wait_completion(self, filename = "none", roiX = 0, roiY = 0):
        '''
        Trigger capture of a frame and block until all graphs have completed.
        '''
        # Wait until the graph signals it is ready before issuing the grab.
        self.wait_start_of_frame()
        self.grab_frame(filename, roiX, roiY) # filename doesn't matter in preview, nor does roi
        self.wait_all_capture_events()
def allocate_frame(self):
'''
Allocate memory as a numpy array to hold a complete frame (16bpp grayscale).
'''
return np.zeros(shape=(self.image_width,self.image_height), dtype= np.uint16)
def allocate_preview_frame(self):
'''
Allocate memory as a numpy array to hold a preview frame (8bpp grayscale).
'''
return np.zeros(shape=(self.image_width/self.preview_decimation_factor,self.image_height/self.preview_decimation_factor), dtype= np.uint8)
    def get_last_frame(self, img):
        '''
        Copy the last frame captured into img (16bpp grayscale).
        This must be called only after eventCapturePostProcessingCompleted has
        signaled and before the next frame is acquired.
        '''
        # img must be preallocated (see allocate_frame) with exact shape/dtype.
        assert (img.shape == (self.image_width, self.image_height) and (img.dtype.type == np.uint16))
        TemcaGraphDLL.get_last_frame(img)
def get_preview_frame(self, img):
'''
Get a copy of the preview image as an ndarray (8bpp grayscale).
This must be called only after eventCapturePostProcessingCompleted has signaled and before the next frame is acquired.
'''
assert (img.shape == (self.image_width/self.preview_decimation_factor, self.image_height/self.preview_decimation_factor) and (img.dtype.type == np.uint8))
TemcaGraphDLL.get_preview_frame(img)
def optimize_exposure(self):
'''
Search for optimal exposure value using binary search.
'''
| min_high_value = 61000
max_high_value = 63000
exposure_step = 100000 #uS
| self.set_mode('preview')
exp = self.get_parameter('exposure')
def _searchDirection():
''' retur |
#!/usr/bin/env python
import paramiko
from datetime import datetime
#hostname='192.168.0.102'
host | name='172.28.102.250'
username='root'
password='abc'
#port=22
if __name__=='__main__':
paramiko.util.log_to_file('paramiko.log')
s=paramiko.SSHClient()
#s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
s.connect(hostname = hostname,username=username, password=password)
stdin,stdout,stderr=s | .exec_command('ifconfig;free;df -h')
print stdout.read()
except:
print "fuck"
f = file('paramiko.log','w')
f.write(" ".join([str(datetime.now()), "fuck\n"]))
f.close()
else:
print "how"
finally:
print "super fuck"
s.close()
|
# Find Objects Partially Inside Region of Interest or Cut Objects to Region of Interest
import cv2
import numpy as np
from . import print_image
from . import plot_image
from . import fatal_error
def roi_objects(img, roi_type, roi_contour, roi_hierarchy, object_contour, obj_hierarchy, device, debug=None):
    """Find objects partially inside a region of interest or cut objects to the ROI.

    Inputs:
    img             = img to display kept objects
    roi_type        = 'cutto' or 'partial' (for partially inside)
    roi_contour     = contour of roi, output from "View and Adjust ROI" function
    roi_hierarchy   = contour of roi, output from "View and Adjust ROI" function
    object_contour  = contours of objects, output from "Identifying Objects" function
    obj_hierarchy   = hierarchy of objects, output from "Identifying Objects" function
    device          = device number. Used to count steps in the pipeline
    debug           = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device      = device number
    kept_cnt    = kept contours
    hierarchy   = contour hierarchy list
    mask        = mask image
    obj_area    = total object pixel area

    :param img: numpy array
    :param roi_type: str
    :param roi_contour: list
    :param roi_hierarchy: list
    :param object_contour: list
    :param obj_hierarchy: list
    :param device: int
    :param debug: str
    :return device: int
    :return kept_cnt: list
    :return hierarchy: list
    :return mask: numpy array
    :return obj_area: int
    """
    device += 1
    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy, 3
    background = np.zeros(size, dtype=np.uint8)
    ori_img = np.copy(img)
    w_back = background + 255
    background1 = np.zeros(size, dtype=np.uint8)
    background2 = np.zeros(size, dtype=np.uint8)
    # Allows user to find all objects that are completely inside or overlapping with ROI
    if roi_type == 'partial':
        for c, cnt in enumerate(object_contour):
            length = (len(cnt) - 1)
            stack = np.vstack(cnt)
            keep = False
            # NOTE(review): the last contour point is skipped (range stops at
            # len-1) — presumably it duplicates the first point; confirm.
            for i in range(0, length):
                pptest = cv2.pointPolygonTest(roi_contour[0], (stack[i][0], stack[i][1]), False)
                if int(pptest) != -1:
                    keep = True
                    # One point inside/on the ROI is enough; stop scanning.
                    break
            if keep:
                if obj_hierarchy[0][c][3] > -1:
                    # Child contour (has a parent): paint white to preserve holes.
                    cv2.drawContours(w_back, object_contour, c, (255, 255, 255), -1, lineType=8,
                                     hierarchy=obj_hierarchy)
                else:
                    cv2.drawContours(w_back, object_contour, c, (0, 0, 0), -1, lineType=8, hierarchy=obj_hierarchy)
            else:
                cv2.drawContours(w_back, object_contour, c, (255, 255, 255), -1, lineType=8, hierarchy=obj_hierarchy)
        kept = cv2.cvtColor(w_back, cv2.COLOR_RGB2GRAY)
        kept_obj = cv2.bitwise_not(kept)
        mask = np.copy(kept_obj)
        obj_area = cv2.countNonZero(kept_obj)
        kept_cnt, hierarchy = cv2.findContours(kept_obj, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(ori_img, kept_cnt, -1, (0, 255, 0), -1, lineType=8, hierarchy=hierarchy)
        cv2.drawContours(ori_img, roi_contour, -1, (255, 0, 0), 5, lineType=8, hierarchy=roi_hierarchy)
    # Allows user to cut objects to the ROI (all objects completely outside ROI will not be kept)
    elif roi_type == 'cutto':
        cv2.drawContours(background1, object_contour, -1, (255, 255, 255), -1, lineType=8, hierarchy=obj_hierarchy)
        roi_points = np.vstack(roi_contour[0])
        cv2.fillPoly(background2, [roi_points], (255, 255, 255))
        # Intersect the object mask with the filled ROI polygon.
        obj_roi = cv2.multiply(background1, background2)
        kept_obj = cv2.cvtColor(obj_roi, cv2.COLOR_RGB2GRAY)
        mask = np.copy(kept_obj)
        obj_area = cv2.countNonZero(kept_obj)
        kept_cnt, hierarchy = cv2.findContours(kept_obj, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(w_back, kept_cnt, -1, (0, 0, 0), -1)
        cv2.drawContours(ori_img, kept_cnt, -1, (0, 255, 0), -1, lineType=8, hierarchy=hierarchy)
        cv2.drawContours(ori_img, roi_contour, -1, (255, 0, 0), 5, lineType=8, hierarchy=roi_hierarchy)
    else:
        # BUG FIX: original message lacked the space after "ROI Type".
        fatal_error('ROI Type ' + str(roi_type) + ' is not "cutto" or "partial"!')
    if debug == 'print':
        print_image(w_back, (str(device) + '_roi_objects.png'))
        print_image(ori_img, (str(device) + '_obj_on_img.png'))
        print_image(mask, (str(device) + '_roi_mask.png'))
    elif debug == 'plot':
        plot_image(w_back)
        plot_image(ori_img)
        plot_image(mask, cmap='gray')
    return device, kept_cnt, hierarchy, mask, obj_area
|
rgets)
skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in targets}))
else:
skipped_python_versions = []
filtered_args = old_options.purge_args(argv)
filtered_args = filter_args(filtered_args, {name: 1 for name in new_options})
host_settings = HostSettings(
controller=controller,
targets=targets,
skipped_python_versions=skipped_python_versions,
filtered_args=filtered_args,
controller_fallback=controller_fallback,
)
return host_settings
def controller_targets(
        mode,  # type: TargetMode
        options,  # type: LegacyHostOptions
        controller,  # type: ControllerHostConfig
):  # type: (...) -> t.List[ControllerConfig]
    """Return the configuration for controller targets."""
    python = native_python(options)
    # An explicit Python selection wins; otherwise fall back to the defaults.
    if not python:
        return default_targets(mode, controller)
    return [ControllerConfig(python=python)]
def native_python(options):  # type: (LegacyHostOptions) -> t.Optional[NativePythonConfig]
    """Return a NativePythonConfig for the given version if it is not None, otherwise return None."""
    if options.python or options.python_interpreter:
        return NativePythonConfig(version=options.python, path=options.python_interpreter)
    return None
def get_legacy_host_config(
mode, # type: TargetMode
options, # type: LegacyHostOptions
): # type: (...) -> t.Tuple[HostConfig, t.List[HostConfig], t.Optional[FallbackDetail]]
"""
Returns controller and target host configs derived from the provided legacy host options.
The goal is to match the original behavior, by using non-split testing whenever possible.
When the options support the controller, use the options for the controller and use ControllerConfig for the targets.
When the options do not support the controller, use the options for the targets and use a default controller config influenced by the options.
"""
venv_fallback = 'venv/default'
docker_fallback = 'default'
remote_fallback = get_fallback_remote_controller()
controller_fallback = None # type: t.Optional[t.Tuple[str, str, FallbackReason]]
if options.venv:
if controller_python(options.python) or not options.python:
controller = OriginConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages))
else:
controller_fallback = f'origin:python={venv_fallback}', f'--venv --python {options.python}', FallbackReason.PYTHON
controller = OriginConfig(python=VirtualPythonConfig(version='default', system_site_packages=options.venv_system_site_packages))
if mode in (TargetMode.SANITY, TargetMode.UNITS):
targets = controller_targets(mode, options, controller)
# Target sanity tests either have no Python requirements or manage their own virtual environments.
# Thus there is no point in setting up virtual environments ahead of time for them.
if mode == TargetMode.UNITS:
targets = [ControllerConfig(python=VirtualPythonConfig(version=target.python.version, path=target.python.path,
system_site_packages=options.venv_system_site_packages)) for target in targets]
else:
targets = [ControllerConfig(python=VirtualPythonConfig(version=options.python or 'default',
system_site_packages=options.venv_system_site_packages))]
elif options.docker:
docker_config = filter_completion(DOCKER_COMPLETION).get(options.docker)
if docker_config:
if options.python and options.python not in docker_config.supported_pythons:
raise PythonVersionUnsupportedError(f'--docker {options.docker}', options.python, docker_config.supported_pythons)
if docker_config.controller_supported:
if controller_python(options.python) or not options.python:
controller = DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{options.docker}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
controller = DockerConfig(name=options.docker)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker}', FallbackReason.ENVIRONMENT
controller = DockerConfig(name=docker_fallback)
targets = [Dock | erConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
else:
if not options.python:
raise PythonVersionUnspecifiedError(f'--docker {options.docker}')
if controller_python(options.python):
controller = DockerConfig(name=options.docker, python=native_python(options),
| privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
controller = DockerConfig(name=docker_fallback)
targets = [DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
elif options.remote:
remote_config = filter_completion(REMOTE_COMPLETION).get(options.remote)
context, reason = None, None
if remote_config:
if options.python and options.python not in remote_config.supported_pythons:
raise PythonVersionUnsupportedError(f'--remote {options.remote}', options.python, remote_config.supported_pythons)
if remote_config.controller_supported:
if controller_python(options.python) or not options.python:
controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'remote:{options.remote}', f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider)
targets = controller_targets(mode, options, controller)
else:
context, reason = f'--remote {options.remote}', FallbackReason.ENVIRONMENT
controller = None
targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)]
elif mode == TargetMode.SHELL and options.remote.startswith('windows/'):
if options.python and options.python not in CONTROLLER_PYTHON_VERSIONS:
raise ControllerNotSupportedError(f'--python {options.python}')
controller = OriginConfig(python=native_python(options))
targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider)]
else:
if not options.python:
raise PythonVersionUnspecifiedError(f'--remote {options.remote}')
if controller_python(options.python):
c |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the B | SD lice | nse. See the LICENSE file for details.
"""
from __future__ import absolute_import
from .build_response import BuildResponse # noqa
from .pod_response import PodResponse # noqa
|
#!/usr/bin/python
#
# linearize.py: Construct a linear, no-fork, best version of the blockchain.
#
#
# Copyright (c) 2013 The SpeedCoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
class SpeedCoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def getblock(rpc, settings, n):
	"""Fetch the block at height n and return its raw serialized bytes."""
	block_hash = rpc.getblockhash(n)
	# getblock(..., False) returns hex; decode to the raw binary block.
	return rpc.getblock(block_hash, False).decode('hex')
def get_blocks(settings):
	"""Append blocks min_height..max_height (inclusive) to the output file,
	each prefixed with the network magic and a little-endian int32 length."""
	rpc = SpeedCoinRPC(settings['host'], settings['port'],
		settings['rpcuser'], settings['rpcpassword'])
	outf = open(settings['output'], 'ab')
	for height in xrange(settings['min_height'], settings['max_height']+1):
		raw = getblock(rpc, settings, height)
		# Header first (magic + length), then the block payload itself.
		outf.write(settings['netmagic'] + struct.pack("<i", len(raw)))
		outf.write(raw)
		# Progress marker every 1000 blocks.
		if (height % 1000) == 0:
			sys.stdout.write("Wrote block " + str(height) + "\n")
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'output' not in settings:
settings['output'] = 'bootstrap.dat'
if 'host' not in settings:
settings['host'] = '127.0.0.1'
| if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings | :
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 279000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_blocks(settings)
|
return
def exit():
    """Log a halt message, close the module log file, and terminate with exit code 1.

    NOTE: intentionally shadows the builtin ``exit`` within this module.
    """
    logme('Program execution halted.')
    log.close()
    # os.sys is the sys module as re-exported by os.
    os.sys.exit(1)
# run external process
def runSubprocess(command_array):
    """Run an external command with stdout/stderr discarded.

    command_array -- list containing the command and all of its parameters.
    Returns (output, error, pid); output and error are None because both
    streams are redirected to /dev/null. Returns ('', '', 0) on failure.
    """
    sp = None
    try:
        with open(os.devnull, 'w') as fp:
            sp = subprocess.Popen(command_array, stderr=fp, stdout=fp)
            # logme('Running subprocess ("%s" %s)...'%(' '.join(command_array), sp.pid))
            sp.wait()
            output, error = sp.communicate()
            return (output, error, sp.pid)
    except Exception:
        # BUG FIX: the original bare "except:" referenced sp.pid, which raised
        # a NameError whenever Popen itself failed; guard against unbound sp.
        pid = sp.pid if sp is not None else 0
        logme('Error. Subprocess ("%s" %d) failed.' %
              (' '.join(command_array), pid))
        return ('', '', 0)
# get current ra/dec of target asteroid
def getAsteroidRaDec(name, dt):
    """Return (ra, dec) of asteroid `name` at time `dt` via JPL Horizons.

    Queries a one-minute ephemeris window starting at dt using the
    module-level `ch` (callhorizons) client and `obs_code` observatory code.
    Halts the program (module-level exit()) if the asteroid is not found.
    """
    ra = ''
    dec = ''
    start = dt
    end = dt + timedelta(minutes=1)
    # get ephemerides for target in JPL Horizons from start to end times
    result = ch.query(name.upper(), smallbody=True)
    result.set_epochrange(start.isoformat(), end.isoformat(), '1m')
    result.get_ephemerides(obs_code)
    # result['EL'] appears to hold the ephemeris rows; we use the first
    # sample in the window — TODO confirm against the callhorizons docs.
    if result and len(result['EL']):
        ra = result['RA'][0]
        dec = result['DEC'][0]
    else:
        logme('Error. Asteroid (%s) not found for %s.' %
              (name, start.isoformat()))
        exit()
    return (ra, dec)
def jdToYYMMDD_HHMMSS(jd):
    """Convert a date to an ISO date/time string via astropy Time.

    NOTE(review): the parameter is named `jd` but the Time object is built
    with format='mjd' (Modified Julian Date) — confirm callers pass MJD.
    """
    t = Time(jd, format='mjd', scale='utc')
    return t.iso
# open log file
log = open(log_fname, 'a+')
# set up the command line argument parser
parser = argparse.ArgumentParser(
description='Perform lightcurve photometry using sextractor.')
# parser.add_argument('asteroid', metavar='asteroid#', type=int,
# help='Target asteroid number')
args = parser.parse_args()
# make sure input files and folder exist before doing any work
inputs = [input_path, sextractor_bin_fname, sextractor_cfg_fname,
          sextractor_param_fname, sextractor_filter_fname, comps_fname]
for required_path in inputs:
    # BUG FIX: the original tested os.path.exists(input_path) on every
    # iteration, so missing sextractor/config files were never detected.
    # (Loop variable also renamed so it no longer shadows builtin input.)
    if not os.path.exists(required_path):
        logme('Error. The file or path (%s) does not exist.' % required_path)
        exit()
# does output directory exist? If not, create it...
outputs = [sex_output_path, cleaned_output_path, bad_path]
for output in outputs:
try:
os.mkdir(output)
except:
pass
image_data = []
# get a list of all FITS files in the input directory
fits_files = glob.glob(input_path+'*.fits')+glob.glob(input_path+'*.fit')
# loop through all qualifying files and perform sextraction
for fits_file in sorted(fits_files):
fits_data = fits.open(fits_file)
header = fits_data[0].header
wcs = WCS(header)
airmass = header['AIRMASS']
try:
dt_obs = dateutil.parser.parse(header['DATE-OBS'])
except:
logme('Error. Invalid observation date found in %s.' % fits_file)
exit()
try:
naxis1 = header['NAXIS1']
naxis2 = header['NAXIS2']
except:
logme('Error. Invalid CCD pixel size found in %s.' % fits_file)
exit()
try:
ra = header['CRVAL1']
dec = header['CRVAL2']
except:
logme('Error. Invalid RA/DEC found in %s.' % fits_file)
exit()
try:
JD = header['MJD-OBS']
except KeyError:
JD = header['JD']
# calculate image corners in ra/dec
ra1, dec1 = wcs.all_pix2world(0, 0, 0)
ra2, dec2 = wcs.all_pix2world(naxis1, naxis2, 0)
# calculate search radius in degrees from the center!
c1 = SkyCoord(ra1, dec1, unit="deg")
c2 = SkyCoord(ra2, dec2, unit="deg")
# estimate radius of FOV in arcmin
r_arcmin = '%f' % (c1.separation(c2).deg*60/2)
logme("Sextracting %s" % (fits_file))
output_file = sex_output_path + \
fits_file.replace('\\', '/').rsplit('/', 1)[1]
output_file = '%s%s.txt' % (output_file, sex_output_suffix)
# add input filename, output filename, airmass, and jd to sex_file list
image_data.append(
{'image': fits_file, 'sex': output_file, 'jd': JD, 'airmass': airmass, 'ra': ra, 'dec': dec, 'dt_obs': dt_obs, 'r_arcmin': r_arcmin})
# sextract this file
(output, error, id) = runSubprocess([sextractor_bin_fname, fits_file, '-c', sextractor_cfg_fname, '-catalog_name',
output_file, '-parameters_name', sextractor_param_fname, '-filter_name', sextractor_filter_fname])
if error:
logme('Error. Sextractor failed: %s' % output)
exit()
logme('Sextracted %d files.' % len(image_data))
# build list of comparison stars in comps_fname using
# PanSTARRS Stack Object Catalog Search
logme('Searching for comparison stars in the PANSTARRS catalog (ra=%s deg, dec=%s deg, radius=%s min)...' %
(image_data[0]['ra'], image_data[0]['dec'], image_data[0]['r_arcmin']))
pso_url_base = 'http://archive.stsci.edu/panstarrs/stackobject/search.php'
pso_url_parms = '?resolver=Resolve&radius=%s&ra=%s&dec=%s&equinox=J2000&nDetections=&selectedColumnsCsv=objname%%2Cobjid%%2Cramean%%2Cdecmean%%2Cgpsfmag%%2Crpsfmag%%2Cipsfmag' + \
'&coordformat=dec&outputformat=CSV_file&skipformat=on' + \
'&max_records=50001&action=Search'
url = pso_url_base + \
pso_url_parms % (image_data[0]['r_arcmin'], image_data[0]['ra'], image_data[0]
['dec'])
# get the results of the REST query
comps = pd.read_csv(url)
if len(comps) <= 0:
logme('Error. No comparison stars found!')
exit()
# remove dupes, keep first
comps.drop_duplicates(subset=['objName'], keep='first', inplace=True)
# make sure magnitudes are treated as floats
comps[pso_ref_mag] = pd.to_numeric(comps[pso_ref_mag], errors='coerce')
# remove spaces from obj names
comps['objName'] = comps['objName'].str.replace('PSO ', '')
# filter based on ref (r?) magnitude!
comps = comps.query("%s > %f & %s < %f" %
(pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
if len(comps) <= 0:
logme('Error. No comparison stars meet the criteria (%s > %f & %s < %f)!' %
(pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
exit()
logme('A total of %d comparison star(s) met the criteria (%s > %f & %s < %f)!' %
(len(comps), pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
# output objects to comps_fname in sextract input format
comps_for_sex = comps[['raMean', 'decMean', 'objName']]
comps_for_sex.to_csv(comps_fname, sep=' ', index=False, header=False)
# read ra/dec from target/comp stars list
# this is legacy and duplicative, but we will go with it
object_data = []
sfile = file('%s' % comps_fname, 'rt')
lines = [s for s in sfile if len(s) > 2 and s[0] != '#']
sfile.close()
count = 0
target_index = -1
for index, l in enumerate(lines):
spl = l.split()
ra = float(spl[0])
dec = float(spl[1])
name = spl[2]
object_data.append(
{'index': index, 'ra': ra, 'dec': dec, 'object_name': name, 'found': True})
# add the asteroid to the object list
# we don't know the ra/dec yet until we get the date/time from the FITS file
#target_index = index + 1
# object_data.append({'index': target_index, 'ra': '',
# 'dec': '', 'object_name': '%d' % args.asteroid, 'found': True})
logme('Searching for %d objects in sextracted data.' % len(object_data))
ofile = file(counts_out_fname, 'wt')
# look for target/comp matches in sextracted files
counts = []
images = []
for image in image_data:
num_found = 0
| lines = [s for s in file(image['sex'], 'rt') if len(s) > 2]
# unless object is target, stop looking for it if it was not found in one of the images
for s in (x for x in object_data):
found = False
# assign the asteroid ra/dec
# if s['object_name'] == '%d' % args.asteroid:
# | # get ra/dec of asteroid at the time image was taken
# (s['ra'], s['dec']) = getAsteroidRaDec(
# s['object_name'], image['dt_obs'])
for l in lines:
spl = l.split()
ra = float(spl[0])
dec = float(spl[1])
if abs(ra-s['ra']) < dRa and abs(dec-s['dec']) < dDec:
num_found += 1
break
images.append(image['image'])
counts.append(num_found)
ofile.write('%s,%d\n' % (image['sex'], num_found))
ofile.close()
mode = np.bincount(counts).argmax()
st |
from worldengine.simulations.basic import find_threshold_f
import numpy
class HumiditySimulation(object):
    """Derives a humidity layer from the precipitation and irrigation layers."""

    @staticmethod
    def is_applicable(world):
        # Needs both inputs present and no humidity computed yet.
        has_inputs = world.has_precipitations() and world.has_irrigation()
        return has_inputs and not world.has_humidity()

    def execute(self, world, seed):
        assert seed is not None
        # _calculate returns the (data, quantiles) pair stored on the world.
        world.humidity = self._calculate(world)

    @staticmethod
    def _calculate(world):
        humids = world.humids
        precipitationWeight = 1.0
        irrigationWeight = 3
        # Weighted combination of the two layers.
        # NOTE(review): irrigation is subtracted here — confirm this matches
        # the irrigation layer's sign convention upstream.
        data = (world.layers['precipitation'].data * precipitationWeight -
                world.layers['irrigation'].data * irrigationWeight) / (precipitationWeight + irrigationWeight)
        # These were originally evenly spaced at 12.5% each but changing them
        # to a bell curve produced better results
        ocean = world.layers['ocean'].data
        quantiles = {}
        # Label '12' pairs with humids[6] down through '87' with humids[0].
        for label, idx in zip(('12', '25', '37', '50', '62', '75', '87'),
                              range(6, -1, -1)):
            quantiles[label] = find_threshold_f(data, humids[idx], ocean)
        return data, quantiles
|
line:
url = self.feedurl + "/" + box + "/" + sel
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (sel, file_size)
job = ImageDownloadJob(url, file_name, sel)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.ImageDownloadCB, JobView, job, backgroundable = False, afterEventChangeable = False)
else:
if sel == str(flashTmp):
self.Start_Flashing()
else:
self.unzip_image(self.filename, flashPath)
def ImageDownloadCB(self, ret):
if ret:
return
if job_manager.active_job:
job_manager.active_job = None
self.close()
return
if len(job_manager.failed_jobs) == 0:
self.session.openWithCallback(self.askUnzipCB, MessageBox, _("The image is downloaded. Do you want to flash now?"), MessageBox.TYPE_YESNO)
else:
self.session.open(MessageBox, _("Download Failed !!"), type = MessageBox.TYPE_ERROR)
def askUnzipCB(self, ret):
if ret:
self.unzip_ima | ge(self.filename, flashPath)
else:
self.show()
def | unzip_image(self, filename, path):
print "Unzip %s to %s" %(filename,path)
self.session.openWithCallback(self.cmdFinished, Console, title = _("Unzipping files, Please wait ..."), cmdlist = ['unzip ' + filename + ' -d ' + path, "sleep 3"], closeOnSuccess = True)
	def cmdFinished(self):
		"""After the unzip completes: stage kernel/rootfs, then start flashing."""
		self.prepair_flashtmp(flashPath)
		self.Start_Flashing()
	def Start_Flashing(self):
		# Build the ofgwrite command line and run it in a Console screen.
		# In simulate mode the -n switch is added (presumably "no write" -
		# the message only lists found image/mtd partitions).
		print "Start Flashing"
		if os.path.exists(ofgwritePath):
			text = _("Flashing: ")
			if self.simulate:
				text += _("Simulate (no write)")
				cmd = "%s -n -r -k %s > /dev/null 2>&1" % (ofgwritePath, flashTmp)
				self.close()
				message = "echo -e '\n"
				message += _('Show only found image and mtd partitions.\n')
				message += "'"
			else:
				# Real flash: -r (rootfs) and -k (kernel) from flashTmp.
				text += _("root and kernel")
				cmd = "%s -r -k %s > /dev/null 2>&1" % (ofgwritePath, flashTmp)
				message = "echo -e '\n"
				message += _('ofgwrite will stop enigma2 now to run the flash.\n')
				message += _('Your STB will freeze during the flashing process.\n')
				message += _('Please: DO NOT reboot your STB and turn off the power.\n')
				message += _('The image or kernel will be flashing and auto booted in few minutes.\n')
				if getBoxType() == 'gb800solo':
					message += _('GB800SOLO takes about 20 mins !!\n')
				message += "'"
			self.session.open(Console, text,[message, cmd])
def prepair_flashtmp(self, tmpPath):
if os.path.exists(flashTmp):
os.system('rm -rf ' + flashTmp)
os.mkdir(flashTmp)
kernel = True
rootfs = True
for path, subdirs, files in os.walk(tmpPath):
for name in files:
if name.find('kernel') > -1 and name.endswith('.bin') and kernel:
binfile = os.path.join(path, name)
dest = flashTmp + '/kernel.bin'
shutil.copyfile(binfile, dest)
kernel = False
elif name.find('root') > -1 and (name.endswith('.bin') or name.endswith('.jffs2')) and rootfs:
binfile = os.path.join(path, name)
dest = flashTmp + '/rootfs.bin'
shutil.copyfile(binfile, dest)
rootfs = False
	def yellow(self):
		# Yellow button: in offline mode open a device browser to pick a
		# local image (zip or raw bin/jffs2). No-op while in online mode.
		if not self.Online:
			self.session.openWithCallback(self.DeviceBrowserClosed, DeviceBrowser, None, matchingPattern="^.*\.(zip|bin|jffs2)", showDirectories=True, showMountpoints=True, inhibitMounts=["/autofs/sr0/"])
	def DeviceBrowserClosed(self, path, filename, binorzip):
		# Callback from DeviceBrowser. binorzip selects how to treat the
		# picked entry: 0 -> directory of raw .bin/.jffs2 images,
		# 1 -> a zip archive, anything else -> just refresh the list.
		if path:
			print path, filename, binorzip
			strPath = str(path)
			if strPath[-1] == '/':
				strPath = strPath[:-1]
			self.imagePath = strPath
			# Start with a clean staging directory.
			if os.path.exists(flashTmp):
				os.system('rm -rf ' + flashTmp)
			os.mkdir(flashTmp)
			if binorzip == 0:
				# Raw images: stage them once any .bin/.jffs2 is present.
				for files in os.listdir(self.imagePath):
					if files.endswith(".bin") or files.endswith('.jffs2'):
						self.prepair_flashtmp(strPath)
						break
				self.Start_Flashing()
			elif binorzip == 1:
				self.unzip_image(strPath + '/' + filename, flashPath)
			else:
				self.layoutFinished()
		else:
			# Browser cancelled: fall back to the module default path.
			self.imagePath = imagePath
	def layoutFinished(self):
		# Populate self.imagelist either from the online image feed
		# (scraped from the feed's index.php HTML) or from local .zip
		# files in self.imagePath, and push it into the list widget.
		box = getBoxType()
		self.imagelist = []
		if self.Online:
			self["key_yellow"].setText("")
			self.feedurl = urlimage
			self["key_blue"].setText("")
			url = '%s/%s/index.php' % (self.feedurl,box)
			#url = '%s/index.php?open=%s' % (self.feedurl,box)
			req = urllib2.Request(url)
			try:
				response = urllib2.urlopen(req)
			except urllib2.URLError as e:
				print "URL ERROR: %s" % e
				return
			try:
				the_page = response.read()
			except urllib2.HTTPError as e:
				print "HTTP download ERROR: %s" % e.code
				return
			lines = the_page.split('\n')
			tt = len(box)
			for line in lines:
				# Scrape anchors of the form <a href='<box>/...'> out of the
				# index page; the slice bounds are tuned to the feed's markup.
				if line.find("<a href='%s/" % box) > -1:
					t = line.find("<a href='%s/" % box)
					self.imagelist.append(line[t+tt+10:t+tt+tt+40])
		else:
			self["key_blue"].setText(_("Delete"))
			self["key_yellow"].setText(_("Devices"))
			for name in os.listdir(self.imagePath):
				if name.endswith(".zip"): # and name.find(box) > 1:
					self.imagelist.append(name)
		self.imagelist.sort()
		# If a staged image is already present, offer it first in the list.
		if os.path.exists(flashTmp):
			for file in os.listdir(flashTmp):
				if file.find(".bin") > -1:
					self.imagelist.insert( 0, str(flashTmp))
					break
		self["imageList"].l.setList(self.imagelist)
class ImageDownloadJob(Job):
	"""Job wrapper around a single image download task."""
	def __init__(self, url, filename, file):
		# Translate the format string FIRST and substitute afterwards, so
		# gettext can look up the msgid "Downloading %s" in the catalog.
		# The original translated the already-substituted string, which
		# can never match a catalog entry.
		Job.__init__(self, _("Downloading %s") % file)
		ImageDownloadTask(self, url, filename)
class DownloaderPostcondition(Condition):
	# Postcondition attached to ImageDownloadTask: the task counts as
	# successful only when its returncode is 0.
	def check(self, task):
		return task.returncode == 0
	def getErrorMessage(self, task):
		# NOTE(review): self.error_message is never assigned on this
		# Condition in the visible code (the task keeps its own
		# error_message) - this looks like it would raise AttributeError
		# if ever called; verify.
		return self.error_message
class ImageDownloadTask(Task):
	# Task that downloads `url` to `path` with a progress display,
	# reporting success/failure through Task.processFinished.
	def __init__(self, job, url, path):
		Task.__init__(self, job, _("Downloading"))
		self.postconditions.append(DownloaderPostcondition())
		self.job = job
		self.url = url
		self.path = path
		self.error_message = ""
		self.last_recvbytes = 0
		# NOTE(review): error_message is assigned twice; the "" above is
		# immediately overwritten by None here - the first looks dead.
		self.error_message = None
		self.download = None
		self.aborted = False
	def run(self, callback):
		# Kick off the asynchronous download; callbacks fire on the
		# twisted deferred returned by start().
		self.callback = callback
		self.download = downloadWithProgress(self.url,self.path)
		self.download.addProgress(self.download_progress)
		self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
		print "[ImageDownloadTask] downloading", self.url, "to", self.path
	def abort(self):
		print "[ImageDownloadTask] aborting", self.url
		if self.download:
			self.download.stop()
		self.aborted = True
	def download_progress(self, recvbytes, totalbytes):
		# Only refresh the UI every ~10kB to avoid flicker.
		if ( recvbytes - self.last_recvbytes ) > 10000: # anti-flicker
			self.progress = int(100*(float(recvbytes)/float(totalbytes)))
			self.name = _("Downloading") + ' ' + "%d of %d kBytes" % (recvbytes/1024, totalbytes/1024)
			self.last_recvbytes = recvbytes
	def download_failed(self, failure_instance=None, error_message=""):
		self.error_message = error_message
		if error_message == "" and failure_instance is not None:
			self.error_message = failure_instance.getErrorMessage()
		# Non-zero return code -> DownloaderPostcondition reports failure.
		Task.processFinished(self, 1)
	def download_finished(self, string=""):
		if self.aborted:
			self.finish(aborted = True)
		else:
			Task.processFinished(self, 0)
class DeviceBrowser(Screen, HelpableScreen):
skin = """
<screen name="DeviceBrowser" position="center,center" size="520,430" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="message" render="Label" position="5,50" size="510,150" font="Regular;16" />
<widget name="filelist" position="5,210" size="510 |
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
for db_table in db_table_intersect:
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
if (len(priv_diff) > 0):
if check_mode:
return True
if not append_privs:
privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
privileges_grant(cursor, user,host,db_table,new_priv[db_table])
changed = True
return changed
def user_delete(cursor, user, host, host_all, check_mode):
    """Drop a MySQL user account.

    When ``host_all`` is true every host entry registered for ``user``
    is dropped; otherwise only the single ``user``@``host`` account is
    removed. In check mode nothing is executed. Always returns True.
    """
    if check_mode:
        return True
    if host_all:
        # One DROP USER per registered host entry.
        for hostname in user_get_hostnames(cursor, user):
            cursor.execute("DROP USER %s@%s", (user, hostname))
    else:
        cursor.execute("DROP USER %s@%s", (user, host))
    return True
def user_get_hostnames(cursor, user):
    """Return the list of Host values registered for ``user`` in mysql.user."""
    cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user)
    return [row[0] for row in cursor.fetchall()]
def privileges_get(cursor, user, host):
    """Return the privileges of ``user``@``host`` as a dict.

    MySQL doesn't have a better method of getting privileges aside from
    the SHOW GRANTS query syntax, which requires us to then parse the
    returned string, e.g.:
        GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
    The returned dict maps each db/table spec to its list of privilege
    names (same format as privileges_unpack()).
    """
    cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
    grants = cursor.fetchall()

    def normalize(privname):
        # MySQL spells the wildcard privilege "ALL PRIVILEGES".
        return 'ALL' if privname == 'ALL PRIVILEGES' else privname

    output = {}
    for grant in grants:
        res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
        if res is None:
            raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
        privileges = [normalize(p) for p in res.group(1).split(", ")]
        trailer = res.group(4)
        if "WITH GRANT OPTION" in trailer:
            privileges.append('GRANT')
        if "REQUIRE SSL" in trailer:
            privileges.append('REQUIRESSL')
        output[res.group(2)] = privileges
    return output
def privileges_unpack(priv, mode):
    """ Take a privileges string, typically passed as a parameter, and unserialize
    it into a dictionary, the same format as privileges_get() above. We have this
    custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
    of a privileges string:
        mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
    The privilege USAGE stands for no privileges, so we add that in on *.* if it's
    not specified in the string, as MySQL will always provide this by default.
    """
    # Identifier quoting differs by sql_mode: ANSI uses ", default uses `.
    if mode == 'ANSI':
        quote = '"'
    else:
        quote = '`'
    output = {}
    privs = []
    for item in priv.strip().split('/'):
        pieces = item.strip().split(':')
        dbpriv = pieces[0].rsplit(".", 1)
        # Do not escape if privilege is for database '*' (all databases)
        if dbpriv[0].strip('`') != '*':
            pieces[0] = '%s%s%s.%s' % (quote, dbpriv[0].strip('`'), quote, dbpriv[1])
        if '(' in pieces[1]:
            # Column-level privileges like "SELECT(col1,col2)": split on commas
            # that are not inside parentheses, then strip the column lists
            # before validating the privilege names.
            output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
            for i in output[pieces[0]]:
                privs.append(re.sub(r'\(.*\)','',i))
        else:
            output[pieces[0]] = pieces[1].upper().split(',')
            privs = output[pieces[0]]
    new_privs = frozenset(privs)
    # NOTE(review): VALID_PRIVS is a module-level set defined elsewhere in
    # this module; only privileges from that set are accepted.
    if not new_privs.issubset(VALID_PRIVS):
        raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
    if '*.*' not in output:
        output['*.*'] = ['USAGE']
    # if we are only specifying something like REQUIRESSL and/or GRANT (=WITH GRANT OPTION) in *.*
    # we still need to add USAGE as a privilege to avoid syntax errors
    if 'REQUIRESSL' in priv and not set(output['*.*']).difference(set(['GRANT', 'REQUIRESSL'])):
        output['*.*'].append('USAGE')
    return output
def privileges_revoke(cursor, user, host, db_table, priv, grant_option):
    """Revoke the privileges in ``priv`` on ``db_table`` from user@host.

    When ``grant_option`` is true the WITH GRANT OPTION right is revoked
    first. GRANT and REQUIRESSL are pseudo-privileges and are filtered
    out of the REVOKE list.
    """
    # cursor.execute treats the SQL as a format string, so literal '%'
    # (the SQL wildcard, common in db/table specs) must be doubled.
    db_table = db_table.replace('%', '%%')
    if grant_option:
        cursor.execute("REVOKE GRANT OPTION ON %s FROM %%s@%%s" % db_table,
                       (user, host))
    revocable = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
    cursor.execute("REVOKE %s ON %s FROM %%s@%%s" % (revocable, db_table),
                   (user, host))
def privileges_grant(cursor, user, host, db_table, priv):
    """Grant the privileges in ``priv`` on ``db_table`` to user@host.

    GRANT and REQUIRESSL are pseudo-privileges: they are expressed as the
    WITH GRANT OPTION / REQUIRE SSL clauses rather than privilege names.
    """
    # Escape '%' since mysql db.execute uses a format string and the
    # specification of db and table often use a % (SQL wildcard).
    db_table = db_table.replace('%', '%%')
    grantable = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
    clauses = ["GRANT %s ON %s" % (grantable, db_table), "TO %s@%s"]
    if 'REQUIRESSL' in priv:
        clauses.append("REQUIRE SSL")
    if 'GRANT' in priv:
        clauses.append("WITH GRANT OPTION")
    cursor.execute(' '.join(clauses), (user, host))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
user=dict(required=True, aliases=['name']),
password=dict(default=None, no_log=True, type='str'),
encrypted=dict(default=False, type='bool'),
host=dict(default="localhost"),
host_all=dict(type="bool", default="no"),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
append_privs=dict(default=False, type='bool'),
check_implicit_admin=dict(default=False, type='bool'),
update_password=dict(default="always", choices=["always", "on_create"]),
connect_timeout=dict(default=30, type='int'),
config_file=dict(default="~/.my.cnf", type='path'),
sql_log_bin=dict(default=True, type='bool'),
ssl_cert=dict(d | efault=None, type='path'),
ssl_key=dict(default=None, type='path'), |
ssl_ca=dict(default=None, type='path'),
),
supports_check_mode=True
)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
user = module.params["user"]
password = module.params["password"]
encrypted = module.boolean(module.params["encrypted"])
host = module.params["host"].lower()
host_all = module.params["host_all"]
state = module.params["state"]
priv = module.params["priv"]
check_implicit_admin = module.params['check_implicit_admin']
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
append_privs = module.boolean(module.params["append_privs"])
update_password = module.params['update_password']
ssl_cert = module.params["ssl_cert"]
ssl_key = module.params["ssl_key"]
ssl_ca = module.params["ssl_ca"]
db = 'mysql'
sql_log_bin = module.params["sql_log_bin"]
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
cursor = None
try:
if check_implicit_admin:
try:
cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
conn |
to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
| use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizerV2, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggre | gate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.
    This contains most of the synchronization implementation and also wraps the
    apply_gradients() from the real optimizer.
    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.  Default to the
        name passed to the Optimizer constructor.
    Returns:
      train_op: The op to dequeue a token so the replicas can exit this batch
      and start the next one. This is executed by each replica.
    Raises:
      ValueError: If the grads_and_vars is empty.
      ValueError: If global step is not provided, the staleness cannot be
        checked.
    """
    if not grads_and_vars:
      raise ValueError("Must supply at least one variable")
    if global_step is None:
      raise ValueError("Global step is required to check staleness")
    self._global_step = global_step
    train_ops = []
    aggregated_grad = []
    var_list = []
    # Per-replica step counter; comparing it to global_step detects stale
    # gradients (accumulators drop grads whose local_step is behind).
    self._local_step = variables.Variable(
        initial_value=0,
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES],
        name="sync_rep_local_step")
    self.local_step_init_op = state_ops.assign(self._local_step, global_step)
    chief_init_ops = [self.local_step_init_op]
    self.ready_for_local_init_op = variables.report_uninitialized_variables(
        variables.all_variables())
    with ops.name_scope(None, self._name):
      for grad, var in grads_and_vars:
        var_list.append(var)
        with ops.device(var.device):
          # Dense gradients.
          if grad is None:
            aggregated_grad.append(None)  # pass-through.
            continue
          elif isinstance(grad, ops.Tensor):
            # One accumulator per variable; take_grad blocks until
            # replicas_to_aggregate gradients have been applied.
            grad_accum = data_flow_ops.ConditionalAccumulator(
                grad.dtype,
                shape=var.get_shape(),
                shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_grad(
                self._replicas_to_aggregate))
          else:
            if not isinstance(grad, ops.IndexedSlices):
              raise ValueError("Unknown grad type!")
            # Sparse gradients use the sparse accumulator variant.
            grad_accum = data_flow_ops.SparseConditionalAccumulator(
                grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_indexed_slices_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_indexed_slices_grad(
                self._replicas_to_aggregate))
          self._accumulator_list.append((grad_accum, var.device))
      aggregated_grads_and_vars = zip(aggregated_grad, var_list)
      # sync_op will be assigned to the same device as the global step.
      with ops.device(global_step.device), ops.name_scope(""):
        update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
                                              global_step)
      # Create token queue.
      with ops.device(global_step.device), ops.name_scope(""):
        sync_token_queue = (
            data_flow_ops.FIFOQueue(-1,
                                    global_step.dtype.base_dtype,
                                    shapes=(),
                                    shared_name="sync_token_q"))
        self._sync_token_queue = sync_token_queue
        # dummy_queue is passed to the queue runner. Don't use the real queues
        # because the queue runner doesn't automatically reopen it once it
        # closed queues in PS devices.
        dummy_queue = (
            data_flow_ops.FIFOQueue(1,
                                    types_pb2.DT_INT32,
                                    shapes=(),
                                    shared_name="dummy_queue"))
      with ops.device(global_step.device), ops.name_scope(""):
        # Replicas have to wait until they can get a token from the token queue.
        with ops.control_dependencies(train_ops):
          token = sync_token_queue.dequeue()
        train_op = state_ops.assign(self._local_step, token)
        with ops.control_dependencies([update_op]):
          # Sync_op needs to insert tokens to the token queue at the end of the
          # step so the replicas can fetch them to start the next step.
          tokens = array_ops.fill([self._tokens_per_step], global_step.ref())
          sync_op = sync_token_queue.enqueue_many((tokens,))
        if self._variable_averages is not None:
          # Chain the moving-average update after the token refill so it
          # runs once per global step.
          with ops.control_dependencies([sync_op]), ops.name_scope(""):
            sync_op = self._variable_averages.apply(
                self._variables_to_average)
        self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
                                                            [sync_op])
      # Seed each accumulator with the current global step so stale-grad
      # detection starts from the right point (chief-only init ops).
      for accum, dev in self._accumulator_list:
        with ops.device(dev):
          chief_init_ops.append(
              accum.set_global_step(
                  global_step, name="SetGlobalStep"))
      self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
      self._gradients_applied = True
      return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This inc |
#!/usr/bin/env python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import os
import re
# package can be distributed with arrayfire binaries or
# just with python wrapper files, the AF_BUILD_LOCAL
# environment var determines whether to build the arrayfire
# binaries locally rather than searching in a system install
AF_BUILD_LOCAL_LIBS = os.environ.get('AF_BUILD_LOCAL_LIBS')
print(f'AF_BUILD_LOCAL_LIBS={AF_BUILD_LOCAL_LIBS}')
if AF_BUILD_LOCAL_LIBS:
    print('Proceeding to build ArrayFire libraries')
else:
    print('Skipping binaries installation, only python files will be installed')
# Per-backend toggles: each AF_BUILD_* env var defaults to enabled (1)
# when unset and is mapped to the matching cmake -DAF_BUILD_*:BOOL flag.
AF_BUILD_CPU = os.environ.get('AF_BUILD_CPU')
AF_BUILD_CPU = 1 if AF_BUILD_CPU is None else int(AF_BUILD_CPU)
AF_BUILD_CPU_CMAKE_STR = '-DAF_BUILD_CPU:BOOL=ON' if (AF_BUILD_CPU == 1) else '-DAF_BUILD_CPU:BOOL=OFF'
AF_BUILD_CUDA = os.environ.get('AF_BUILD_CUDA')
AF_BUILD_CUDA = 1 if AF_BUILD_CUDA is None else int(AF_BUILD_CUDA)
AF_BUILD_CUDA_CMAKE_STR = '-DAF_BUILD_CUDA:BOOL=ON' if (AF_BUILD_CUDA == 1) else '-DAF_BUILD_CUDA:BOOL=OFF'
AF_BUILD_OPENCL = os.environ.get('AF_BUILD_OPENCL')
AF_BUILD_OPENCL = 1 if AF_BUILD_OPENCL is None else int(AF_BUILD_OPENCL)
AF_BUILD_OPENCL_CMAKE_STR = '-DAF_BUILD_OPENCL:BOOL=ON' if (AF_BUILD_OPENCL == 1) else '-DAF_BUILD_OPENCL:BOOL=OFF'
AF_BUILD_UNIFIED = os.environ.get('AF_BUILD_UNIFIED')
AF_BUILD_UNIFIED = 1 if AF_BUILD_UNIFIED is None else int(AF_BUILD_UNIFIED)
AF_BUILD_UNIFIED_CMAKE_STR = '-DAF_BUILD_UNIFIED:BOOL=ON' if (AF_BUILD_UNIFIED == 1) else '-DAF_BUILD_UNIFIED:BOOL=OFF'
if AF_BUILD_LOCAL_LIBS:
    # invoke cmake and build arrayfire libraries to install locally in package
    from skbuild import setup
def filter_af_files(cmake_manifest):
cmake_manifest = list(filter(lambda name: not (name.endswith('.h')
or name.endswith('.cpp')
or name.endswith('.hpp')
or name.endswith('.cmake')
or name.endswith('jpg')
or name.endswith('png')
or name.endswith('libaf.so') #avoids duplicates due to symlinks
or re.match('.*libaf\.so\.3\..*', name) is not None
or name.endswith('libafcpu.so')
or re.match('.*libafcpu\.so\.3\..*', name) is not None
or name.endswith('libafcuda.so')
or re.match('.*libafcuda\.so\.3\..*', name) is not None
or name.endswith('libafopencl.so')
or re.match('.*libafopencl\.so\.3\..*', name) is not None
or name.endswith('libforge.so')
or re.match('.*libforge\.so\.1\..*', name) is not None
or 'examples' in name), cmake_manifest))
return cmake_manifest
    print('Building CMAKE with following configurable variables: ')
    print(AF_BUILD_CPU_CMAKE_STR)
    print(AF_BUILD_CUDA_CMAKE_STR)
    print(AF_BUILD_OPENCL_CMAKE_STR)
    print(AF_BUILD_UNIFIED_CMAKE_STR)
    # scikit-build setup: builds the ArrayFire C++ libraries via cmake and
    # installs them inside the wheel's arrayfire/ directory.
    setup(
        packages=['arrayfire'],
        cmake_install_dir='',
        cmake_process_manifest_hook=filter_af_files,
        include_package_data=False,
        cmake_args=[AF_BUILD_CPU_CMAKE_STR,
                    AF_BUILD_CUDA_CMAKE_STR,
                    AF_BUILD_OPENCL_CMAKE_STR,
                    AF_BUILD_UNIFIED_CMAKE_STR,
                    # todo: pass additional args from environ
                    '-DCMAKE_BUILD_TYPE:STRING="RelWithDebInfo"',
                    '-DFG_USE_STATIC_CPPFLAGS:BOOL=OFF',
                    '-DFG_WITH_FREEIMAGE:BOOL=OFF',
                    '-DCUDA_architecture_build_targets:STRING=All',
                    '-DAF_BUILD_DOCS:BOOL=OFF',
                    '-DAF_BUILD_EXAMPLES:BOOL=OFF',
                    '-DAF_INSTALL_STANDALONE:BOOL=ON',
                    '-DAF_WITH_IMAGEIO:BOOL=ON',
                    '-DAF_WITH_LOGGING:BOOL=ON',
                    '-DBUILD_TESTING:BOOL=OFF',
                    '-DAF_BUILD_FORGE:BOOL=ON',
                    # install libs/binaries next to the python package
                    '-DAF_INSTALL_LIB_DIR:STRING=arrayfire',
                    '-DAF_INSTALL_BIN_DIR:STRING=arrayfire',
                    '-DFG_INSTALL_LIB_DIR:STRING=arrayfire',
                    '-DAF_WITH_STATIC_MKL=ON',
                    ]
    )
else:
    # ignores local arrayfire libraries, will search system instead
    from setuptools import setup
    setup()
|
r"\w+\([^)]*\)")
# Matches the boolean separators (AND/OR/NOT) between lock functions.
_RE_SEPS = re.compile(r"(?<=[ )])AND(?=\s)|(?<=[ )])OR(?=\s)|(?<=[ )])NOT(?=\s)")
# Tokens allowed to remain in an eval string: the literal '%s' placeholder
# (lock-function calls are replaced by '%s' during parsing) and the
# lowercased boolean operators. '%s' here is a literal token, not a
# format placeholder.
_RE_OK = re.compile(r"%s|and|or|not")
#
#
# Lock handler
#
#
class LockHandler(object):
"""
This handler should be attached to all objects implementing
permission checks, under the property 'lockhandler'.
"""
    def __init__(self, obj):
        """
        Loads and pre-caches all relevant locks and their functions.
        Args:
            obj (object): The object on which the lockhandler is
                defined.
        """
        # Populate the module-level lock-function registry on first use.
        if not _LOCKFUNCS:
            _cache_lockfuncs()
        self.obj = obj
        # Maps access_type -> (evalstring, tuple of (func, args, kwargs), raw lockstring).
        self.locks = {}
        try:
            self.reset()
        except LockException as err:
            # A broken stored lockstring must not prevent the object from
            # loading; log the parse error instead of raising.
            logger.log_trace(err)
def __str__(self):
return ";".join(self.locks[key][2] for key in sorted(self.locks))
    def _log_error(self, message):
        """Raise `message` as a LockException.

        Despite the name, nothing is logged here; the exception is the
        error channel back to the caller.
        """
        raise LockException(message)
    def _parse_lockstring(self, storage_lockstring):
        """
        Helper function. This is normally only called when the
        lockstring is cached and does preliminary checking. locks are
        stored as a string
            atype:[NOT] lock()[[ AND|OR [NOT] lock()[...]];atype...
        Args:
            storage_lockstring (str): The lockstring to parse.
        Returns:
            dict: access_type -> (evalstring, tuple of (func, args, kwargs),
                raw lockstring) for every valid definition.
        Raises:
            LockException: if any definition had errors.
        """
        locks = {}
        if not storage_lockstring:
            return locks
        duplicates = 0
        elist = [] # errors
        wlist = [] # warnings
        for raw_lockstring in storage_lockstring.split(';'):
            if not raw_lockstring:
                continue
            lock_funcs = []
            try:
                access_type, rhs = (part.strip() for part in raw_lockstring.split(':', 1))
            except ValueError:
                logger.log_trace()
                return locks
            # parse the lock functions and separators
            funclist = _RE_FUNCS.findall(rhs)
            evalstring = rhs
            # Lowercase the boolean operators so the string can be eval'd
            # with Python's and/or/not.
            for pattern in ('AND', 'OR', 'NOT'):
                evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(), evalstring)
            nfuncs = len(funclist)
            for funcstring in funclist:
                funcname, rest = (part.strip().strip(')') for part in funcstring.split('(', 1))
                func = _LOCKFUNCS.get(funcname, None)
                if not callable(func):
                    elist.append(_("Lock: lock-function '%s' is not available.") % funcstring)
                    continue
                # Split call arguments into positional args and kwargs.
                args = list(arg.strip() for arg in rest.split(',') if arg and not '=' in arg)
                kwargs = dict([arg.split('=', 1) for arg in rest.split(',') if arg and '=' in arg])
                lock_funcs.append((func, args, kwargs))
                # Replace the call with a '%s' placeholder for later eval.
                evalstring = evalstring.replace(funcstring, '%s')
            if len(lock_funcs) < nfuncs:
                continue
            try:
                # purge the eval string of any superfluous items, then test it
                # (only '%s'/and/or/not tokens survive, so the eval is safe).
                evalstring = " ".join(_RE_OK.findall(evalstring))
                eval(evalstring % tuple(True for func in funclist), {}, {})
            except Exception:
                elist.append(_("Lock: definition '%s' has syntax errors.") % raw_lockstring)
                continue
            if access_type in locks:
                # Later definitions of the same access type override earlier
                # ones; record a warning about the overwrite.
                duplicates += 1
                wlist.append(_("LockHandler on %(obj)s: access type '%(access_type)s' changed from '%(source)s' to '%(goal)s' " % \
                            {"obj":self.obj, "access_type":access_type, "source":locks[access_type][2], "goal":raw_lockstring}))
            locks[access_type] = (evalstring, tuple(lock_funcs), raw_lockstring)
        if wlist and WARNING_LOG:
            # a warning text was set, it's not an error, so only report
            logger.log_file("\n".join(wlist), WARNING_LOG)
        if elist:
            # an error text was set, raise exception.
            raise LockException("\n".join(elist))
        # return the gathered locks in an easily executable form
        return locks
def _cache_locks(self, storage_lockstring):
"""
Store data
"""
self.locks = self._parse_lockstring(storage_lockstring)
def _save_locks(self):
"""
Store locks to obj
"""
self.obj.lock_storage = ";".join([tup[2] for tup in self.locks.values()])
def cache_lock_bypass(self, obj):
"""
We cache superuser bypass checks here for efficiency. This
needs to be re-run when a player is assigned to a character.
We need to grant access to superusers. We need to check both
directly on the object (players), through obj.player and using
the get_player() method (this sits on serversessions, in some
rare cases where a check is done before the login process has
yet been fully finalized)
Args:
obj (object): This is checked for the `is_superuser` property.
"""
self.lock_bypass = hasattr(obj, "is_superuser") and obj.is_superuser
def add(self, lockstring):
"""
Add a new lockstring to handler.
Args:
lockstring (str): A string on the form
`"<access_type>:<functions>"`. Multiple access types
should be separated by semicolon (`;`).
Returns:
success (bool): The outcome of the addition, `False` on
error.
"""
# sanity checks
for lockdef in lockstring.split(';'):
if not ':' in lockstring:
self._log_error(_("Lock: '%s' contains no colon (:).") % lockdef)
return False
access_type, rhs = [part.strip() for part | in lockdef.split(':', 1)]
if not access_type:
self._log_error(_("Lock: '%s' has no access_type (left-side of colon is empty).") % lockdef)
return False
if rhs.count('(') != rhs.count(')'):
self._log_error(_("Lock: '%s' has mismatched parentheses.") % lockdef)
return False
if not _RE_FUNCS.findall(rhs):
self._log_error(_("Lock: '%s' has no valid lock functions.") % lockdef)
| return False
# get the lock string
storage_lockstring = self.obj.lock_storage
if storage_lockstring:
storage_lockstring = storage_lockstring + ";" + lockstring
else:
storage_lockstring = lockstring
# cache the locks will get rid of eventual doublets
self._cache_locks(storage_lockstring)
self._save_locks()
return True
def replace(self, lockstring):
"""
Replaces the lockstring entirely.
Args:
lockstring (str): The new lock definition.
Return:
success (bool): False if an error occurred.
Raises:
LockException: If a critical error occurred.
If so, the old string is recovered.
"""
old_lockstring = str(self)
self.clear()
try:
return self.add(lockstring)
except LockException:
self.add(old_lockstring)
raise
def get(self, access_type=None):
"""
Get the full lockstring or the lockstring of a particular
access type.
Args:
access_type (str, optional):
Returns:
lockstring (str): The matched lockstring, or the full
lockstring if no access_type was given.
"""
if access_type:
return self.locks.get(access_type, ["", "", ""])[2]
return str(self)
def all(self):
"""
Return all lockstrings.
Returns:
lockstring (str): The full lockstring
"""
return self.get()
def remove(self, access_type):
"""
Remove a particular lock from the handler
Args:
access_type (str): The type of lock to remove.
Returns:
success (bool): If the access_type was not found
in the lock, this returns `False`.
"""
|
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self) -> str:
return "%s %s (%s)" % (
self.request.method,
self.request.uri,
self.request.remote_ip,
)
    def _handle_request_exception(self, e: BaseException) -> None:
        # Central handler for exceptions escaping a request handler:
        # Finish ends the request quietly, HTTPError maps to its status
        # code, anything else becomes a 500.
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            # An error here should still get a best-effort send_error()
            # to avoid leaking the connection.
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            # Extra errors after the request has been finished should
            # be logged, but there is no reason to continue to try and
            # send a response.
            return
        if isinstance(e, HTTPError):
            self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            self.send_error(500, exc_info=sys.exc_info())
    def log_exception(
        self,
        typ: "Optional[Type[BaseException]]",
        value: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        """Override to customize logging of uncaught exceptions.
        By default logs instances of `HTTPError` as warnings without
        stack traces (on the ``tornado.general`` logger), and all
        other exceptions as errors with stack traces (on the
        ``tornado.application`` logger).
        .. versionadded:: 3.1
        """
        if isinstance(value, HTTPError):
            # HTTPErrors with a log message are expected conditions:
            # warn without a traceback. (HTTPErrors without a log
            # message are not logged at all.)
            if value.log_message:
                format = "%d %s: " + value.log_message
                args = [value.status_code, self._request_summary()] + list(value.args)
                gen_log.warning(format, *args)
        else:
            app_log.error(  # type: ignore
                "Uncaught exception %s\n%r",
                self._request_summary(),
                self.request,
                exc_info=(typ, value, tb),
            )
def _ui_module(self, name: str, module: Type["UIModule"]) -> Callable[..., str]:
def render(*args, **kwargs) -> str: # type: ignore
if not hasattr(self, "_active_modules"):
self._active_modules = {} # type: Dict[str, UIModule]
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method: Callable[..., str]) -> Callable[..., str]:
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self) -> None:
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = [
"Allow",
"Content-Encoding",
"Content-Language",
"Content-Length",
"Content-MD5",
"Content-Range",
"Content-Type",
"Last-Modified",
]
for h in headers:
self.clear_header(h)
def stream_request_body(cls: Type[RequestHandler]) -> Type[RequestHandler]:
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via
``@gen.coroutine``, in which case the next method will not be called
until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage.
""" # noqa: E501
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True
return cls
def _has_stream_request_body(cls: Type[RequestHandler]) -> bool:
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
return cls._stream_request_body
def removeslash(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.q | uery
self.redirect(uri, permanent=True)
return None
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with thi | s
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return None
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class _ApplicationRouter(ReversibleRuleRouter):
"""Routing implementation used internally by `Application`.
Provides a binding between `Application` and `RequestHandler`.
This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
* it allows to use `RequestHandler` subclasses as `~.routing.Rule` target and
* it allows to use a list/tuple of rules as `~.routing.Rule` target.
``process_rule`` implementation will substitute this list with an appropriate
`_ApplicationRouter` instance.
"""
def __init__(self, application: "Application", rules: _RuleList = None) -> None:
assert isinstance(application, Application)
self.application = application
super(_ApplicationRouter, self).__init__(rules)
def process_rule(self, rule: Rule) -> Rule:
rule = super(_ApplicationRouter, self).process_rule(rule)
if isinstance(rule.target, (list, tuple)):
rule.target = _ApplicationRouter( # type: ignore
self.application, rule.target
|
x.has_result_python_document_class()): self.set_result_python_document_class(x.result_python_document_class())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_document_ != x.has_document_: return 0
if self.has_document_ and self.document_ != x.document_: return 0
if self.has_result_batch_size_ != x.has_result_batch_size_: return 0
if self.has_result_batch_size_ and self.result_batch_size_ != x.result_batch_size_: return 0
if self.has_result_task_queue_ != x.has_result_task_queue_: return 0
if self.has_result_task_queue_ and self.result_task_queue_ != x.result_task_queue_: return 0
if self.has_result_relative_url_ != x.has_result_relative_url_: return 0
if self.has_result_relative_url_ and self.result_relative_url_ != x.result_relative_url_: return 0
if self.has_result_key_ != x.has_result_key_: return 0
if self.has_result_key_ and self.result_key_ != x.result_key_: return 0
if self.has_result_python_document_class_ != x.has_result_python_document_class_: return 0
if self.has_result_python_document_class_ and self.result_python_document_class_ != x.result_python_document_class_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_document_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: document not set.')
elif not self.document_.IsInitialized(debug_strs): initialized = 0
if (not self.has_result_batch_size_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_batch_size not set.')
if (not self.has_result_task_queue_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_task_queue not set.')
if (not self.has_result_relative_url_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_relative_url not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(self.document_.ByteSize())
n += self.lengthVarInt64(self.result_batch_size_)
n += self.lengthString(len(self.result_task_queue_))
n += self.lengthString(len(self.result_relative_url_))
if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_))
if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_)
return n + 5
def ByteSizePartial(self):
n = 0
if (self.has_topic_):
n += 1
n += self.lengthString(len(self.topic_))
if (self.has_document_):
n += 1
n += self.lengthString(self.document_.ByteSi | zePartial())
if ( | self.has_result_batch_size_):
n += 1
n += self.lengthVarInt64(self.result_batch_size_)
if (self.has_result_task_queue_):
n += 1
n += self.lengthString(len(self.result_task_queue_))
if (self.has_result_relative_url_):
n += 1
n += self.lengthString(len(self.result_relative_url_))
if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_))
if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_)
return n
def Clear(self):
self.clear_topic()
self.clear_document()
self.clear_result_batch_size()
self.clear_result_task_queue()
self.clear_result_relative_url()
self.clear_result_key()
self.clear_result_python_document_class()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putVarInt32(self.document_.ByteSize())
self.document_.OutputUnchecked(out)
out.putVarInt32(24)
out.putVarInt32(self.result_batch_size_)
out.putVarInt32(34)
out.putPrefixedString(self.result_task_queue_)
out.putVarInt32(42)
out.putPrefixedString(self.result_relative_url_)
if (self.has_result_key_):
out.putVarInt32(50)
out.putPrefixedString(self.result_key_)
if (self.has_result_python_document_class_):
out.putVarInt32(56)
out.putVarInt32(self.result_python_document_class_)
def OutputPartial(self, out):
if (self.has_topic_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_document_):
out.putVarInt32(18)
out.putVarInt32(self.document_.ByteSizePartial())
self.document_.OutputPartial(out)
if (self.has_result_batch_size_):
out.putVarInt32(24)
out.putVarInt32(self.result_batch_size_)
if (self.has_result_task_queue_):
out.putVarInt32(34)
out.putPrefixedString(self.result_task_queue_)
if (self.has_result_relative_url_):
out.putVarInt32(42)
out.putPrefixedString(self.result_relative_url_)
if (self.has_result_key_):
out.putVarInt32(50)
out.putPrefixedString(self.result_key_)
if (self.has_result_python_document_class_):
out.putVarInt32(56)
out.putVarInt32(self.result_python_document_class_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_document().TryMerge(tmp)
continue
if tt == 24:
self.set_result_batch_size(d.getVarInt32())
continue
if tt == 34:
self.set_result_task_queue(d.getPrefixedString())
continue
if tt == 42:
self.set_result_relative_url(d.getPrefixedString())
continue
if tt == 50:
self.set_result_key(d.getPrefixedString())
continue
if tt == 56:
self.set_result_python_document_class(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_document_:
res+=prefix+"document <\n"
res+=self.document_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_result_batch_size_: res+=prefix+("result_batch_size: %s\n" % self.DebugFormatInt32(self.result_batch_size_))
if self.has_result_task_queue_: res+=prefix+("result_task_queue: %s\n" % self.DebugFormatString(self.result_task_queue_))
if self.has_result_relative_url_: res+=prefix+("result_relative_url: %s\n" % self.DebugFormatString(self.result_relative_url_))
if self.has_result_key_: res+=prefix+("result_key: %s\n" % self.DebugFormatString(self.result_key_))
if self.has_result_python_document_class_: res+=prefix+("result_python_document_class: %s\n" % self.DebugFormatInt32(self.result_python_document_class_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
kdocument = 2
kresult_batch_size = 3
kresult_task_queue = 4
kresult_relative_url = 5
kresult_key = 6
kresult_python_document_class = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "document",
3: "result_batch_size",
4: "result_task_queue",
5: "result_relative_url",
6: "result_key",
7: "result_python_document_class",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """""" |
"""
GraphLab Create offers several data structures for data analysis.
Concise descriptions of the data structures and their methods are contained in
the API documentation, a | long with a small number of simple examples. For more
detailed descriptions and examples, please see the `User Guide
<https://dato.com/learn/userguide/>`_, `API Translator
<https://dato.com/learn/translator/>`_, `How-Tos
<https://dato.com/learn/how-to/>`_, and data science `Gallery
<https://dato.com/learn/gallery/>`_.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD li | cense. See the LICENSE file for details.
'''
__all__ = ['sframe', 'sarray', 'sgraph', 'sketch', 'image']
from . import image
from . import sframe
from . import sarray
from . import sgraph
from . import sketch
|
from ctypes import Structure, POINTER, c_void_p, cast, create_string_buffer, \
c_char_p, byref, memmove
from ctypes import windll, WinDLL, WINFUNCTYPE
try:
from ctypes import wintypes
except ValueError:
# see http://bugs.python.org/issue16396
raise ImportError("wintypes")
from keyring.util.escape import u
# Crypto API ctypes bindings
class DATA_BLOB(Structure):
_fields_ = [('cbData', wintypes.DWORD),
('pbData', POINTER(wintypes.BYTE))]
class CRYPTPROTECT_PROMPTSTRUCT(Structure):
_fields_ = [('cbSize', wintypes.DWORD),
('dwPromptFlags', wintypes.DWORD),
('hwndApp', wintypes.HWND),
('szPrompt', POINTER(wintypes.WCHAR))]
# Flags for CRYPTPROTECT_PROMPTSTRUCT
CRYPTPROTECT_PROMPT_ON_UNPROTECT = 1
CRYPTPROTECT_PROMPT_ON_PROTECT = 2
# Flags for CryptProtectData/CryptUnprotectData
CRYPTPROTECT_UI_FORBIDDEN = 0x01
CRYPTPROTECT_LOCAL_MACHINE = 0x04
CRYPTPROTECT_CRED_SYNC = 0x08
CRYPTPROTECT_AUDIT = 0x10
| CRYPTPROTECT_NO_RECOVERY = 0x20
CRYPTPROTECT_VERIFY_PROTECTION = 0x40
CRYPTPROTECT_CRED_REGENERATE = 0x80
# C | rypto API Functions
_dll = WinDLL('CRYPT32.DLL')
CryptProtectData = WINFUNCTYPE(wintypes.BOOL,
POINTER(DATA_BLOB),
POINTER(wintypes.WCHAR),
POINTER(DATA_BLOB),
c_void_p,
POINTER(CRYPTPROTECT_PROMPTSTRUCT),
wintypes.DWORD,
POINTER(DATA_BLOB))(('CryptProtectData', _dll))
CryptUnprotectData = WINFUNCTYPE(wintypes.BOOL,
POINTER(DATA_BLOB),
POINTER(wintypes.WCHAR),
POINTER(DATA_BLOB),
c_void_p,
POINTER(CRYPTPROTECT_PROMPTSTRUCT),
wintypes.DWORD, POINTER(DATA_BLOB))(
('CryptUnprotectData', _dll))
# Functions
def encrypt(data, non_interactive=0):
blobin = DATA_BLOB(cbData=len(data),
pbData=cast(c_char_p(data),
POINTER(wintypes.BYTE)))
blobout = DATA_BLOB()
if not CryptProtectData(byref(blobin),
u('python-keyring-lib.win32crypto'),
None, None, None,
CRYPTPROTECT_UI_FORBIDDEN,
byref(blobout)):
raise OSError("Can't encrypt")
encrypted = create_string_buffer(blobout.cbData)
memmove(encrypted, blobout.pbData, blobout.cbData)
windll.kernel32.LocalFree(blobout.pbData)
return encrypted.raw
def decrypt(encrypted, non_interactive=0):
blobin = DATA_BLOB(cbData=len(encrypted),
pbData=cast(c_char_p(encrypted),
POINTER(wintypes.BYTE)))
blobout = DATA_BLOB()
if not CryptUnprotectData(byref(blobin),
u('python-keyring-lib.win32crypto'),
None, None, None,
CRYPTPROTECT_UI_FORBIDDEN,
byref(blobout)):
raise OSError("Can't decrypt")
data = create_string_buffer(blobout.cbData)
memmove(data, blobout.pbData, blobout.cbData)
windll.kernel32.LocalFree(blobout.pbData)
return data.raw
|
#!/usr/bin/env python
''' W celu poprawniego dzialania ponizszego skryptu, trzeba skonigurowac
logowanie RSA / DSA na maszynie na ktora, bedziemy przenosic nasza kopie zapasowa. Sposob konfiguracji mozna znalezc tutaj
http://www.nerdokracja.pl/linux-logowanie-ssh-klucze-rsa/
'''
import os
import os.path
print ('Tworzenie folderu tymczasowego')
cmd = "mkdir ~/tmp" #Tworzenie folderu tymczasowego
print ('Tworze kopie zapasowa bazy danych')
# -u user << wpisujemy swoj login
# -phaslo << haslo wpisujemy ciagiem zaraz po -p
# moja_baza_danych << nalezy zmienic na nazwe bazy danych do zachowania
# moja.sql << nazwa npliku .sql z baza danych
cmd1 = "mysqldump -u user -phaslo moja_baza_dancyh > ~/tmp/moja.sql"
print ('Tworze kopie zapasa folderow')
# /home/ /root/ /var/www/ /etc/ << foldery do zachowania mozna zmieniac wedle uznania
cmd2 = "zip -r ~/tmp/backup.zip ~/tmp/moja.sql /home/ /root/ /var/www/ /etc/"
# Logowanie za pomoca scp
# konto << nazwa konta na maszynie do ktorej backup.zip bedzie wyslany
# jakis_adres_ip << adres maszyny na ktora backup bedzie wyslany
# /home/backup/ << miejsce gdzie backup.zip bedzie przechowywany
print ('Wysylanie backupu ...')
cmd3 = "scp ~/tmp/backup.zip konto@jakis_aders_ip:/home/backup"
print('Usuniecie folderu tymczasowego')
cmd4 = "rm -R ~/tmp"
#wykonianie zdefiniowanych wczesniej | polecen
os.system(cmd)
os.system(cmd1)
os.system(cmd2)
os.system(cm | d3)
os.system(cmd4)
|
"""
Django settings for sw_tts project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1^xn16c_l*5+ko)_#nrue-+as1@jowgr1+e%0y4fk@#rd%*j)3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework_swagger',
'rest_framework',
'tts',
'core',
'social.apps.django_app.default',
]
M | IDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.mi | ddleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sw_tts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'sw_tts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('frontend', os.path.join(BASE_DIR, 'frontend')),
)
LANGUAGE_CODE = 'ru-Ru'
OUTPUT_DIR = os.path.join(BASE_DIR, 'generated')
MAX_SOUND_LIFE = 60*60*12 # seconds of sound file storing
SOCIAL_AUTH_VK_OPENAPI_ID = '5596606'
SOCIAL_AUTH_VK_APP_SECRET = 'jBx8nnH7pzevq7UA3hH0'
SOCIAL_AUTH_VK_APP_USER_MODE = 2
VK_APP_ID = '5596606'
VKONTAKTE_APP_ID = VK_APP_ID
VK_API_SECRET = 'jBx8nnH7pzevq7UA3hH0'
VKONTAKTE_APP_SECRET = VK_API_SECRET
SOCIAL_AUTH_VK_OAUTH2_KEY = '5596606'
SOCIAL_AUTH_VK_OAUTH2_SECRET = 'jBx8nnH7pzevq7UA3hH0'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.vk.VKOAuth2',
'social.backends.vk.VKontakteOpenAPI',
'social.backends.yandex.YaruOAuth2',
'social.backends.yandex.YandexOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_URL_NAMESPACE = 'social'
# SOCIAL_AUTH_STORAGE = 'social.apps.django_app.me.models.DjangoStorage'
REST_FRAMEWORK = {
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '25/minute',
'user': '25/minute'
}
}
try:
from sw_tts.local_settings import *
except ImportError:
print("Warning: no local_settings.py")
|
__all__ = ['pe | rsistence', 'baltimorejsondecoder', 'baltimorejsone | ncoder']
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Template Composition Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'akbargumbira@gmail.com'
__date__ = '06/01/2015'
__copyright__ = ('Copyright 2013, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import unittest
import shutil
import logging
from safe.report.template_composition import TemplateComposition
from safe.utilities.resources import resources_path
from safe.test.utilities import get_qgis_app, temp_dir
LOGGER = logging.getLogger('InaSAFE')
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
INASAFE_TEMPLATE_PATH = resources_path(
'qgis-composer-templates', 'a4-portrait-blue.qpt')
class TemplateC | ompositionTest(unittest.TestCase):
"""Test Impact Merge Dialog widget."""
# noinspection PyPep8Naming
def setUp(self):
"""Runs before each test."""
pass
# noinspection PyPep8Naming
def tearDown(self):
"""Ru | ns after each test."""
pass
def test_constructor(self):
"""Test constructor."""
# If we give param map_settings, composition instance must not be none
map_settings = CANVAS.mapSettings()
template_composition = TemplateComposition(map_settings=map_settings)
message = 'The composition instance variable must not be none.'
self.assertIsNotNone(template_composition.composition, message)
def test_missing_elements(self):
"""Test if we can get missing elements correctly."""
# Copy the inasafe template to temp dir
template_path = os.path.join(
temp_dir('test'), 'a4-portrait-blue.qpt')
shutil.copy2(INASAFE_TEMPLATE_PATH, template_path)
template_composition = TemplateComposition(template_path=template_path)
# No missing elements here
component_ids = [
'white-inasafe-logo',
'north-arrow',
'organisation-logo',
'impact-map',
'impact-legend']
template_composition.component_ids = component_ids
message = 'There should be no missing elements, but it gets: %s' % (
template_composition.missing_elements)
expected_result = []
self.assertEqual(
template_composition.missing_elements, expected_result, message)
# There are missing elements
component_ids = [
'white-inasafe-logo',
'north-arrow',
'organisation-logo',
'impact-map', 'impact-legend',
'i-added-element-id-here-nooo']
template_composition.component_ids = component_ids
message = 'There should be no missing elements, but it gets: %s' % (
template_composition.missing_elements)
expected_result = ['i-added-element-id-here-nooo']
self.assertEqual(
template_composition.missing_elements, expected_result, message)
# Remove test dir
shutil.rmtree(temp_dir('test'))
def test_load_template(self):
"""Test we can load template correctly."""
# Copy the inasafe template to temp dir
template_path = os.path.join(
temp_dir('test'), 'a4-portrait-blue.qpt')
shutil.copy2(INASAFE_TEMPLATE_PATH, template_path)
template_composition = TemplateComposition(
template_path=template_path,
map_settings=CANVAS.mapSettings())
template_composition.load_template()
# Check the element of the composition
# In that template, there should be these components:
component_ids = [
'white-inasafe-logo',
'north-arrow',
'organisation-logo',
'impact-map',
'impact-legend']
for component_id in component_ids:
component = template_composition.composition.getComposerItemById(
component_id)
message = ('In this template: %s, there should be this component '
'%s') % (INASAFE_TEMPLATE_PATH, component_id)
self.assertIsNotNone(component, message)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    suite = unittest.makeSuite(TemplateCompositionTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
|
raise plugin.PluginError('XML-RPC fault: Unable to connect to aria2 daemon at %s: %s'
% (baseurl, err.faultString), log)
except socket_error as e:
(error, msg) = e.args
raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
% (baseurl, msg), log)
except:
raise plugin.PluginError('Unidentified error during connection to aria2 daemon at %s' % baseurl, log)
# loop entries
for entry in task.accepted:
config['aria_dir'] = config['aria_config']['dir']
if 'aria_gid' in entry:
config['aria_config']['gid'] = entry['aria_gid']
elif 'torrent_info_hash' in entry:
config['aria_config']['gid'] = entry['torrent_info_hash'][0:16]
elif 'gid' in config['aria_config']:
del(config['aria_config']['gid'])
if 'content_files' not in entry:
if entry['url']:
entry['content_files'] = [entry['url']]
else:
entry['content_files'] = [entry['title']]
else:
if not isinstance(entry['content_files'], list):
entry['content_files'] = [entry['content_files']]
counter = 0
for cur_file in entry['content_files']:
entry['parent_folders'] = ''
# reset the 'dir' or it will only be rendered on the first loop
config['aria_config']['dir'] = config['aria_dir']
cur_filename = cur_file.split('/')[-1]
if cur_file.split('/')[0] != cur_filename and config['keep_parent_folders']:
lastSlash = cur_file.rfind('/')
cur_path = cur_file[:lastSlash]
if cur_path[0:1] == '/':
cur_path = cur_path[1:]
entry['parent_folders'] = cur_path
log.debug('parent folders: %s' % entry['parent_folders'])
file_dot = cur_filename.rfind(".")
file_ext = cur_filename[file_dot:]
if len(entry['content_files']) > 1 and 'gid' in config['aria_config']:
# if there is more than 1 file, need to give unique gids, this will work up to 999 files
counter += 1
strCounter = str(counter)
if len(entry['content_files']) > 99:
# sorry not sorry if you have more than 999 files
config['aria_config']['gid'] = ''.join([config['aria_config']['gid'][0:-3],
strCounter.rjust(3, str('0'))])
else:
config['aria_config']['gid'] = ''.join([config['aria_config']['gid'][0:-2],
strCounter.rjust(2, str('0'))])
if config['exclude_samples'] == True:
# remove sample files from download list
if cur_filename.lower().find('sample') > -1:
continue
if file_ext not in config['file_exts']:
if config['exclude_non_content'] == True:
# don't download non-content files, like nfos - definable in file_exts
continue
if config['parse_filename']:
if config['content_is_episodes']:
metainfo_series = plugin.get_plugin_by_name('metainfo_series')
guess_series = metainfo_series.instance.guess_series
if guess_series(cur_filename):
parser = guess_series(cur_filename)
entry['series_name'] = parser.name
# if the last four chars are numbers, REALLY good chance it's actually a year...
# fix it if so desired
log.verbose(entry['series_name'])
if re.search(r'\d{4}', entry['series_name'][-4:]) is not None and config['fix_year']:
entry['series_name'] = ''.join([entry['series_name'][0:-4], '(',
entry['series_name'][-4:], ')'])
log.verbose(entry['series_name'])
parser.data = cur_filename
parser.parse
log.debug(parser.id_type)
if parser.id_type == 'ep':
entry['series_id'] = ''.join(['S', str(parser.season).rjust(2, str('0')), 'E',
str(parser.episode).rjust(2, str('0'))])
elif parser.id_type == 'sequence':
entry['series_id'] = parser.episode
elif parser.id_type and parser.id:
entry['series_id'] = parser.id
else:
parser = get_plugin_by_name('parsing').instance.parse_movie(cur_filename)
parser.parse()
log.info(parser)
testname = parser.name
testyear = parser.year
parser.data = entry['title']
parser.parse()
log.info(parser)
if len(parser.name) > len(testname):
entry['name'] = parser.name
entry['movie_name'] = parser.name
else:
entry['name'] = testname
entry['movie_name'] = testname
if parser.year:
entry['year'] = parser.year
entry['movie_year'] = parser.year
| else:
| entry['year'] = testyear
entry['movie_year'] = testyear
if config['rename_content_files']:
if config['content_is_episodes']:
try:
config['aria_config']['out'] = entry.render(config['rename_template']) + file_ext
log.verbose(config['aria_config']['out'])
except RenderError as e:
log.error('Could not rename file %s: %s.' % (cur_filename, e))
continue
else:
try:
config['aria_config']['out'] = entry.render(config['rename_template']) + file_ext
log.verbose(config['aria_config']['out'])
except RenderError as e:
log.error('Could not rename file %s: %s. Try enabling imdb_lookup in this task'
' to assist.' % (cur_filename, e))
continue
elif 'torrent_info_hash' not in entry:
config['aria_config']['out'] = cur_filename
if config['do'] == 'add-new':
log.debug('Adding new file')
new_download = 0
if 'gid' in config['aria_config']:
try:
r = s.aria2.tellStatus(config['aria_config']['gid'], ['gid', 'status'])
log.info('Download status for %s (gid %s): %s' % (
config['aria_config'].get('out', config['uri']), r['gid'],
r['status']))
if r['status'] == 'paused':
try:
if not task.manager.options.test:
s.aria2.unpause(r['gid'])
log.info(' Unpaused down |
import g | el.gelcd
import gel.gelfista
import gel.gelpaths
import gel.ridgepaths
__version__ = "2.0.0"
__all__ = ["gelcd", "gelfista", "gelpaths", "ridg | epaths"]
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# blog.py - maps requests to methods and handles them accordingly.
# Copyright (C) 2017 Jose Ricardo Ziviani
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --
# IMPORTS
# --
from src.templates import templates
import os
import sys
import subprocess
# --
# CONSTANTS
# --
DEFAULT_DIR = os.path.dirname(os.path.realpath(__file__))
# --
# IMPLEMENTATION
# --
def run_command(cmd):
'''
Runs arbitrary command on shell
'''
proc = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stderr=subp | rocess.PIPE)
out, err = proc.communicate()
if err:
print err
#sys.exit(1)
return out
def create_feeds():
'''
Creates th | e feed.xml file based on the published posts available
'''
print 'Creating feeds'
tmpls = templates()
if not tmpls.generate_metadata():
print 'ERROR: cannot create feed.xml'
sys.exit(1)
def update_folder():
'''
Updates local repository
'''
print 'Updating folders'
run_command('git pull')
# --
# ENTRY POINT
# --
if __name__ == '__main__':
update_folder()
create_feeds()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='StaticPage',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('url', models.CharField(verbose_name='URL', db_index=True, max_length=100)),
('title', models.CharField(verbose_name='title', max_length=200)),
('title_ru', models.CharField(null=True, verbose_name='title', max_length=200)),
('title_en', models.CharField(null=True, verbose_name='title', max_length=200)),
('content', models.TextField(blank=True, verbose_name='content')),
('content_ru', models.TextField(null=True, | blank=True, | verbose_name='content')),
('content_en', models.TextField(null=True, blank=True, verbose_name='content')),
('template_name', models.CharField(help_text="Example: 'staticpages/contact_page.html'. If this isn't provided, the system will use 'staticpages/default.html'.", verbose_name='template name', blank=True, max_length=70)),
],
options={
'verbose_name_plural': 'static pages',
'ordering': ('url',),
'verbose_name': 'static page',
},
bases=(models.Model,),
),
]
|
from django.core.serializers.json import Serializer
from ..builtins import StringIO
class CustomizableLocalFieldsSerializer(Serializer):
    """
    This is a not so elegant copy/paste from django.core.serializer.base.Serializer serialize method.
    We wanted to add parent fields of current serialized object because they are lacking when we want to import them
    again.
    We had to redefine serialize() method to add the possibility to subclass methods that are getting local
    fields to serialize (get_local_fields and get_local_m2m_fields)
    """

    internal_use_only = False

    def serialize(self, queryset, **options):
        """
        Serialize a queryset.

        Same contract as Django's base Serializer.serialize(); the only
        difference from the upstream copy is that the field lists come from
        the overridable get_local_fields()/get_local_m2m_fields() hooks.
        """
        self.options = options
        self.stream = options.pop("stream", StringIO())
        self.selected_fields = options.pop("fields", None)
        self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False)
        self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
        progress_bar = self.progress_class(
            options.pop('progress_output', None), options.pop('object_count', 0)
        )
        self.start_serialization()
        self.first = True
        for count, obj in enumerate(queryset, start=1):
            self.start_object(obj)
            # Use the concrete parent class' _meta instead of the object's _meta
            # This is to avoid local_fields problems for proxy models. Refs #17717.
            concrete_model = obj._meta.concrete_model
            for field in self.get_local_fields(concrete_model):
                if field.serialize:
                    if field.remote_field is None:
                        if self.selected_fields is None or field.attname in self.selected_fields:
                            self.handle_field(obj, field)
                    else:
                        # attname[:-3] drops the FK attname suffix (presumably
                        # '_id') so selection matches the field name.
                        if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                            self.handle_fk_field(obj, field)
            for field in self.get_local_m2m_fields(concrete_model):
                if field.serialize:
                    if self.selected_fields is None or field.attname in self.selected_fields:
                        self.handle_m2m_field(obj, field)
            self.end_object(obj)
            progress_bar.update(count)
            if self.first:
                self.first = False
        self.end_serialization()
        return self.getvalue()

    def get_local_fields(self, concrete_model):
        # Hook: subclasses override this to change which plain/FK fields
        # are serialized (e.g. to include parent-model fields).
        return concrete_model._meta.local_fields

    def get_local_m2m_fields(self, concrete_model):
        # Hook: subclasses override this to change which m2m fields are
        # serialized.
        return concrete_model._meta.many_to_many
|
ot cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condi | tion = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shap | e - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
  """Selects `x` in train phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Arguments:
      x: What to return in train phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on the `training` flag.
      the `training` flag defaults to `K.learning_phase()`.
  """
  if training is None:
    training = learning_phase()

  # Phase known statically at trace time: resolve immediately.
  if training is True or training == 1:
    return x() if callable(x) else x
  if training is False or training == 0:
    return alt() if callable(alt) else alt

  # Otherwise the learning phase is a placeholder tensor: build a
  # symbolic switch and let the runtime pick the branch.
  return switch(training, x, alt)
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
  """Selects `x` in test phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Arguments:
      x: What to return in test phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on `K.learning_phase`.
  """
  # Test phase is the complement of train phase, so delegate with the
  # two branches swapped.
  return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
  """Rectified linear unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold.
      threshold: float. Threshold value for thresholded activation.

  Returns:
      A tensor.
  """
  if alpha != 0.:
    # Fast path: plain leaky ReLU with no saturation/threshold.
    if max_value is None and threshold == 0:
      return nn.leaky_relu(x, alpha=alpha)

    # NOTE: negative_part is computed from the ORIGINAL x here, before x
    # is overwritten below; it is subtracted again at the end.
    if threshold != 0:
      negative_part = nn.relu(-x + threshold)
    else:
      negative_part = nn.relu(-x)

  clip_max = max_value is not None

  if threshold != 0:
    # computes x for x > threshold else 0
    x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
  elif max_value == 6:
    # if no threshold, then can use nn.relu6 native TF op for performance
    x = nn.relu6(x)
    clip_max = False
  else:
    x = nn.relu(x)

  if clip_max:
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    zero = _to_tensor(0., x.dtype.base_dtype)
    x = clip_ops.clip_by_value(x, zero, max_value)

  if alpha != 0.:
    # Re-apply the negative slope saved above.
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
  """Exponential linear unit.

  Arguments:
      x: A tensor or variable to compute the activation function for.
      alpha: A scalar, slope of negative section.

  Returns:
      A tensor.
  """
  activated = nn.elu(x)
  if alpha != 1:
    # Scale only the negative (saturating) branch of the ELU output.
    activated = array_ops.where(x > 0, activated, alpha * activated)
  return activated
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
  """Softmax of a tensor.

  Arguments:
      x: A tensor or variable.
      axis: The dimension softmax would be performed on.
          The default is -1 which indicates the last dimension.

  Returns:
      A tensor.
  """
  # Thin wrapper: defer entirely to the native TF op.
  output = nn.softmax(x, axis=axis)
  return output
@keras_export('keras.backend.softplus')
def softplus(x):
  """Softplus of a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper: defer entirely to the native TF op.
  result = nn.softplus(x)
  return result
@keras_export('keras.backend.softsign')
def softsign(x):
  """Softsign of a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper: defer entirely to the native TF op.
  result = nn.softsign(x)
  return result
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last', and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
  """
  if not from_logits:
    # In graph mode, if `output` was produced directly by a Softmax op we
    # fall through to the else-branch and recover its logits input instead
    # of computing the crossentropy from probabilities.
    if context.executing_eagerly() or output.op.type != 'Softmax':
      axis = axis % len(output.shape)
      # scale preds so that the class probas of each sample sum to 1
      output = output / math_ops.reduce_sum(output, axis, True)
      # manual computation of crossentropy
      epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
      output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
      return -math_ops.reduce_sum(target * math_ops.log(output), axis)
    else:
      # When softmax activation function is used for output operation, we
      # use logits from the softmax function directly to compute loss in order
      # to prevent collapsing zero when training.
      # See b/117284466
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]
  # Either `from_logits=True`, or `output` was unwrapped to logits above.
  return nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_ |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
# Addresses/devices. Items that are not listed are reserved/unknown.
address = {
    0x40: 'Matsui TV',
}

# Digit buttons 0-9: code -> [long name, short name] (both are the digit).
digits = {n: [str(n), str(n)] for n in range(10)}

# Commands. Items that are not listed are reserved/unknown.
# The 0x40 command set consists of the ten digit keys plus the extra
# keys listed below.
command = {
    0x40: dict(digits),
}
command[0x40].update({
    11: ['-/--', '-/--'],
    16: ['Mute', 'M'],
    18: ['Standby', 'StBy'],
    26: ['Volume up', 'Vol+'],
    27: ['Program up', 'P+'],
    30: ['Volume down', 'Vol-'],
    31: ['Program down', 'P-'],
    68: ['AV', 'AV'],
})
|
class Capture(object):
    """
    Generic Capture entity. Base class
    that both Screenshot and Pdiff can inherit from.

    Attributes:
        path: path of the captured image file, or None when the capture is
            missing/unreadable.
        hashvalue: md5 hex digest of the file contents, or None.
    """

    def __init__(self, img_path):
        # Always initialize the attributes so a Capture created with no
        # (or an unreadable) image still has a well-defined state.
        # Previously, Capture(None) left `path`/`hashvalue` unset, which
        # raised AttributeError on later access.
        self.path = None
        self.hashvalue = None
        if img_path is not None:
            try:
                self.path = img_path
                self.hashvalue = self._set_hash()
            except IOError:
                self.path = None
                self.hashvalue = None

    def _set_hash(self):
        """Return the md5 hex digest of the file at self.path."""
        from hashlib import md5
        md5hasher = md5()
        # Open in binary mode: image bytes must not go through text
        # decoding (previously 'r', which breaks md5 of binary data).
        with open(self.path, 'rb') as f:
            md5hasher.update(f.read())
        return str(md5hasher.hexdigest())

    def to_string(self):
        """Render all public attributes as ' name: value ' lines."""
        entity = ""
        for each in self.__dict__:
            if each[0] != "_":
                entity += " %s: %s \n" % (each, self.__dict__.get(each))
        return entity
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless | required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from airflow.models import DAG
from airflow.operators.dummy_operator impo | rt DummyOperator
from airflow.utils.timezone import datetime
class DummyWithOnKill(DummyOperator):
    """Operator that forks a child process and sleeps.

    Used to verify that killing a task also tidies up processes the task
    launched, and that on_kill() is actually invoked (checked via the
    marker file written below).
    """

    def execute(self, context):
        import os
        # This runs extra processes, so that we can be sure that we correctly
        # tidy up all processes launched by a task when killing.
        # os.fork() returns 0 in the child, so the child runs `sleep 10`
        # while the parent falls through to time.sleep(10).
        if not os.fork():
            os.system('sleep 10')
        time.sleep(10)

    def on_kill(self):
        # Leave an on-disk marker so the test harness can assert that
        # on_kill was executed.
        self.log.info("Executing on_kill")
        with open("/tmp/airflow_on_kill", "w") as f:
            f.write("ON_KILL_TEST")
# NOTE(review): the previous comment here ("DAG tests backfill with pooled
# tasks...") looked copy-pasted from another test module; this DAG exercises
# on_kill/process-cleanup behavior via DummyWithOnKill — confirm intent.
dag1 = DAG(dag_id='test_on_kill', start_date=datetime(2015, 1, 1))
dag1_task1 = DummyWithOnKill(task_id='task1', dag=dag1, owner='airflow')
|
# -*- coding: utf-8 -*-
import sys
import os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Sphinx build settings for the django-calaccess-processed-data docs.
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-calaccess-processed-data'
# Copyright year tracks the build date automatically.
copyright = u'%s, California Civic Data Coalition' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-calaccess-processed-datadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'django-calaccess-processed-data.tex', u'django-calaccess-processed-data Documentation',
     u'California Civic Data Coalition', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-calaccess-processed-data', u'django-calaccess-processed-data Documentation',
     [u'California Civic Data Coalition'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-calaccess-processed-data', u'django-calaccess-processed-data Documentation',
     u'California Civic Data Coalition', 'django-calaccess-processed-data', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
"""
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
OpenCV Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
OpenCV Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
import Utils
import Colors
from .. base import odict
from Constants import BORDER_PROXIMITY_SENSITIVITY
from Constants import \
BLOCK_LABEL_PADDING, \
PORT_SEPARATION, LABEL_SEPARATION, \
PORT_BORDER_SEPARATION, POSSIBLE_ROTATIONS
import pygtk
pygtk.require('2.0')
import gtk
import pango
# Template for the block label (Cheetah-style `$` substitution, rendered via
# Utils.parse_template): bold block name, red when the block is invalid.
BLOCK_MARKUP_TMPL="""\
#set $foreground = $block.is_valid() and 'black' or 'red'
<span foreground="$foreground" font_desc="Sans 8"><b>$encode($block.get_name())</b></span>"""
class Block(Element):
    """The graphical signal block."""

    def __init__(self):
        """
        Block contructor.
        Add graphics related params to the block: the GUI coordinate and
        rotation are stored as hidden 'raw' params so they persist with
        the flowgraph.
        """
        #add the position param
        self.get_params().append(self.get_parent().get_parent().Param(
            block=self,
            n=odict({
                'name': 'GUI Coordinate',
                'key': '_coordinate',
                'type': 'raw',
                'value': '(0, 0)',
                'hide': 'all',
            })
        ))
        self.get_params().append(self.get_parent().get_parent().Param(
            block=self,
            n=odict({
                'name': 'GUI Rotation',
                'key': '_rotation',
                'type': 'raw',
                'value': '0',
                'hide': 'all',
            })
        ))
        Element.__init__(self)

    def get_coordinate(self):
        """
        Get the coordinate from the position param.

        Returns:
            the coordinate tuple (x, y) or (0, 0) if failure
        """
        try: #should evaluate to tuple
            # NOTE(review): eval() of a param value loaded from the
            # flowgraph file -- trusts the document being opened.
            coor = eval(self.get_param('_coordinate').get_value())
            x, y = map(int, coor)
            # Clamp the coordinate to stay within the drawable area.
            fgW, fgH = self.get_parent().get_size()
            if x <= 0:
                x = 0
            elif x >= fgW - BORDER_PROXIMITY_SENSITIVITY:
                x = fgW - BORDER_PROXIMITY_SENSITIVITY
            if y <= 0:
                y = 0
            elif y >= fgH - BORDER_PROXIMITY_SENSITIVITY:
                y = fgH - BORDER_PROXIMITY_SENSITIVITY
            return (x, y)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Reset to a sane default.
            self.set_coordinate((0, 0))
            return (0, 0)

    def set_coordinate(self, coor):
        """
        Set the coordinate into the position param.

        Args:
            coor: the coordinate tuple (x, y)
        """
        self.get_param('_coordinate').set_value(str(coor))

    def get_rotation(self):
        """
        Get the rotation from the position param.

        Returns:
            the rotation in degrees or 0 if failure
        """
        try: #should evaluate to an int
            rotation = eval(self.get_param('_rotation').get_value())
            return int(rotation)
        except Exception:
            # Fix: was a bare `except:`; fall back to the first allowed
            # rotation and persist it.
            self.set_rotation(POSSIBLE_ROTATIONS[0])
            return POSSIBLE_ROTATIONS[0]

    def set_rotation(self, rot):
        """
        Set the rotation into the position param.

        Args:
            rot: the rotation in degrees
        """
        self.get_param('_rotation').set_value(str(rot))

    def create_shapes(self):
        """Update the block, parameters, and ports when a change occurs."""
        Element.create_shapes(self)
        # Swap width/height when the block is drawn rotated.
        if self.is_horizontal(): self.add_area((0, 0), (self.W, self.H))
        elif self.is_vertical(): self.add_area((0, 0), (self.H, self.W))

    def create_labels(self):
        """Create the labels for the signal block."""
        Element.create_labels(self)
        self._bg_color = self.get_enabled() and Colors.BLOCK_ENABLED_COLOR or Colors.BLOCK_DISABLED_COLOR
        layouts = list()
        #create the main layout
        layout = gtk.DrawingArea().create_pango_layout('')
        layouts.append(layout)
        layout.set_markup(Utils.parse_template(BLOCK_MARKUP_TMPL, block=self))
        self.label_width, self.label_height = layout.get_pixel_size()
        #display the params (those not hidden)
        markups = [param.get_markup() for param in self.get_params() if param.get_hide() not in ('all', 'part')]
        if markups:
            layout = gtk.DrawingArea().create_pango_layout('')
            layout.set_spacing(LABEL_SEPARATION*pango.SCALE)
            layout.set_markup('\n'.join(markups))
            layouts.append(layout)
            w,h = layout.get_pixel_size()
            self.label_width = max(w, self.label_width)
            self.label_height += h + LABEL_SEPARATION
        width = self.label_width
        height = self.label_height
        #setup the pixmap
        pixmap = self.get_parent().new_pixmap(width, height)
        gc = pixmap.new_gc()
        gc.set_foreground(self._bg_color)
        pixmap.draw_rectangle(gc, True, 0, 0, width, height)
        #draw the layouts (title centered, params left-aligned below)
        h_off = 0
        for i,layout in enumerate(layouts):
            w,h = layout.get_pixel_size()
            if i == 0: w_off = (width-w)/2
            else: w_off = 0
            pixmap.draw_layout(gc, w_off, h_off, layout)
            h_off = h + h_off + LABEL_SEPARATION
        #create vertical and horizontal pixmaps
        self.horizontal_label = pixmap
        if self.is_vertical():
            self.vertical_label = self.get_parent().new_pixmap(height, width)
            Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
        #calculate width and height needed: tall enough for the label and
        #for whichever port column (regular or bus) needs the most room
        self.W = self.label_width + 2*BLOCK_LABEL_PADDING
        self.H = max(*(
            [self.label_height+2*BLOCK_LABEL_PADDING] + [2*PORT_BORDER_SEPARATION + \
            sum([port.H + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
            for ports in (self.get_sources_gui(), self.get_sinks_gui())] +
            [4*PORT_BORDER_SEPARATION + \
            sum([(port.H) + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
            for ports in ([i for i in self.get_sources_gui() if i.get_type() == 'bus'], [i for i in self.get_sinks_gui() if i.get_type() == 'bus'])]
        ))

    def draw(self, gc, window):
        """
        Draw the signal block with label and inputs/outputs.

        Args:
            gc: the graphics context
            window: the gtk window to draw on
        """
        x, y = self.get_coordinate()
        #draw main block
        Element.draw(
            self, gc, window, bg_color=self._bg_color,
            border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or Colors.BORDER_COLOR,
        )
        #draw label image (pre-rendered pixmap for the current rotation)
        if self.is_horizontal():
            window.draw_drawable(gc, self.horizontal_label, 0, 0, x+BLOCK_LABEL_PADDING, y+(self.H-self.label_height)/2, -1, -1)
        elif self.is_vertical():
            window.draw_drawable(gc, self.vertical_label, 0, 0, x+(self.H-self.label_height)/2, y+BLOCK_LABEL_PADDING, -1, -1)
        #draw ports
        for port in self.get_ports_gui():
            port.draw(gc, window)

    def what_is_selected(self, coor, coor_m=None):
        """
        Get the element that is selected.

        Ports take precedence over the block body.

        Args:
            coor: the (x,y) tuple
            coor_m: the (x_m, y_m) tuple

        Returns:
            this block, a port, or None
        """
        for port in self.get_ports_gui():
            port_selected = port.what_is_selected(coor, coor_m)
            if port_selected: return port_selected
        return Element.what_is_selected(self, coor, coor_m)
|
import logging
import pathlib
import pytest
from datetime import date, time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from chronophore.models import Base, Entry, User
logging.disable(logging.CRITICAL)
@pytest.fixture()
def nonexistent_file(tmpdir, request):
    """Hand the test a path that does not exist on disk.

    The path is cleared both before the test (in case of leftovers) and
    again once the test finishes.
    """
    target = pathlib.Path(str(tmpdir)).joinpath('nonexistent')

    def _remove():
        if target.exists():
            target.unlink()

    _remove()
    request.addfinalizer(_remove)
    return target
@pytest.fixture()
def invalid_file(tmpdir, request):
    """Hand the test a config file whose contents are not parseable.

    The file is removed once the test finishes with it.
    """
    bad_path = pathlib.Path(str(tmpdir)).joinpath('invalid')
    with bad_path.open('w') as handle:
        handle.write('this is invalid')

    def _remove():
        if bad_path.exists():
            bad_path.unlink()

    request.addfinalizer(_remove)
    return bad_path
@pytest.fixture()
def db_session(request, test_users, test_entries):
    """Provide a session bound to a fresh in-memory sqlite database.

    The schema is created and the sample users/entries are added before
    the session is handed over; the session is closed when the test is
    finished with it.
    """
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add_all(list(test_users.values()))
    session.add_all(test_entries)

    request.addfinalizer(session.close)
    return session
@pytest.fixture()
def test_users():
    """Return a dict of sample User rows, keyed by lowercased first name.

    The set covers the interesting combinations: student-only, tutor-only,
    student+tutor, with/without education_plan, and one departed member
    (non-None date_left).
    """
    test_users = dict(
        frodo=User(
            user_id='888000000',
            date_joined=date(2014, 12, 11),
            date_left=None,
            education_plan=False,
            personal_email='baggins.frodo@gmail.com',
            first_name='Frodo',
            last_name='Baggins',
            major='Medicine',
            is_student=True,
            is_tutor=True,
        ),
        sam=User(
            user_id='888111111',
            date_joined=date(2015, 2, 16),
            date_left=None,
            education_plan=True,
            personal_email='gamgee.samwise@gmail.com',
            first_name='Sam',
            last_name='Gamgee',
            major='Agriculture',
            is_student=True,
            is_tutor=False,
        ),
        # Former member: the only user with a non-None date_left.
        merry=User(
            user_id='888222222',
            date_joined=date(2015, 4, 12),
            date_left=date(2016, 3, 24),
            education_plan=True,
            personal_email='brandybuck.merriadoc@gmail.com',
            first_name='Merry',
            last_name='Brandybuck',
            major='Physics',
            is_student=False,
            is_tutor=True,
        ),
        pippin=User(
            user_id='888333333',
            date_joined=date(2015, 2, 16),
            date_left=None,
            education_plan=False,
            personal_email='took.peregrin@gmail.com',
            first_name='Pippin',
            last_name='Took',
            major='Botany',
            is_student=True,
            is_tutor=False,
        ),
        gandalf=User(
            user_id='888444444',
            date_joined=date(2010, 10, 10),
            date_left=None,
            education_plan=False,
            personal_email='mithrandir@gmail.com',
            first_name='Gandalf',
            last_name='the Grey',
            major='Computer Science',
            is_student=False,
            is_tutor=True,
        ),
    )
    return test_users
@pytest.fixture()
def test_entries():
    """Return a list of sample Entry rows for one day (2016-02-17).

    Two entries are still open (time_out=None) and two are completed,
    covering both 'student' and 'tutor' user types.
    """
    test_entries = [
        # Open entry: signed in, not yet signed out.
        Entry(
            uuid='4407d790-a05f-45cb-bcd5-6023ce9500bf',
            date=date(2016, 2, 17),
            time_in=time(10, 45, 23),
            time_out=None,
            user_id='888333333',
            user_type='student',
        ),
        Entry(
            uuid='1f4f10a4-b0c6-43bf-94f4-9ce6e3e204d2',
            date=date(2016, 2, 17),
            time_in=time(10, 45, 48),
            time_out=time(13, 30, 18),
            user_id='888222222',
            user_type='tutor',
        ),
        Entry(
            uuid='7b4ae0fc-3801-4412-998f-ace14829d150',
            date=date(2016, 2, 17),
            time_in=time(12, 45, 9),
            time_out=time(16, 44, 56),
            user_id='888111111',
            user_type='student',
        ),
        # Second (open) entry for user 888222222 with the same time_in as
        # their completed one above.
        Entry(
            uuid='42a1eab2-cb94-4d05-9bab-e1a021f7f949',
            date=date(2016, 2, 17),
            time_in=time(10, 45, 48),
            time_out=None,
            user_id='888222222',
            user_type='tutor',
        ),
    ]
    return test_entries
| |
# Relative to (Blade outlet - Blade inlet)
_DefinedValues_dict["Rot_Internal_tongeangle"] = 45
_DefinedValues_dict["Rot_Internal_and_exit_tongue_separation_adim"] = 3 # Relative to Blade thickness
_DefinedValues_dict["Rot_Internal_tongLength_adim"] = 0.6 # Relative to Blade inlet Radius
_DefinedValues_dict["InletRadius_adim"] = 1.35 # Relative to Blade inlet Radius
# only for 2D
#_DefinedValues_dict["TwoD_inlet_adim"] = 0.7 # This is inlet circle * Rot_in
# Mesh fineness definition
_DefinedValues_dict["Mesh_scale_factor"] = 1
_DefinedValues_dict["Mesh_VOLUTE_max_area"] = 50
_DefinedValues_dict["Mesh_VOLUTE_min_area"] = 10
_DefinedValues_dict["Mesh_ROTOR_max_area"] = 30
_DefinedValues_dict["Mesh_ROTOR_min_area"] = 2
_DefinedValues_dict["Mesh_INLET_max_area"] = 15
_DefinedValues_dict["Mesh_INLET_min_area"] = 10
for i in range(len(VariablesToModify)):
if VariablesToModify[i] in _DefinedValues_dict:
if type(VariablesToModify[i+1]) is not str:
_DefinedValues_dict[VariablesToModify[i]]=VariablesToModify[i+1]
else: raise RuntimeError, "After variable %s there isn't a number % dicc[VariablesToModify[i]]"
return _DefinedValues_dict
def ComputeGeometry(ModifiedVariables):
# For modifying a variable in the template from the user defined values (function UserValues). First import the definition with "Var = ModifiedVariables[...]" and then export it with "_returnDicc[...] = nbr "
# Definitions
_returnDicc = dict()
Ri = ModifiedVariables["Blade_Ri"]
Ro = ModifiedVariables["Blade_Ro"]
gamma = ModifiedVariables["Blade_gamma"]
B1 = ModifiedVariables["Blade_B1"]
B2 = ModifiedVariables["Blade_B2"]
Blade_Cut_RotorSpanfract = ModifiedVariables["Blade_Cut_RotorSpanfract"]
Blade_Thickness =ModifiedVariables["Blade_Thickness"]
Blade_LeadingEdge_Cut_Height_frac =ModifiedVariables["Blade_LeadingEdge_Cut_Height_frac"]
Blade_Height = ModifiedVariables["Blade_Height"]
Blade_TrailingEdge_Cut_Height =ModifiedVariables["Blade_TrailingEdge_Cut_Height_fac"]*Blade_Height
Blade_Number =ModifiedVariables["Blade_Number"]
Rot_in = ModifiedVariables["Rot_in"]
Rot_out = ModifiedVariables["Rot_out"]
Rot_out_unit_step_fraction = ModifiedVariables["Rot_out_unit_step_fraction"]
Rot_Height = ModifiedVariables["Rot_Height"]
Rot_out_ceiling_fraction = ModifiedVariables["Rot_out_ceiling_fraction"]
Rot_Internal_tongeangle = ModifiedVariables["Rot_Internal_tongeangle"]
Rot_Internal_and_exit_tongue_separation = ModifiedVariables["Rot_Internal_and_exit_tongue_separation_adim"] * Blade_Thickness
Rot_Internal_tongLength = ModifiedVariables["Rot_Internal_tongLength_adim"] * Ri
InletRadius_adim = ModifiedVariables["InletRadius_adim"]
InletRadius = InletRadius_adim * Ri
Vol_beta = ModifiedVariables["Vol_beta"] # This is the angle at which the volute tongue starts to create the seashell respect to the tangent of the origin centered circle crossing that point.
Vol_omega = ModifiedVariables["Vol_omega"] #
Vol_gamma = ModifiedVariables["Vol_gamma"] # The user will introduce an angle like 40, but we work with 40+180
Vol_F = ModifiedVariables["Vol_F_adim"]*Rot_out # Distance between Rotor outlet and Vol_n0 divided by Rot_out
Vol_Rc = ModifiedVariables["Vol_Rc_adim"]*Rot_out # Radius of the volute tonge
Vol_W = ModifiedVariables["Vol_W_adim"] *Rot_out
Vol_n1x = ModifiedVariables["Vol_N1x_adim"] *Rot_out
Vol_n2y = ModifiedVariables["Vol_N2y_adim"] *Rot_out
Vol_n3x = ModifiedVariables["Vol_N3x_adim"] *Rot_out
Vol_n4y = ModifiedVariables["Vol_N4y_adim"]*Rot_out
Vol_Height = ModifiedVariables["Vol_Height"]
Vol_difussor_length = ModifiedVariables["Vol_difussor_length_adim"]*Rot_out
Mesh_VOLUTE_max_area = ModifiedVariables["Mesh_VOLUTE_max_area"]
Mesh_VOLUTE_min_area = ModifiedVariables["Mesh_VOLUTE_min_area"]
Mesh_ROTOR_max_area = ModifiedVariables["Mesh_ROTOR_max_area"]
Mesh_ROTOR_min_area = ModifiedVariables["Mesh_ROTOR_min_area"]
Mesh_INLET_max_area = ModifiedVariables["Mesh_INLET_max_area"]
Mesh_INLET_min_area = ModifiedVariables["Mesh_INLET_min_area"]
HugeValue = 20 * Rot_out
# CALCULATIONS OF BLADE
gamma = gamma/360*2*pi
# theta = theta/360*2*pi
B1 = B1/360*2*pi
B2 = B2/360*2*pi
"""
# Angle of attack
uy=sin(pi/2+gamma)
ux=cos(pi/2+gamma)
N1px = ux
N1py = uy
"""
N1x = 0
N1y = Ri
Ux = -sin(B1)
Uy = cos(B1)
Vx = -Uy
Vy = Ux
# Converting from gamma to omega
T = (-2*cos(gamma)*Ri+sqrt(4*cos(gamma)**2*Ri**2 + 4* (Ro**2-Ri**2))) / (2)
N2x = -sin(gamma)*T
N2y = Ri + cos(gamma) *T
omega = acos(N2y/Ro)
Theta = gamma + B2
Sx = - cos(pi/2-Theta)
Sy = sin(pi/2-Theta)
CLx = -Sy
CLy = Sx
# Solve location geometry constrains
Crod=(N1y*Sx-N2y*Sx-N1x*Sy+N2x*Sy)/(CLy*Sx-CLx*Sy+Sy*Vx-Sx*Vy)
H=-(CLy*N1x-CLx*N1y-CLy*N2x+CLx*N2y+N1y*Vx-N2y*Vx-N1x*Vy+N2x*Vy)/(CLy*Sx-CLx*Sy+Sy*Vx-Sx*Vy)
Cx=N1x+Vx*Crod
Cy=N1y+Vy*Crod
Lx=Cx-CLx*Crod
Ly=Cy-CLy*Crod
N3x=N2x-Sy*Blade_Thickness
N3y=N2y+Sx*Blade_Thickness
N4x=Lx+CLx*Blade_Thickness
N4y=Ly+CLy*Blade_Thickness
N5x=N1x+Vx*Blade_Thickness
N5y=N1y+Vy*Blade_Thickness
# For 3D, calculate Blade N1yup, and the box (face perpendicular to [N1px,N1py] is [N1py, -N1px]) that will cut the Blade.
Blade_N1zup = N1y + Blade_Height
Blade_N5zup = N5y + Blade_Height
_BladeChord = sqrt((N2y-N1y)**2+(N2x-N1x)**2)
Blade_CutNodeY = N1y + _BladeChord * Blade_Cut_RotorSpanfract * cos(gamma)
Blade_CutNodeX = N1x - _BladeChord * Blade_Cut_RotorSpanfract * sin(gamma)
# RETURN | BLADES
_returnDicc["Blade_N1x"] = N1x
_returnDicc["Blade_N1y"] = N1y
_returnDicc["Blade_N2x"] = N2x
_returnDicc["Blade_N2y"] = N2y
_returnDicc["Blade_N3x"] = | N3x
_returnDicc["Blade_N3y"] = N3y
_returnDicc["Blade_N4x"] = N4x
_returnDicc["Blade_N4y"] = N4y
_returnDicc["Blade_N5x"] = N5x
_returnDicc["Blade_N5y"] = N5y
_returnDicc["Blade_Lx"] = Lx
_returnDicc["Blade_Ly"] = Ly
_returnDicc["Blade_N1px"] = Ux
_returnDicc["Blade_N1py"] = Uy
_returnDicc["Blade_Sx"] = Sx
_returnDicc["Blade_Sy"] = Sy
_returnDicc["Blade_Ri"] = Ri
_returnDicc["Blade_Ro"] = Ro
_returnDicc["Blade_Thickness"] = Blade_Thickness
_returnDicc["Blade_Thickness_ByMinusThree"] = - Blade_Thickness*3
_returnDicc["Halve_Blade_Thickness"] = Blade_Thickness/2
_returnDicc["Minus_Blade_Thickness"] = -Blade_Thickness
_returnDicc["Blade_Height"] = Blade_Height
_returnDicc["Blade_Number"] = Blade_Number
_returnDicc["Blade_CutNodeY"] = Blade_CutNodeY
_returnDicc["Blade_CutNodeX"] = Blade_CutNodeX
_returnDicc["Blade_LeadingEdge_Cut_Height"] = Blade_LeadingEdge_Cut_Height_frac * Blade_Height
_returnDicc["Blade_TrailingEdge_Cut_Height"] = Blade_TrailingEdge_Cut_Height
# CALCULATION OF ROTOR
_returnDicc["Rot_in"] = Rot_in
_returnDicc["Rot_out"] = Rot_out
_returnDicc["Rot_Height"] = Rot_Height
_returnDicc["InletRadius"] = InletRadius
_returnDicc["Rot_out_unit_step"] = Rot_out_unit_step_fraction * Rot_out
_returnDicc["Rot_out_ceiling_fraction"] = Rot_out_ceiling_fraction
_returnDicc["Rot_Internal_tongeangle"] = Rot_Internal_tongeangle
_returnDicc["Rot_Internal_and_exit_tongue_separation"] = Rot_Internal_and_exit_tongue_separation
_returnDicc["Rot_Internal_tongLength"] = Rot_Internal_tongLength
# CALCULATIONS OF VOLUTE
# Convert grad to rad
Vol_gamma = Vol_gamma/360*2*pi
Vol_omega = Vol_omega/360*2*pi
Vol_beta = Vol_beta/360*2*pi
# Start Calculations
Vol_vx = -sin(Vol_omega)
Vol_vy = -cos(Vol_omega)
Vol_ux = Vol_vy
Vol_uy = Vol_vx
Vol_nstart_x = -cos(Vol_gamma)*(Rot_out+Vol_F+Vol_Rc)
Vol_nstart_y = sin(Vol_gamma)*(Rot_out+Vol_F+Vol_Rc)
Vol_cx = Vol_nstart_x + Vol_ux * Vol_Rc
Vol_cy = Vol_nstart_y + Vol_uy * Vol_Rc
# start iteration to find N0
Vol_gamma_des = Vol_gamma
for i in range(7):
Vol_n0x_rel = Vol_Rc*cos(3*pi/2 + Vol_gamma_des + Vol_beta)
Vol_n0y_rel = Vol_Rc*sin(3*pi/2 + Vol_gamma_des + Vol_beta)
Vol_nstart_x_rel = Vol_Rc*cos(pi-Vol_omega)
Vol_nstart_y_rel = Vol_Rc*sin(pi-Vol_omega)
Vol_Ax = Vol_n0x_rel - Vol_nstart_x_rel
Vol_Ay = Vol_n0y_rel - Vol_nstart_y_rel
Vol_n0x_abs = Vol_nstart_x + Vol_Ax
Vol_n0y_abs = Vol_nstart_y + Vol_Ay
Vol_gamma_des = pi/2-atan( |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.extensions import PageExtensionAdmin, TitleExtensionAdmin
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .forms import TitleMetaAdminForm
from .models import PageMeta, TitleMeta
class PageMetaAdmin(PageExtensionAdmin):
    """Admin for the PageMeta page extension (registered below)."""
    # Raw-id widget for the author FK keeps the form usable with many users.
    raw_id_fields = ('og_author',)
    # Image first; the social-network groups are collapsed by default.
    fieldsets = (
        (None, {'fields': ('image',)}),
        (_('OpenGraph'), {
            'fields': (
                'og_type', ('og_author', 'og_author_url', 'og_author_fbid'),
                ('og_publisher', 'og_app_id')
            ),
            'classes': ('collapse',)
        }),
        (_('Twitter Cards'), {
            'fields': ('twitter_type', 'twitter_author'),
            'classes': ('collapse',)
        }),
        (_('Google+ Snippets'), {
            'fields': ('gplus_type', 'gplus_author'),
            'classes': ('collapse',)
        }),
    )

    class Media:
        # App-provided stylesheet for the admin change form.
        css = {
            'all': ('%sdjangocms_page_meta/css/%s' % (
                settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
        }

    def get_model_perms(self, request):
        """
        Return empty perms dict thus hiding the model from admin index.
        """
        return {}

admin.site.register(PageMeta, PageMetaAdmin)
class TitleMetaAdmin(TitleExtensionAdmin):
    """Admin for the TitleMeta title extension (registered below)."""
    form = TitleMetaAdminForm

    class Media:
        # Same admin stylesheet as PageMetaAdmin.
        css = {
            'all': ('%sdjangocms_page_meta/css/%s' % (
                settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
        }

    def get_model_perms(self, request):
        """
        Return empty perms dict thus hiding the model from admin index.
        """
        return {}

admin.site.register(TitleMeta, TitleMetaAdmin)
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundat | ion; either version 2 of the License, or
# (at your option) any later version.
"""Make osversion.osmajor_id non-NULLable
Revision ID: 5ab66e956c6b
Revises: 286ed23a5c1b
Create Date: 2017-12-20 15:54:38.825703
"""
# revision identifiers, used by Alembic.
revision | = '5ab66e956c6b'
down_revision = '286ed23a5c1b'
from alembic import op
from sqlalchemy import Integer
def upgrade():
    """Forward migration: disallow NULL in osversion.osmajor_id."""
    op.alter_column('osversion', 'osmajor_id', existing_type=Integer, nullable=False)
def downgrade():
    """Reverse migration: allow NULL in osversion.osmajor_id again."""
    op.alter_column('osversion', 'osmajor_id', existing_type=Integer, nullable=True)
|
import logging
from django.db import connection
from django.contrib.auth.models import User
from tenant_schemas.utils import get_tenant_model
from tenant_schemas.test.cases import TenantTestCase
import pytest
logger = logging.getLogger('tests.util')
def make_tenant(schema='test', domain='tenant.test.com', username='tester'):
    """Returns a tuple: (a tenant schema, an administrative user for it).

    `schema`: Schema name
    `domain`: Domain for the tenant site
    `username`: Username to be admin of the site

    Both user and tenant are created if they don't already exist, and the db
    connection is set to that tenant. Logs to tests.util, level INFO.

    Tenant creation is conditional because it requires significant time.
    """
    TenantTestCase.sync_shared()
    # Admin user for the site: reuse if present, otherwise create.
    user, user_created = User.objects.get_or_create(username=username)
    if user_created:
        logger.info("Created user '{}'.".format(user))
    else:
        logger.info("User '{}' exists, not creating it.".format(user))
    # Tenant: look up by schema name, creating it with the defaults below.
    defaults = dict(domain_url=domain, schema_name=schema, user=user)
    tenant, tenant_created = get_tenant_model().objects.get_or_create(
        schema_name=schema, defaults=defaults)
    if tenant_created:
        logger.info(
            "No schema named '{}' detected; creating one".format(schema))
        tenant.create_schema(check_if_exists=True)
    else:
        logger.info("Tenant with schema name '{}' found".format(schema))
    connection.set_tenant(tenant)
    return (user, tenant)
@pytest.fixture
# Point the db connection at the pre-existing 'test' tenant schema.
# `request` is the standard pytest fixture argument; unused here.
def set_tenant(request):
    tenant = get_tenant_model().objects.get(schema_name='test')
    connection.set_tenant(tenant)
|
#!/usr/bin/python
import urllib, urllib2, json, sys
import splunk.entity as entity
# access the credentials in /servicesNS/nobody/app_name/admin/passwords
def getCredentials(sessionKey):
    """Return (username, clear_password) stored for the 'tado' app.

    Uses the splunkd session key to read the app's passwords endpoint
    and returns the first credential pair found; raises if none exist
    or the endpoint cannot be queried.
    """
    myapp = 'tado'
    try:
        # list all credentials
        entities = entity.getEntities(['admin', 'passwords'], namespace=myapp, owner='nobody', sessionKey=sessionKey)
    except Exception, e:
        raise Exception("Could not get %s credentials from splunk. Error: %s" % (myapp, str(e)))
    # return first set of credentials
    for i, c in entities.items():
        return c['username'], c['clear_password']
    raise Exception("No credentials have been found")
def main():
    """Scripted-input entry point.

    Reads the splunkd session key from stdin, resolves the stored tado
    credentials, authenticates, then prints zone state for the home.
    """
    # read session key sent from splunkd
    sessionKey = sys.stdin.readline().strip()
    if len(sessionKey) == 0:
        sys.stderr.write("Did not receive a session key from splunkd. " +
                        "Please enable passAuth in inputs.conf for this " +
                        "script\n")
        exit(2)
    username, password = getCredentials(sessionKey)
    token = getAuth(username, password)
    homeId = getHomeId(token)
    doRequest(token,homeId)
def getAuth(email, password):
    """POST the tado OAuth password grant and return the access token.

    NOTE(review): on a non-200 response this only prints "errorCode=..."
    and falls through, implicitly returning None — callers then fail when
    they use the token.
    """
    data = dict(client_id="tado-webapp",grant_type="password",password=password,scope="home.user", username=email )
    authUrl = "https://my.tado.com/oauth/token"
    method = "POST"
    handler = urllib2.HTTPHandler()
    opener = urllib2.build_opener(handler)
    data = urllib.urlencode(data)
    request = urllib2.Request(authUrl, data=data)
    request.get_method = lambda: method
    try:
        connection = opener.open(request)
    except urllib2.HTTPError,e:
        # HTTPError doubles as a response object: keep it so code/body
        # can be read below.
        connection = e
    if connection.code == 200:
        responseData = str(connection.read())
        jsonList = json.loads(responseData)
        return jsonList['access_token']
    else:
        print "errorCode="+str(connection.code)
def getHomeId(token):
    """Return the id of the first home listed for the authenticated user.

    NOTE(review): on a non-2xx response only prints "errorCode=..." and
    implicitly returns None.
    """
    url = "https://my.tado.com/api/v2/me"
    req = urllib2.Request(url)
    req.add_header("Authorization","Bearer "+token)
    handler = urllib2.HTTPHandler()
    opener = urllib2.build_opener(handler)
    try:
        connection = opener.open(req)
    except urllib2.HTTPError,e:
        # HTTPError doubles as a response object.
        connection = e
    if 200 <= connection.code <= 207:
        responseData = str(connection.read())
        jsonList = json.loads(responseData)
        return jsonList['homes'][0]['id']
    else:
        print "errorCode="+str(connection.code)
def doRequest(token,homeId):
    """Fetch and print the raw state of zone 0 of the given home.

    Output goes to stdout so Splunk can index it; errors print only
    "errorCode=...".
    """
    url = "https://my.tado.com/api/v2/homes/"+str(homeId)+"/zones/0/state"
    req = urllib2.Request(url)
    req.add_header("Authorization","Bearer "+token)
    handler = urllib2.HTTPHandler()
    opener = urllib2.build_opener(handler)
    try:
        connection = opener.open(req)
    except urllib2.HTTPError,e:
        # HTTPError doubles as a response object.
        connection = e
    if 200 <= connection.code <= 207:
        print connection.read()
    else:
        print "errorCode="+str(connection.code)
main()
|
#!/usr/bin/env python
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from curl2share.config import log_file, log_level
# Map config names to stdlib logging levels.  logging.WARNING is the
# canonical constant (logging.WARN is a deprecated alias of the same value).
loglevel = {'CRITICAL': logging.CRITICAL,
            'ERROR': logging.ERROR,
            'WARNING': logging.WARNING,
            'INFO': logging.INFO,
            'DEBUG': logging.DEBUG,
            'NOTSET': logging.NOTSET}

# Module logger writing to the configured log file at the configured level.
logger = logging.getLogger(__name__)
logger.setLevel(loglevel[log_level])
fh = logging.FileHandler(log_file)
fh.setLevel(loglevel[log_level])
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s \
- %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
|
####
#### Setup gross testing environment.
####
#import time
from behave_core. | environment import start_browser, define_target, quit_browser
from behave import *
## Run this before anything else.
def before_all(context):
    """Start the browser and define the test target before any feature runs."""
    start_browser(context)
    define_target(context)
## Do this after completing everything.
def after_all(context):
    """Quit the browser after every feature has completed."""
    quit_browser(context)
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
from datetime import datetime
import urllib
import jingo
from lib.l10n_utils.dotlang import _
@jingo.register.function
def format_tweet_body(tweet):
    """
    Return a tweet in an HTML format.
    @param tweet: A Tweepy Status object retrieved with the Twitter REST API.
    See the developer document for details:
    https://dev.twitter.com/docs/platform-objects/tweets
    """
    text = tweet.text
    entities = tweet.entities
    # Hashtags (#something).  Named `tag` so the builtin `hash` is not
    # shadowed (the original used `hash` as a local).
    for hashtag in entities['hashtags']:
        tag = hashtag['text']
        text = text.replace('#' + tag,
            ('<a href="https://twitter.com/search?q=%s&src=hash"'
             ' class="hash">#%s</a>' % ('%23' + urllib.quote(tag.encode('utf8')),
                                        tag)))
    # Mentions (@someone)
    for user in entities['user_mentions']:
        name = user['screen_name']
        text = text.replace('@' + name,
            ('<a href="https://twitter.com/%s" class="mention">@%s</a>'
             % (urllib.quote(name.encode('utf8')), name)))
    # URLs
    for url in entities['urls']:
        text = text.replace(url['url'],
            ('<a href="%s" title="%s">%s</a>'
             % (url['url'], url['expanded_url'], url['display_url'])))
    # Media
    if entities.get('media'):
        for medium in entities['media']:
            text = text.replace(medium['url'],
                ('<a href="%s" title="%s" class="media">%s</a>'
                 % (medium['url'], medium['expanded_url'],
                    medium['display_url'])))
    # NOTE(review): str.replace substitutes every occurrence, so a hashtag
    # or handle that is a prefix of another in the same tweet can be
    # rewritten twice — worth confirming this is acceptable for display.
    return text
@jingo.register.function
def format_tweet_timestamp(tweet):
    """
    Return an HTML time element filled with a tweet timestamp.
    @param tweet: A Tweepy Status object retrieved with the Twitter REST API.
    For a tweet posted within the last 24 hours, the timestamp label should be
    a relative format like "20s", "3m" or 5h", otherwise it will be a simple
    date like "6 Jun". See the Display Requirements for details:
    https://dev.twitter.com/terms/display-requirements
    """
    now = datetime.utcnow()
    created = tweet.created_at  # A datetime object
    diff = now - created        # A timedelta object
    # Flat guard chain: calendar-day difference first, then second buckets.
    if diff.days != 0:
        label = created.strftime("%-d %b")
    elif diff.seconds < 60:
        label = _('%ds') % diff.seconds
    elif diff.seconds < 60 * 60:
        label = _('%dm') % round(diff.seconds / 60)
    else:
        label = _('%dh') % round(diff.seconds / 60 / 60)
    full = created.strftime("%Y-%m-%d %H:%M")
    return ('<time datetime="%s" title="%s" itemprop="dateCreated">%s '
            '<span class="full">(%s)</span></time>'
            % (created.isoformat(), full, label, full))
|
DS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit un objet 'importeur', chargé de contrôler le mécanisme
d'importation, initialisation, configuration, déroulement et arrêt
des modules primaires et secondaires.
On parcourt les sous-dossiers définis dans les variables :
- REP_PRIMAIRES : répertoire des modules primaires
- REP_SECONDAIRES : répertoire des modules secondaires
Il est possible de changer ces variables mais dans ce cas, une réorganisation
du projet s'impose.
Dans chaque module, on s'occupera de charger l'objet le représentant.
Par exemple, le module anaconf se définit comme suit :
* un package anaconf contenu dans REP_PRIMAIRES
* un fichier __init__.py
* une classe Anaconf
On créée un objet chargé de représenter le module. C'est cet objet qui
possède les méthodes génériques chargées d'initialiser, configurer, lancer
et arrêter un module. Les autres fichiers du module sont une boîte noir
inconnu pour l'importeur.
"""
import os
import sys
from abstraits.module import *
REP_PRIMAIRES = "primaires"
REP_SECONDAIRES = "secondaires"
class Importeur:
"""Classe chargée de créer un objet Importeur. Il contient sous la forme
d'attributs les modules primaires et secondaires chargés. Les modules
primaires et secondaires ne sont pas distingués.
On ne doit créer qu'un seul objet Importeur.
"""
nb_importeurs = 0
    def __init__(self):
        """Importer constructor.

        Its main job is to check that only one Importeur is ever created
        (nb_importeurs is a class-level counter).  The command parser is
        handed to each module later, in tout_instancier.
        """
        Importeur.nb_importeurs += 1
        if Importeur.nb_importeurs > 1:
            raise RuntimeError("{0} importeurs ont été créés".format( \
                Importeur.nb_importeurs))
def __str__(self):
"""Retourne sous ue forme un peu plus lisible les modules importés."""
ret = []
for nom_module in self.__dict__.keys():
ret.append("{0}: {1}".format(nom_module, getattr(self, \
nom_module)))
ret.sort()
return "\n".join(ret)
def tout_charger(self):
"""Méthode appelée pour charger les modules primaires et secondaires.
Par défaut, on importe tout mais on ne créée rien.
"""
# On commence par parcourir les modules primaires
for nom_package in os.listdir(os.getcwd() + "/" + REP_PRIMAIRES):
if not nom_package.startswith("__"):
package = __import__(REP_PRIMAIRES + "." + nom_package)
module = getattr(getattr(package, nom_package), \
nom_package.capitalize())
setattr(self, nom_package, module)
# On fait de même avec les modules secondaires
for nom_package in os.listdir(os.getcwd() + "/" + REP_SECONDAIRES):
if not nom_package.startswith("__"):
package = __import__(REP_SECONDAIRES + "." + nom_package)
module = getattr(getattr(package, nom_package), \
nom_package.capitalize())
setattr(self, nom_package, module)
    def tout_instancier(self, parser_cmd):
        """Instantiate the modules loaded previously.

        Whether a module still needs creating is decided from its type
        (class vs. instance), so this method can be called while some
        modules are already instantiated and others are not.

        IMPORTANT NOTE: each module constructor receives self — the
        importer — because modules need it to interact with each other.
        """
        for nom_module, module in self.__dict__.items():
            if type(module) is type: # still a class: must be instantiated
                setattr(self, nom_module, module(self, parser_cmd))
    def tout_configurer(self):
        """Configure every module that needs it (status INSTANCIE).

        Careful: modules not yet instantiated are still classes, so all
        modules must be instantiated before this is called — in other
        words tout_instancier must run first.
        """
        for module in self.__dict__.values():
            if module.statut == INSTANCIE:
                module.config()
    def tout_initialiser(self):
        """Initialise every module that needs it (status CONFIGURE)."""
        for module in self.__dict__.values():
            if module.statut == CONFIGURE:
                module.init()
    def tout_detruire(self):
        """Destroy every module that needs it (status INITIALISE)."""
        for module in self.__dict__.values():
            if module.statut == INITIALISE:
                module.detruire()
    def boucle(self):
        """Called on each turn of the synchronisation loop; forwards the
        tick to the boucle() method of every module, primary or
        secondary."""
        for module in self.__dict__.values():
            module.boucle()
def module_est_charge(self, nom):
"""Retourne True si le module est déjà chargé, False sinon.
On n'a pas besoin du type du module, les modules primaires
et secondaires étant stockés de la même façon.
Attention: un module peut être chargé sans être instancié,
configuré ou initialisé.
"""
return nom in self.__dict__.keys()
def charger_module(self, parser_cmd, m_type, nom):
"""Méthode permettant de charger un module en fonction de son type et
de son nom.
Si le module est déjà chargé, on ne fait rien.
Note: à la différence de tout_charger, cette méthode créée directement
l'objet gérant le module.
"""
if m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \
.format(type))
if self.module_est_charge(nom):
print("Le module {0} est déjà chargé.".format(nom))
else:
package = __import__(rep + "." + nom)
module = getattr(getattr(package, nom), \
nom.capitalize())
setattr(self, nom, module(self, parser_cmd))
def decharger_module(self, m_type, nom):
"""Méthode permettant de décharger un module.
Elle se charge :
- d'appeler la méthode detruire du module
- de supprimer le module des modules dans sys.modules
- de supprimer l'instance du module dans self
"""
i | f m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \
.format(type))
nom_complet = rep + "." + nom
for cle in list(sys.modules.keys()):
if cle.startswith(nom_complet + "."):
del sys.modules[cle]
if self.module_est_charge(nom):
geta | ttr(self, nom).detuire()
delattr(self, nom)
else:
print("{0} n'est pas dans les attributs de l'importeur".format(nom))
def recharger_module(self, parser_cmd, m_type, nom):
"""Cette méthode permet de recharger un module. Elle passe par :
- decharger_module
- charger_module
"""
self.decharger_m |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^-x)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def sigmoid_cross_entropy_with_logits(x, z):
    """Numerically stable BCE-with-logits: max(x,0) - x*z + log(1+e^-|x|)."""
    softplus = np.log(1 + np.exp(-np.abs(x)))
    return np.maximum(x, 0) - x * z + softplus
def sigmoid_cross_entropy_with_logits_grad(x, z):
    """Gradient of the sigmoid cross entropy w.r.t. x: z - sigmoid(x)."""
    # sigmoid inlined; identical expression to the helper above.
    return z - 1.0 / (1.0 + np.exp(-x))
class TestCrossEntropyOps(hu.HypothesisTestCase):
    """Hypothesis-driven tests for caffe2 cross-entropy operators."""
    # Strategy: a random 1-D/2-D shape, then (logits, targets) arrays of
    # that shape; logits avoid (-0.1, 0.1) and targets are hard 0/1 labels.
    @given(
        inputs=st.lists(
            elements=st.integers(min_value=1, max_value=5),
            min_size=1,
            max_size=2,
            average_size=2,
        ).flatmap(
            lambda shape: st.tuples(
                hu.arrays(
                    dims=shape,
                    elements=st.one_of(
                        st.floats(min_value=-1.0, max_value=-0.1),
                        st.floats(min_value=0.1, max_value=1.0),
                    )),
                hu.arrays(
                    dims=shape,
                    elements=st.sampled_from([0.0, 1.0]),
                ),
            )
        ),
        **hu.gcs
    )
    def test_sigmoid_cross_entropy_with_logits(self, inputs, gc, dc):
        logits, targets = inputs
        # Forward reference: mean of the stable elementwise xent over the
        # last axis.
        def sigmoid_xentr_logit_ref(logits, targets):
            s = sigmoid_cross_entropy_with_logits(logits, targets)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )
        # Backward reference: d/dlogits of the mean, hence the division by
        # inner_size and the broadcasted upstream gradient.
        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)
        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits',
            ['logits', 'targets'],
            ['xentropy'])
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)
    @given(n=st.integers(2, 10),
           b=st.integers(1, 5),
           **hu.gcs_cpu_only)
    def test_soft_label_cross_entropy(self, n, b, gc, dc):
        # Initialize X and add 1e-2 for numerical stability
        X = np.random.rand(b, n).astype(np.float32)
        X = X + 1e-2
        for i in range(b):
            X[i] = X[i] / np.sum(X[i])
        # Initialize label
        label = np.random.rand(b, n).astype(np.float32)
        for i in range(b):
            label[i] = label[i] / np.sum(label[i])
        # Reference implementation of cross entropy with soft labels
        # NOTE(review): the inner parentheses pass a *generator* to np.sum;
        # it appears to rely on numpy's fallback handling — confirm.
        def soft_label_xentr_ref(X, label):
            xent = [np.sum((-label[j][i] * np.log(max(X[j][i], 1e-20))
                            for i in range(len(X[0])))) for j in range(b)]
            return (xent,)
        op = core.CreateOperator("CrossEntropy", ["X", "label"], ["Y"])
        # TODO(surya) Once CrossEntropyOp is ported to GPU, add the respective
        # tests to this unit test.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, label],
            reference=soft_label_xentr_ref,
        )
        self.assertGradientChecks(
            gc, op, [X, label], 0, [0], stepsize=1e-4, threshold=1e-2)
# Allow running this test file directly, outside a test runner.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import re
from rest_framework.serializers import ValidationError
def name(value):
    '''Matches names of people, countries and other things.'''
    match = re.match(r'^[A-Za-z\s\.\-\'àèéìòóôù]+$', value)
    if match is None:
        raise ValidationError('This field contains invalid characters.')
def address(value):
    '''Matches street addresses.'''
    match = re.match(r'^[\w\s\.\-\'àèéìòóôù]+$', value)
    if match is None:
        raise ValidationError('This field contains invalid characters.')
def numeric(value):
    '''Matches numbers and spaces.'''
    digits_and_spaces = re.match(r'^[\d\s]+$', value)
    if digits_and_spaces is None:
        raise ValidationError('This field can only contain numbers and spaces.')
def email(value):
    '''Loosely matches email addresses.'''
    match = re.match(r'^[\w_.+-]+@[\w-]+\.[\w\-.]+$', value)
    if match is None:
        raise ValidationError('This is an invalid email address.')
def phone_international(value):
    '''Loosely matches phone numbers.'''
    pattern = r'^[\d\-x\s\+\(\)]+$'
    if re.match(pattern, value) is None:
        raise ValidationError('This is an invalid phone number.')
def phone_domestic(value):
    '''Matches domestic phone numbers, e.g. "(510) 555-0100 x123".'''
    pattern = r'^\(?(\d{3})\)?\s(\d{3})-(\d{4})(\sx\d{1,5})?$'
    if re.match(pattern, value) is None:
        raise ValidationError('This is an invalid phone number.')
def nonempty(value):
    '''Requires that a field be non-empty.'''
    if value:
        return
    raise ValidationError('This field is required.')
|
under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function
import json
import re
from tornado.util import PY3, unicode_type, basestring_type
if PY3:
from urllib.parse import parse_qs as _parse_qs
import html.entities as htmlentitydefs
import urllib.parse as urllib_parse
unichr = chr
else:
from urlparse import parse_qs as _parse_qs
import htmlentitydefs
import urllib as urllib_parse
try:
import typing # noqa
except ImportError:
pass
_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"',
'\'': '''}
def xhtml_escape(value):
    """Escapes a string so it is valid within HTML or XML.

    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
    When used in attribute values the escaped strings must be enclosed
    in quotes.

    .. versionchanged:: 3.2
       Added the single quote to the list of escaped characters.
    """
    def _replace(match):
        return _XHTML_ESCAPE_DICT[match.group(0)]
    return _XHTML_ESCAPE_RE.sub(_replace, to_basestring(value))
def xhtml_unescape(value):
    """Un-escapes an XML-escaped string."""
    text = _unicode(value)
    return re.sub(r"&(#?)(\w+?);", _convert_entity, text)
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
    """JSON-encodes the given Python object."""
    # JSON permits but does not require forward slashes to be escaped.
    # Escaping "</" keeps a literal "</script>" inside JSON emitted into
    # an HTML <script> tag from terminating the script element early;
    # Python's json does not do this by itself.  The json.dumps wrapping
    # is an implementation detail — see
    # https://github.com/tornadoweb/tornado/pull/706 before adding **kwargs.
    encoded = json.dumps(value)
    return encoded.replace("</", "<\\/")
def json_decode(value):
    """Returns Python objects for the given JSON string."""
    text = to_basestring(value)
    return json.loads(text)
def squeeze(value):
    """Replace all sequences of whitespace chars with a single space."""
    collapsed = re.sub(r"[\x00-\x20]+", " ", value)
    return collapsed.strip()
def url_escape(value, plus=True):
    """Returns a URL-encoded version of the given value.

    If ``plus`` is true (the default), spaces will be represented
    as "+" instead of "%20".  This is appropriate for query strings
    but not for the path component of a URL.  Note that this default
    is the reverse of Python's urllib module.

    .. versionadded:: 3.1
        The ``plus`` argument
    """
    if plus:
        return urllib_parse.quote_plus(utf8(value))
    return urllib_parse.quote(utf8(value))
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if not PY3:
    # Python 2: unquote works on byte strings; decode afterwards if asked.
    def url_unescape(value, encoding='utf-8', plus=True):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.

        If ``plus`` is true (the default), plus signs will be interpreted
        as spaces (literal plus signs must be represented as "%2B").  This
        is appropriate for query strings and form-encoded values but not
        for the path component of a URL.  Note that this default is the
        reverse of Python's urllib module.

        .. versionadded:: 3.1
           The ``plus`` argument
        """
        unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
        if encoding is None:
            return unquote(utf8(value))
        else:
            return unicode_type(unquote(utf8(value)), encoding)

    # Python 2's parse_qs already returns byte strings.
    parse_qs_bytes = _parse_qs
else:
    def url_unescape(value, encoding='utf-8', plus=True):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.

        If ``plus`` is true (the default), plus signs will be interpreted
        as spaces (literal plus signs must be represented as "%2B").  This
        is appropriate for query strings and form-encoded values but not
        for the path component of a URL.  Note that this default is the
        reverse of Python's urllib module.

        .. versionadded:: 3.1
           The ``plus`` argument
        """
        if encoding is None:
            if plus:
                # unquote_to_bytes doesn't have a _plus variant
                value = to_basestring(value).replace('+', ' ')
            return urllib_parse.unquote_to_bytes(value)
        else:
            unquote = (urllib_parse.unquote_plus if plus
                       else urllib_parse.unquote)
            return unquote(to_basestring(value), encoding=encoding)

    def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
        """Parses a query string like urlparse.parse_qs, but returns the
        values as byte strings.

        Keys still become type str (interpreted as latin1 in python3!)
        because it's too painful to keep them as byte strings in
        python3 and in practice they're nearly always ascii anyway.
        """
        # This is gross, but python3 doesn't give us another way.
        # Latin1 is the universal donor of character encodings.
        result = _parse_qs(qs, keep_blank_values, strict_parsing,
                           encoding='latin1', errors='strict')
        encoded = {}
        for k, v in result.items():
            encoded[k] = [i.encode('latin1') for i in v]
        return encoded
# Types that utf8() passes through unchanged.
_UTF8_TYPES = (bytes, type(None))


def utf8(value):
    # type: (typing.Union[bytes,unicode_type,None])->typing.Union[bytes,None]
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned
    unchanged.  Otherwise it must be a unicode string and is encoded
    as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if isinstance(value, unicode_type):
        return value.encode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )
# Types that to_unicode() passes through unchanged.
_TO_UNICODE_TYPES = (unicode_type, type(None))


def to_unicode(value):
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged.  Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if isinstance(value, bytes):
        return value.decode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )


# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
native_str = to_unicode if str is unicode_type else utf8
_BASESTRING_TYPES = (basestring_type, type(None))
def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for comprehend_demo_resources.py
"""
from io import BytesIO
import json
import tarfile
import time
from unittest.mock import MagicMock
import uuid
import boto3
from botocore.exceptions import ClientError
import pytest
from comprehend_demo_resources import ComprehendDemoResources
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_create_bucket')])
def test_setup(make_stubber, stub_runner, monkeypatch, error_code, stop_on_method):
    """Verify ComprehendDemoResources.setup stubs: it creates the S3 bucket
    and the IAM role and policy in order, and a ClientError raised by the
    first stubbed call propagates to the caller.

    make_stubber and stub_runner are project-provided pytest fixtures
    (presumably defined in conftest.py) -- confirm their contract there.
    """
    s3_resource = boto3.resource('s3')
    s3_stubber = make_stubber(s3_resource.meta.client)
    iam_resource = boto3.resource('iam')
    iam_stubber = make_stubber(iam_resource.meta.client)
    demo_resources = ComprehendDemoResources(s3_resource, iam_resource)
    demo_name = 'test-name'
    # Bucket name embeds the patched uuid4 value below.
    bucket_name = 'doc-example-bucket-test-uuid'
    role_name = f'{demo_name}-role'
    policy_name = f'{demo_name}-policy'
    policy_arn = f'arn:aws:iam:REGION:123456789012:policy/{policy_name}'
    # Pin uuid4 so the generated bucket name is deterministic, and no-op
    # time.sleep so any wait loops in setup() don't slow the test down.
    monkeypatch.setattr(uuid, 'uuid4', lambda: 'test-uuid')
    monkeypatch.setattr(time, 'sleep', lambda x: None)
    # Stubs must be registered in the same order setup() makes the calls.
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            s3_stubber.stub_create_bucket, bucket_name,
            s3_resource.meta.client.meta.region_name)
        runner.add(iam_stubber.stub_create_role, role_name)
        runner.add(iam_stubber.stub_get_role, role_name)
        runner.add(iam_stubber.stub_create_policy, policy_name, policy_arn)
        runner.add(iam_stubber.stub_get_policy, policy_arn)
        runner.add(iam_stubber.stub_attach_role_policy, role_name, policy_arn)
    if error_code is None:
        demo_resources.setup(demo_name)
        assert demo_resources.bucket.name == bucket_name
        assert demo_resources.data_access_role.name == role_name
    else:
        with pytest.raises(ClientError) as exc_info:
            demo_resources.setup(demo_name)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,file_name,file_contents,output', [
    (None, 'name1.jsonl',
     [json.dumps('content1'), json.dumps('content2')],
     ['content1', 'content2']),
    (None, 'name1.csv',
     ['field1,field2', 'value1-1,value1-2', 'value2-1,value2-2'],
     [{'field1': 'value1-1', 'field2': 'value1-2'},
      {'field1': 'value2-1', 'field2': 'value2-2'}]),
    ('TestException', 'name1.jsonl', [], [])])
def test_extract_job_output(monkeypatch, error_code, file_name, file_contents, output):
    """Verify extract_job_output downloads the job's output archive from the
    bucket and parses .jsonl and .csv members into Python data."""
    demo_resources = ComprehendDemoResources(None, None)
    demo_resources.bucket = MagicMock()
    demo_resources.bucket.name = 'test-bucket'
    job = {'OutputDataConfig': {
        'S3Uri': f's3://{demo_resources.bucket.name}/test-key'}}
    def mock_output(output_key, output_bytes):
        # The key must be the part of the S3 URI after the bucket name.
        assert output_key == 'test-key'
        output_bytes.write(b'test-content')
    # Wrap the stub in a MagicMock so the error branch below can actually
    # inject a failure.  The old code assigned .side_effect on a plain
    # function, which is a silent no-op -- the ClientError was never raised
    # and the error parametrization tested nothing.
    demo_resources.bucket.download_fileobj = MagicMock(side_effect=mock_output)
    if error_code is not None:
        # NOTE(review): there is no pytest.raises here, so extract_job_output
        # is expected to handle the ClientError internally -- confirm against
        # comprehend_demo_resources.py.
        demo_resources.bucket.download_fileobj.side_effect = ClientError(
            {'Error': {'Code': error_code}}, 'test-op')
    def mock_extract_file(name):
        return BytesIO('\n'.join(file_contents).encode())
    # Replace tarfile.open so the fake downloaded bytes don't have to be a
    # real tar archive.
    monkeypatch.setattr(
        tarfile, 'open', lambda fileobj, mode: MagicMock(
            extractfile=mock_extract_file, getnames=lambda: [file_name]))
    got_output = demo_resources.extract_job_output(job)
    if error_code is None:
        assert got_output[file_name]['data'] == output
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_cleanup(make_stubber, monkeypatch, error_code):
    """Verify cleanup() detaches and deletes the role's policy, deletes the
    role, empties the bucket, and deletes the bucket -- in that order.

    The error parametrization injects failures into the role and bucket
    deletions; cleanup() is called without pytest.raises, so it is expected
    to continue past those errors (best effort) and still consume every
    registered stub.
    """
    s3_resource = boto3.resource('s3')
    s3_stubber = make_stubber(s3_resource.meta.client)
    iam_resource = boto3.resource('iam')
    iam_stubber = make_stubber(iam_resource.meta.client)
    demo_resources = ComprehendDemoResources(s3_resource, iam_resource)
    bucket_name = 'doc-example-bucket-test-uuid'
    role_name = 'comprehend-classifier-demo-role'
    policy_name = 'comprehend-classifier-demo-policy'
    policy_arn = 'arn:aws:iam:REGION:123456789012:policy/test-policy'
    # Pre-populate the resources cleanup() tears down.
    demo_resources.data_access_role = iam_resource.Role(role_name)
    demo_resources.bucket = s3_resource.Bucket(bucket_name)
    # Stubs are registered in the order cleanup() makes the AWS calls:
    # IAM teardown first, then S3 teardown.
    iam_stubber.stub_list_attached_role_policies(role_name, {policy_name: policy_arn})
    iam_stubber.stub_detach_role_policy(role_name, policy_arn)
    iam_stubber.stub_delete_policy(policy_arn)
    iam_stubber.stub_delete_role(role_name, error_code=error_code)
    s3_stubber.stub_list_objects(bucket_name, ['key1'])
    s3_stubber.stub_delete_objects(bucket_name, ['key1'])
    s3_stubber.stub_delete_bucket(bucket_name, error_code=error_code)
    demo_resources.cleanup()
|
#!/usr/bin/python
# Greedy descent of a number triangle (the Project Euler #18 data set):
# starting at the apex, repeatedly step to the larger of the two numbers
# directly below, printing each value visited and the final path sum.
#
# NOTE: a greedy walk is not guaranteed to find the true maximum path
# (that requires dynamic programming); this preserves the script's greedy
# intent while fixing its double-counting bug -- every interior row used
# to be added to the tally twice (once as the "current" row and once as
# the previous iteration's "next" row).
y = '''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''

# Parse the triangle into a list of rows of numeric strings.
rows = [line.split() for line in y.split('\n')]

x_pos = 0
tally = int(rows[0][0])  # the apex is counted exactly once
print(tally)
for y_pos in range(len(rows) - 1):
    next_l = int(rows[y_pos + 1][x_pos])
    next_r = int(rows[y_pos + 1][x_pos + 1])
    # Move right only when the right child is strictly larger.
    if next_l < next_r:
        x_pos += 1
    chosen = int(rows[y_pos + 1][x_pos])
    print(chosen)
    tally += chosen
print(tally)
|
# Copyright 2019, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS O | F ANY KIND, either express or implied.
# See the License for the specific language governing permissi | ons and
# limitations under the License.
from __future__ import absolute_import
import functools
def add_methods(source_class, blacklist=()):
    """Add wrapped versions of the `api` member's methods to the class.

    Any methods passed in `blacklist` are not added.
    Additionally, any methods explicitly defined on the wrapped class are
    not added.
    """

    def wrap(wrapped_fx, lookup_fx):
        """Wrap a GAPIC method; preserve its name and docstring."""
        # Static and class methods are forwarded as-is: they were never
        # bound to an instance, so `self` must not be injected.
        if isinstance(lookup_fx, (classmethod, staticmethod)):
            def fx(*args, **kwargs):  # noqa
                return wrapped_fx(*args, **kwargs)
            return staticmethod(functools.wraps(wrapped_fx)(fx))

        # Instance methods are redirected to self.api, which is where the
        # wrapped GAPIC methods actually live.
        def fx(self, *args, **kwargs):  # noqa
            return wrapped_fx(self.api, *args, **kwargs)
        return functools.wraps(wrapped_fx)(fx)

    def actual_decorator(cls):
        # Reflectively walk the GAPIC's public surface and mirror each
        # callable onto the decorated client class.
        for name in dir(source_class):
            # Skip private/magic names and anything explicitly blacklisted.
            if name.startswith("_") or name in blacklist:
                continue
            # Skip non-callable attributes (constants, properties' values).
            attr = getattr(source_class, name)
            if not callable(attr):
                continue
            # Look up the raw class-dict entry to detect static/class methods.
            setattr(cls, name, wrap(attr, source_class.__dict__[name]))
        # Hand the augmented class back to the decorator machinery.
        return cls

    # Return the decorator that performs the augmentation.
    return actual_decorator
|
# -*- coding: utf-8 -*-
import os
import sys
from django.contrib.auth.decorators import login_required
from django import get_version as get_django_version
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from django_info import __version__
def django_info_version():
    """Return the version string of the django_info app itself."""
    return __version__
def get_platform():
    """Return the OS identification (os.uname fields) as one string."""
    uname_fields = os.uname()
    return ' '.join(uname_fields)
def get_python_version():
    """Return the full interpreter version string (sys.version)."""
    return sys.version
def get_database_engine():
    """Return the short name of the default database engine backend."""
    engine = settings.DATABASES['default']['ENGINE']
    return engine.rsplit('.', 1)[-1]
def get_installed_apps():
    """Return the project's INSTALLED_APPS setting."""
    return settings.INSTALLED_APPS
def get_debug_mode():
    """Report whether Django's DEBUG setting is enabled."""
    return settings.DEBUG
def get_template_debug_mode():
    """Report whether Django's TEMPLATE_DEBUG setting is enabled."""
    return settings.TEMPLATE_DEBUG
def is_dev_server(request):
    """Report whether the request is served by Django's dev server.

    http://stackoverflow.com/a/1291858/1503
    """
    software = request.META.get('SERVER_SOFTWARE')
    if software is None:
        return False
    return 'WSGIServer' in software or 'Python' in software
def get_path(request):
    """Return the server's PATH entries as a list, or None when unset.

    The old body wrapped ``path.split(":")`` in an identity list
    comprehension, which added nothing; split already returns a list.
    """
    path = request.META.get('PATH', None)
    if path:
        return path.split(":")
    return None
@login_required(login_url="/admin/")
def info(request):
    """Render the django_info diagnostics page (admin login required)."""
    context = {}
    # Package / environment versions.
    context['django_info_version'] = django_info_version()
    context['django_version'] = get_django_version()
    context['database_engine'] = get_database_engine()
    context['python_version'] = get_python_version()
    context['platform'] = get_platform()
    # Values pulled from Django settings.
    context['settings_debug_mode'] = get_debug_mode()
    context['settings_template_debug_mode'] = get_template_debug_mode()
    context['settings_installed_apps'] = get_installed_apps()
    # Request-derived values.
    context['is_dev_server'] = is_dev_server(request)
    context['paths'] = get_path(request)
    return render_to_response('django_info/info.html', context,
                              context_instance=RequestContext(request))
|
# encoding=utf8
import utils
import pickle
import zipfile
import os
from tqdm import tqdm
from pprint import pprint
# Globals
#[Special, Heavy, Primary]
bucketHashes = [2465295065,953998645,1498876634]
# Load in Manifest
print 'Loading Manifest...'
with open('manifest.pickle','rb') as f:
data = pickle.loads(f.read())
# Convert strings to Unicodie
print 'Converting Manifest...'
data = utils.convert(data)
# Get the Items, Grid, Stats, and Perks tables from the Manifest
items = data['DestinyInventoryItemDefinition']
grids = data['DestinyTalentGridDefinition']
stats = data['DestinyStatDefiniti | on']
perks = data['DestinySandboxPerkDefinition']
# Get all named items from the database
all_items = {}
print 'Creating items....\n'
for i in tqdm(items, desc='Item Gathering'):
# Get Weapons
if items[i]['bucketTypeHash'] in bucketHashes:
if 'itemName' in items[i].viewkeys():
all_items[items[i]['itemName']] = {'grid':items[i][' | talentGridHash'],'hash': i}
# Loop through items and create training data
cur_arch = 0
num_guns = 0
hash_list = []
bad_hashes = []
print '\nLooping through Guns to create training data...\n'
for item in tqdm(all_items, desc='Guns'):
gun = all_items[item]
cur_archive = 'archive_%d.zip' % cur_arch
# First check to see if this archive exists, if not make it
if not os.path.exists(cur_archive):
zf = zipfile.ZipFile(cur_archive, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
zf.close()
# Make sure this archive can handle another file
if not(os.stat(cur_archive).st_size <= 3900000000):
# Create a contents file for the archive
with open('contents.txt','w') as f:
for i in hash_list:
f.write('%d.txt' % i)
zf = zipfile.ZipFile(cur_archive, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
zf.write('contents.txt')
zf.close()
os.remove('contents.txt')
cur_arch += 1
hash_list = []
# Open zipfile
zf = zipfile.ZipFile(cur_archive, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
# Create grid for gun
# If it is no good, just continue onto the next
try:
grid = utils.makeGrid(grids[gun['grid']])
except:
bad_hashes.append(gun['hash'])
continue
# Create the training data!
utils.makeTrainingDataJSON(items, stats, perks, utils.makeGrid(grids[gun['grid']]), gun['hash'])
# Add this to the zipfile
zf.write('%d.txt' % gun['hash'])
zf.close()
# Remove the file and add the hash to the list
os.remove('%d.txt' % gun['hash'])
hash_list.append(gun['hash'])
num_guns += 1
# Done! Add contents to the last archive
with open('contents.txt','w') as f:
for i in hash_list:
f.write('%d.txt\n' % i)
zf = zipfile.ZipFile('archive_%d.zip' % cur_arch, 'a', zipfile.ZIP_DEFLATED, allowZip64=True)
zf.write('contents.txt')
zf.close()
os.remove('contents.txt')
# Show completion and print end stats!
print '\nComplete!'
print 'Created training data for %d guns across %d %s!' % (num_guns, cur_arch+1, 'archives' if cur_arch > 0 else 'archive')
print 'Skipped %d hashes!' % len(bad_hashes)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.