prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
"""
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyjfuzz.core.pjf_configuration import PJFConfiguration
import unittest
import argparse
import sys
__TITLE__ = "Testing PJFConfiguration object"
class TestPJFConfiguration(unittest.TestCase):
    """Verify that PJFConfiguration mirrors every parsed CLI option."""

    def test_json_configuration(self):
        # Simulate the command line: a JSON payload plus the no-logo flag.
        sys.argv += ["--J", "[1]", "--no-logo"]
        parser = argparse.ArgumentParser(
            description='',
            formatter_class=argparse.RawTextHelpFormatter,
        )
        parser.add_argument('--J', type=str, default=None)
        parser.add_argument('--no-logo', action='store_true', dest='nologo',
                            default=False, required=False)
        parsed = parser.parse_args()
        config = PJFConfiguration(parsed)
        # Every attribute of the parsed namespace must survive the wrapping.
        for name in parsed.__dict__:
            self.assertIn(name, config.__dict__)
def test():
    """Print a banner and run the configuration test suite."""
    banner = "=" * len(__TITLE__)
    print(banner)
    print(__TITLE__)
    print(banner)
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestPJFConfiguration)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
e ValueError('Unsupported destination buffer type %r', dst.dtype)
dst_is_uint16 = (dst.dtype == 'uint16')
with self.tile_request(tx, ty, readonly=True) as src:
if src is transparent_tile.rgba:
#dst[:] = 0 # <-- notably slower than memset()
if dst_is_uint16:
mypaintlib.tile_clear_rgba16(dst)
else:
mypaintlib.tile_clear_rgba8(dst)
else:
if dst_is_uint16:
# this will do memcpy, not worth to bother skipping the u channel
mypaintlib.tile_copy_rgba16_into_rgba16(src, dst)
else:
if dst_has_alpha:
mypaintlib.tile_convert_rgba16_to_rgba8(src, dst)
else:
mypaintlib.tile_convert_rgbu16_to_rgbu8(src, dst)
def composite_tile(self, dst, dst_has_alpha, tx, ty, mipmap_level=0,
                   opacity=1.0, mode=mypaintlib.CombineNormal,
                   *args, **kwargs):
    """Composite one tile of this surface over a NumPy array.
    See lib.surface.TileCompositable for the parameters. This
    implementation adds two further ones:
    :param float opacity: opacity multiplier
    :param int mode: mode to use when compositing
    """
    # Apply zero-alpha-source optimizations if possible.
    # Sometimes this can be done without issuing a tile request.
    if opacity == 0:
        if dst_has_alpha:
            if mode in lib.modes.MODES_CLEARING_BACKDROP_AT_ZERO_ALPHA:
                mypaintlib.tile_clear_rgba16(dst)
                return
        if mode not in lib.modes.MODES_EFFECTIVE_AT_ZERO_ALPHA:
            return
    # Tile request needed, but may need to satisfy it from a deeper
    # mipmap level: delegate recursively until the levels match.
    if self.mipmap_level < mipmap_level:
        self.mipmap.composite_tile(dst, dst_has_alpha, tx, ty,
                                   mipmap_level, opacity, mode)
        return
    # Tile request at the required level.
    # Try optimizations again if we got the special marker tile
    # (identity comparison against the shared transparent tile).
    with self.tile_request(tx, ty, readonly=True) as src:
        if src is transparent_tile.rgba:
            if dst_has_alpha:
                if mode in lib.modes.MODES_CLEARING_BACKDROP_AT_ZERO_ALPHA:
                    mypaintlib.tile_clear_rgba16(dst)
                    return
            if mode not in lib.modes.MODES_EFFECTIVE_AT_ZERO_ALPHA:
                return
        mypaintlib.tile_combine(mode, src, dst, dst_has_alpha, opacity)
## Snapshotting
def save_snapshot(self):
    """Creates and returns a snapshot of the surface
    Snapshotting marks all the tiles of the surface as read-only,
    then just shallow-copies the tiledict. It's quick. See
    tile_request() for how new read/write tiles can be unlocked.
    """
    sshot = _SurfaceSnapshot()
    # Freeze every tile; later writers must copy-on-write via tile_request().
    for t in self.tiledict.itervalues():
        t.readonly = True
    sshot.tiledict = self.tiledict.copy()
    return sshot
def load_snapshot(self, sshot):
    """Loads a saved snapshot, replacing the internal tiledict"""
    self._load_tiledict(sshot.tiledict)
def _load_tiledict(self, d):
    """Efficiently loads a tiledict, and notifies the observers"""
    if d == self.tiledict:
        # common case optimization, called via stroke.redo()
        # testcase: comparison above (if equal) takes 0.6ms, code below 30ms
        return
    old = set(self.tiledict.iteritems())
    self.tiledict = d.copy()
    new = set(self.tiledict.iteritems())
    # Tiles present in exactly one of the two dicts have changed.
    dirty = old.symmetric_difference(new)
    for pos, tile in dirty:
        self._mark_mipmap_dirty(*pos)
    bbox = lib.surface.get_tiles_bbox(pos for (pos, tile) in dirty)
    if not bbox.empty():
        self.notify_observers(*bbox)
## Loading tile data
def load_from_surface(self, other):
    """Loads tile data from another surface, via a snapshot"""
    # Snapshotting marks the source tiles read-only, so tiles end up
    # shared copy-on-write rather than duplicated.
    self.load_snapshot(other.save_snapshot())
def _load_from_pixbufsurface(self, s):
    """Replaces all tile data with the contents of a pixbuf surface."""
    # Everything currently held becomes dirty, plus whatever gets loaded.
    dirty_tiles = set(self.tiledict.keys())
    self.tiledict = {}
    for tx, ty in s.get_tiles():
        with self.tile_request(tx, ty, readonly=False) as dst:
            s.blit_tile_into(dst, True, tx, ty)
    dirty_tiles.update(self.tiledict.keys())
    bbox = lib.surface.get_tiles_bbox(dirty_tiles)
    self.notify_observers(*bbox)
def load_from_numpy(self, arr, x, y):
    """Loads tile data from a numpy array
    :param arr: Array containing the pixel data
    :type arr: numpy.ndarray of uint8, dimensions HxWx3 or HxWx4
    :param x: X coordinate for the array
    :param y: Y coordinate for the array
    :returns: the dimensions of the loaded surface, as (x,y,w,h)
    :raises ValueError: if the array dtype is not uint8
    """
    h, w, channels = arr.shape
    # Degenerate (empty) arrays load nothing.
    if h <= 0 or w <= 0:
        return (x, y, w, h)
    if arr.dtype == 'uint8':
        # Wrap the array in a pixbuf surface, then blit it in tile by tile.
        s = pixbufsurface.Surface(x, y, w, h, data=arr)
        self._load_from_pixbufsurface(s)
    else:
        raise ValueError("Only uint8 data is supported by MyPaintSurface")
    return (x, y, w, h)
def load_from_png(self, filename, x, y, feedback_cb=None,
convert_to_srgb=True,
**kwargs):
"""Load from a PNG, one tilerow at a time, discarding empty tiles.
:param str filename: The file to load
:param int x: X-coordinate at which to load the replacement data
:param int y: Y-coordinate at which to load the replacement data
:param bool convert_to_srgb: If True, convert to sRGB
:param callable feedback_cb: Called every few tile rows
:param dict \*\*kwargs: Ignored
Raises a `lib.errors.FileHandlingError` with a descriptive
string when conversion or PNG reading fails.
| """
dirty_tiles = set(self.tiledict.keys())
self.tiledict = {}
state = {}
state['buf'] = None # array of height N, width depends on image
state['ty'] = y // N # current tile row being filled into buf
state['frame_size'] = None
def get_buffer(png_w, png_h):
state['frame_size'] = x, y, png_w, png_h
if feedback_cb:
feedback_cb()
| buf_x0 = x // N * N
buf_x1 = ((x + png_w - 1) // N + 1) * N
buf_y0 = state['ty']*N
buf_y1 = buf_y0+N
buf_w = buf_x1-buf_x0
buf_h = buf_y1-buf_y0
assert buf_w % N == 0
assert buf_h == N
if state['buf'] is not None:
consume_buf()
else:
state['buf'] = np.empty((buf_h, buf_w, 4), 'uint8')
png_x0 = x
png_x1 = x+png_w
subbuf = state['buf'][:, png_x0-buf_x0:png_x1-buf_x0]
if 1: # optimize: only needed for first and last
state['buf'].fill(0)
png_y0 = max(buf_y0, y)
png_y1 = min(buf_y0+buf_h, y+png_h)
assert png_y1 > png_y0
subbuf = subbuf[png_y0-buf_y0:png_y1-buf_y0, :]
state['ty'] += 1
return subbuf
def consume_buf():
ty = state['ty']-1
for i in xrange(state['buf'].shape[1] // N):
tx = x // N + i
src = state['buf'][:, i*N:(i+1)*N, :]
if src[:, :, 3].any():
with self.tile_request(tx, ty, readonly=False) as dst:
mypaintlib.tile_convert_rgba8_to_rgba16(src, dst)
if sys.platform == 'win32':
filename_sys = filename.encode("utf-8")
else:
filename_sys = filename.encode(sys.getfilesystemencoding()) # FIXME: should not do that, should use open(unicode_object)
try:
flags = mypaintlib.load_png_fast_progressive(
filename_sys,
get_buffer,
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from otp.speedchat import SpeedChatGlobals
class DistributedScavengerHuntTarget(DistributedObject.DistributedObject):
    """Client-side scavenger hunt target triggered by a SpeedChat phrase."""

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedScavengerHuntTarget')

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)

    def setupListenerDetails(self):
        # One attempt per cooldown window; re-armed by the reset task below.
        self.triggered = False
        self.triggerDelay = 15
        self.accept(SpeedChatGlobals.SCCustomMsgEvent, self.phraseSaid)

    def phraseSaid(self, phraseId):
        self.notify.debug('Checking if phrase was said')
        helpPhrase = 10003

        def reset():
            self.triggered = False

        if phraseId == helpPhrase and not self.triggered:
            self.triggered = True
            self.attemptScavengerHunt()
            # Allow another attempt once the cooldown elapses.
            taskMgr.doMethodLater(self.triggerDelay, reset,
                                  'ScavengerHunt-phrase-reset', extraArgs=[])

    def announceGenerate(self):
        DistributedObject.DistributedObject.announceGenerate(self)
        DistributedScavengerHuntTarget.notify.debug('announceGenerate')
        self.setupListenerDetails()

    def delete(self):
        # Stop listening and cancel any pending reset before teardown.
        self.ignoreAll()
        taskMgr.remove('ScavengerHunt-phrase-reset')
        DistributedObject.DistributedObject.delete(self)

    def attemptScavengerHunt(self):
        DistributedScavengerHuntTarget.notify.debug('attempScavengerHunt')
        self.sendUpdate('attemptScavengerHunt', [])
|
itorWidgetTms(),
KNOWN_DRIVERS.WMS: EditorWidgetWms(),
KNOWN_DRIVERS.WFS: EditorWidgetWfs(),
KNOWN_DRIVERS.GEOJSON: EditorWidgetGeoJson(),
}
# init icon selector
# self.txtIcon.set_dialog_ext(self.tr('Icons (*.ico *.jpg *.jpeg *.png *.svg);;All files (*.*)'))
# self.txtIcon.set_dialog_title(self.tr('Select icon for data source'))
self.iconChooseButton.clicked.connect(self.choose_icon)
# init combos
self.init_groups_cmb()
self.init_types_cmb()
self.change_spec_tab()
# validators
self.id_validator = LineEditColorValidator(self.txtId, '^[A-Za-z0-9_]+$', error_tooltip=self.tr('Any text'))
self.alias_validator = LineEditColorValidator(self.txtAlias, '^[A-Za-z0-9_ ]+$', error_tooltip=self.tr('Any text'))
# events
self.cmbType.currentIndexChanged.connect(self.change_spec_tab)
# vars
self.ds_info = None
self.init_with_existing = False
self._editor_tab = None
self.set_icon(
os.path.join(
os.path.dirname(__file__),
'icons',
'mapservices.png'
)
)
def init_groups_cmb(self):
    """Populate the group combo box with all known data source groups."""
    ds_groups = GroupsList()
    for ds_group in ds_groups.groups.values():
        # Store the group object as item data for later lookup by id.
        self.cmbGroup.addItem(QIcon(ds_group.icon), self.tr(ds_group.alias), ds_group)
def init_types_cmb(self):
    """Populate the type combo box with every supported driver name."""
    for drv in KNOWN_DRIVERS.ALL_DRIVERS:
        self.cmbType.addItem(drv, drv)
def change_spec_tab(self, index=0):
    """Show the settings widget matching the currently selected driver type."""
    # remove old widget
    self.tabWidget.removeTab(2)  # bad! hard-coded tab position
    drv = self.cmbType.itemData(self.cmbType.currentIndex())
    self.tabWidget.addTab(self.DRV_WIDGETS[drv], drv)
def set_ds_info(self, ds_info):
    """Load an existing data source into the form (edit mode)."""
    self.ds_info = ds_info
    self.init_with_existing = True
    # feel fields  ("feel" reads as "fill" throughout this module)
    self.feel_common_fields()
    self.feel_specific_fields()
def fill_ds_info(self, ds_info):
    """Load a data source into the form as a template (create mode)."""
    self.ds_info = ds_info
    # Differs from set_ds_info() only in this flag: accept() will create
    # a new source rather than saving over an existing one.
    self.init_with_existing = False
    # feel fields
    self.feel_common_fields()
    self.feel_specific_fields()
def choose_icon(self):
    """Ask the user for an icon file and apply it to the dialog."""
    icon_path = getOpenFileName(
        self,
        self.tr('Select icon for data source'),
        PluginSettings.get_default_user_icon_path(),
        self.tr('Icons (*.ico *.jpg *.jpeg *.png *.svg);;All files (*.*)')
    )
    # Empty string means the user cancelled the file dialog.
    if icon_path != "":
        PluginSettings.set_default_user_icon_path(icon_path)
        self.set_icon(icon_path)
def set_icon(self, icon_path):
    """Remember icon_path and refresh the preview pixmap."""
    self.__ds_icon = icon_path
    self.iconPreview.setPixmap(
        QPixmap(self.__ds_icon)
    )
def feel_common_fields(self):
    """Populate the shared (driver-independent) form fields from self.ds_info."""
    self.txtId.setText(self.ds_info.id)
    self.txtAlias.setText(self.ds_info.alias)
    # self.txtIcon.set_path(self.ds_info.icon_path)
    self.set_icon(self.ds_info.icon_path)
    # license
    self.txtLicense.setText(self.ds_info.lic_name)
    self.txtLicenseLink.setText(self.ds_info.lic_link)
    self.txtCopyrightText.setText(self.ds_info.copyright_text)
    self.txtCopyrightLink.setText(self.ds_info.copyright_link)
    self.txtTermsOfUse.setText(self.ds_info.terms_of_use)
    # set group
    group_index = None
    for i in range(self.cmbGroup.count()):
        if self.cmbGroup.itemData(i).id == self.ds_info.group:
            group_index = i
            break
    if group_index is not None:
        # FIX: select the index that was found, instead of relying on the
        # leaked loop variable `i` (same value today, but fragile).
        self.cmbGroup.setCurrentIndex(group_index)
    else:
        # Unknown group id: add a placeholder entry so the value round-trips.
        non_ex_group = GroupInfo(group_id=self.ds_info.group)
        self.cmbGroup.addItem(self.ds_info.group, non_ex_group)
        self.cmbGroup.setCurrentIndex(self.cmbGroup.count() - 1)
def feel_specific_fields(self):
    """Populate the driver-specific widgets from self.ds_info."""
    # set type
    self.cmbType.setCurrentIndex(self.cmbType.findData(self.ds_info.type))
    # feel widgets -- every driver widget reads whichever fields apply to it
    for spec_widget in self.DRV_WIDGETS.values():
        spec_widget.feel_form(self.ds_info)
def accept(self):
    """Validate the form and persist the data source before closing."""
    new_ds_info = DataSourceInfo()
    self.feel_ds_info(new_ds_info)
    if not self.validate(new_ds_info):
        # keep the dialog open so the user can fix their input
        return
    if self.init_with_existing:
        res = self.save_existing(new_ds_info)
    else:
        res = self.create_new(new_ds_info)
    if res:
        super(DsEditDialog, self).accept()
def save_existing(self, ds_info):
    """Persist changes to an existing data source; return True on success."""
    if ds_info.id != self.ds_info.id and not self.check_existing_id(ds_info.id):
        return False
    if ds_info == self.ds_info:
        # nothing changed; no disk writes needed
        return True
    # replace icon if need
    if not is_same(ds_info.icon_path, self.ds_info.icon_path):
        os.remove(self.ds_info.icon_path)
        dir_path = os.path.dirname(self.ds_info.file_path)
        ico_file_name = path.basename(ds_info.icon_path)
        ico_path = path.join(dir_path, ico_file_name)
        shutil.copy(ds_info.icon_path, ico_path)
    # replace gdal_conf if need
    if ds_info.type == KNOWN_DRIVERS.GDAL:
        def copy_new_gdal_file():
            # copy the new GDAL source file next to the metadata file
            dir_path = os.path.dirname(self.ds_info.file_path)
            gdal_file_name = path.basename(ds_info.gdal_source_file)
            gdal_file_path = path.join(dir_path, gdal_file_name)
            shutil.copy(ds_info.gdal_source_file, gdal_file_path)
        # old ds = gdal
        if self.ds_info.type == KNOWN_DRIVERS.GDAL:
            if ds_info.gdal_source_file != self.ds_info.gdal_source_file:
                # NOTE(review): removing icon_path here looks like a
                # copy/paste slip -- presumably the old *gdal* file should
                # be deleted instead; confirm before changing behavior.
                os.remove(self.ds_info.icon_path)
                copy_new_gdal_file()
        else:
            copy_new_gdal_file()
    # write config
    DataSourceSerializer.write_to_ini(ds_info, self.ds_info.file_path)
    return True
def create_new(self, ds_info):
    """Create a new data source dir with metadata, icon and optional GDAL file.

    Returns True on success, False when the id is already taken.
    """
    if not self.check_existing_id(ds_info.id):
        return False
    # set paths
    dir_path = path.join(extra_sources.USER_DIR_PATH, extra_sources.DATA_SOURCES_DIR_NAME, ds_info.id)
    if path.exists(dir_path):
        # Disambiguate with a numeric suffix until the path is free.
        salt = 0
        while path.exists(dir_path + str(salt)):
            salt += 1
        dir_path += str(salt)
    ini_path = path.join(dir_path, 'metadata.ini')
    ico_path = path.join(dir_path, ds_info.icon)
    # create dir
    os.mkdir(dir_path)
    # copy icon
    shutil.copy(ds_info.icon_path, ico_path)
    if ds_info.type == KNOWN_DRIVERS.GDAL:
        # copy gdal file
        gdal_file_name = path.basename(ds_info.gdal_source_file)
        gdal_file_path = path.join(dir_path, gdal_file_name)
        shutil.copy(ds_info.gdal_source_file, gdal_file_path)
    # write config
    DataSourceSerializer.write_to_ini(ds_info, ini_path)
    return True
def check_existing_id(self, ds_id):
    """Return True if ds_id is free; otherwise warn the user and return False."""
    existing = DataSourcesList()
    if ds_id in existing.data_sources:
        QMessageBox.critical(
            self,
            self.tr('Error on save group'),
            self.tr('Data source with such id already exists! Select new id for data source!'))
        return False
    return True
def feel_ds_info(self, ds_info):
    """Copy every form field into the given DataSourceInfo instance."""
    ds_info.id = self.txtId.text()
    ds_info.alias = self.txtAlias.text()
    # ds_info.icon = os.path.basename(self.txtIcon.get_path())
    ds_info.icon = os.path.basename(self.__ds_icon)
    # license block
    ds_info.lic_name = self.txtLicense.text()
    ds_info.lic_link = self.txtLicenseLink.text()
    ds_info.copyright_text = self.txtCopyrightText.text()
    ds_info.copyright_link = self.txtCopyrightLink.text()
    ds_info.terms_of_use = self.txtTermsOfUse.text()
    # group/type come from the combo boxes' attached item data
    ds_info.group = self.cmbGroup.itemData(self.cmbGroup.currentIndex()).id
    ds_info.type = self.cmbType.itemData(self.cmbType.currentIndex())
    # let the driver-specific widget fill its own fields
    self.DRV_WIDGETS[ds_info.type].feel_ds_info(ds_info)
    ds_info.icon_path = self.__ds_icon
    # ds_info.icon_path = self.txtIcon.get_path()
def validate(self, ds_info):
# validate common fields
|
"""log model admin."""
from django.contrib import admin
from django.db import models
from django.forms.widgets import TextInput
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.log_mgr.models import MakahikiLog
from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site
class MakahikiLogAdmin(admin.ModelAdmin):
    """Read-only admin for browsing MakahikiLog request records."""
    list_display = ('request_url', "remote_user", 'remote_ip', 'request_time',
                    'request_method', 'response_status')
    list_filter = ('response_status', 'remote_user')
    search_fields = ('request_url', 'remote_ip')
    ordering = ["-request_time"]
    date_hierarchy = "request_time"
    # Wider text inputs so long URLs stay readable while editing.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size': '100'})},
    }

    def has_add_permission(self, request):
        # Log rows are produced by the logging middleware only; forbid
        # manual creation through the admin.
        return False
# Expose the request log on every admin site variant.
admin.site.register(MakahikiLog, MakahikiLogAdmin)
challenge_designer_site.register(MakahikiLog, MakahikiLogAdmin)
challenge_manager_site.register(MakahikiLog, MakahikiLogAdmin)
developer_site.register(MakahikiLog, MakahikiLogAdmin)
# Surface the log in the challenge "Status" info pages as well.
challenge_mgr.register_admin_challenge_info_model("Status", 1, MakahikiLog, 1)
challenge_mgr.register_developer_challenge_info_model("Status", 4, MakahikiLog, 1)
|
from ni.core.selection import Selection
from ni.core.text import char_pos_to_tab_pos
from ni.core.document import InsertDelta, DeleteDelta
class Action(object):
    """Base class for all view actions."""

    def __init__(self, view):
        # Actions start ungrouped; undo grouping may flip this later.
        self.grouped = False
        self.editor = view.editor
        self.view = view

    def execute(self):
        """Perform the action; concrete subclasses must override."""
        raise NotImplementedError
class MoveCursorAction(Action):
    """Base class for all actions that involve moving the cursor around."""

    def __init__(self, view, is_select=False):
        super(MoveCursorAction, self).__init__(view)
        self.is_select = is_select

    def execute(self):
        view = self.view
        doc = view.document
        start_pos = view.cursor_pos
        start_scroll = view.scroll_pos
        self.move()
        # Repaint only when something actually moved.
        if start_pos != view.cursor_pos or start_scroll != view.scroll_pos:
            view.invalidate()
        if self.is_select:
            end_offset = doc.cursor_pos_to_offset(view.cursor_pos)
            if view.selection:
                # Extend the existing selection to the new cursor position.
                view.selection.end = end_offset
            else:
                # Start a fresh selection anchored where the cursor was.
                anchor = doc.cursor_pos_to_offset(start_pos)
                view.selection = Selection(doc, anchor, end_offset)

    def move(self):
        raise NotImplementedError
class EditAction(Action):
    """Base class for all undoable actions."""

    def __init__(self, view):
        super(EditAction, self).__init__(view)
        # Cursor/scroll state captured around do() so undo/redo can
        # restore the view exactly.
        self.before_cursor_pos = None
        self.before_last_x_pos = None
        self.before_scroll_pos = None
        self.after_cursor_pos = None
        self.after_last_x_pos = None
        self.after_scroll_pos = None
        # Document deltas applied by do(), in application order.
        self.deltas = []
        self.is_executed = False

    def execute(self):
        """
        Save positions so that we can return later and call self.do().
        """
        self.is_executed = True
        view = self.view
        # for undo purposes
        self.before_cursor_pos = view.cursor_pos
        self.before_last_x_pos = view.last_x_pos
        self.before_scroll_pos = view.scroll_pos
        self.do()
        # recalculate last_x_pos based on where the cursor is now
        doc = view.document
        y, x = view.cursor_pos
        line = doc.get_line(y)
        view.last_x_pos = char_pos_to_tab_pos(line, x, doc.tab_size)
        # for redo purposes
        self.after_cursor_pos = view.cursor_pos
        self.after_last_x_pos = view.last_x_pos
        self.after_scroll_pos = view.scroll_pos
        view.invalidate()

    def delete_selection(self):
        """
        Common code for deleting a selection used by many edit actions.
        """
        view = self.view
        doc = view.document
        # delete the selection (end is inclusive, hence the +1)
        selection = view.selection.get_normalised()
        d = DeleteDelta(doc, selection.start, selection.end-selection.start+1)
        d.do()
        self.deltas.append(d)
        view.selection = None
        # move the cursor (insert point) to the start of where the selection
        # was before we deleted it
        view.cursor_pos = doc.offset_to_cursor_pos(selection.start)

    def do(self):
        """
        Subclasses should implement this.
        """
        raise NotImplementedError

    def undo(self):
        """Revert all deltas (newest first) and restore the pre-edit view state."""
        if not self.is_executed:
            raise RuntimeError("Not executed")
        for d in reversed(self.deltas):
            d.undo()
        # reset the cursor and scroll positions to where it was
        self.view.cursor_pos = self.before_cursor_pos
        self.view.last_x_pos = self.before_last_x_pos
        self.view.scroll_pos = self.before_scroll_pos
        self.view.invalidate()

    def redo(self):
        """Re-apply all deltas (oldest first) and restore the post-edit view state."""
        if not self.is_executed:
            raise RuntimeError("Not executed")
        for d in self.deltas:
            d.do()
        # reset the cursor and scroll positions to where it was
        self.view.cursor_pos = self.after_cursor_pos
        self.view.last_x_pos = self.after_last_x_pos
        self.view.scroll_pos = self.after_scroll_pos
        self.view.invalidate()
class ToggleComment(EditAction):
    """Toggle a line-comment prefix on the current line or selection."""

    def __init__(self, view, comment_string):
        # e.g. "#" for Python; must be set before the base __init__ runs
        self.comment_string = comment_string
        super(ToggleComment, self).__init__(view)

    def do(self):
        view = self.view
        doc = view.document
        settings = self.editor.settings
        # Work out the affected line range: the selection if there is one,
        # otherwise just the cursor's line.
        if view.selection:
            selection = view.selection.get_normalised()
            from_line = doc.offset_to_cursor_pos(selection.start)[0]
            to_line = doc.offset_to_cursor_pos(selection.end)[0]
        else:
            from_line = view.cursor_pos[0]
            to_line = from_line
        for y in xrange(from_line, to_line+1):
            line = doc.get_line(y)
            offset = doc.cursor_pos_to_offset((y, 0))
            if line[:len(self.comment_string)] == self.comment_string:
                # line already commented: remove the prefix
                d = DeleteDelta(doc, offset, len(self.comment_string))
            else:
                # line not commented: insert the prefix
                d = InsertDelta(doc, offset, self.comment_string)
            d.do()
            self.deltas.append(d)
        # move the cursor if necessary
        y, x = view.cursor_pos
        line = doc.get_line(y)
        if line[:len(self.comment_string)] == self.comment_string:
            # we added comment_string, so increase cursor pos
            if x != 0:
                x += len(self.comment_string)
            if x > len(line):
                x = len(line)
            view.cursor_pos = (y, x)
        else:
            # we removed comment_string, so decrease cursor pos
            x -= len(self.comment_string)
            if x < 0:
                x = 0
            view.cursor_pos = (y, x)
        # not sure how best to grow/shrink the selection right now,
        # so just destroying it for now
        view.selection = None
|
#!/usr/bin/env python
"""Packaging script for mythutils_recfail_alarm (recorder-failure Prowl alerts)."""
from glob import glob
from distutils.core import setup

setup(
    name="mythutils_recfail_alarm",
    version="1.0",
    # Typo fix: "Autoamtically" -> "Automatically".
    description="Automatically notify on Recorder Failed via Prowl service",
    author="Wylie Swanson",
    author_email="wylie@pingzero.net",
    url="http://www.pingzero.net",
    scripts=glob("bin/*"),
    data_files=[
        ('/etc/mythutils/', glob('etc/mythutils/*')),
        ('/etc/cron.d/', glob('etc/cron.d/*')),
    ],
)
|
##
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for xlmpich compiler toolchain (includes IBM XL compilers (xlc, xlf) and MPICH).
@author: Jack Perdue <j-perdue@tamu.edu> - TAMU HPRC - http://sc.tamu.edu
"""
from easybuild.toolchains.compiler.ibmxl import IBMXL
from easybuild.toolchains.mpi.mvapich2 import Mvapich2
class Xlompi(IBMXL, Mvapich2):
    """
    Compiler toolchain with IBM XL compilers (xlc/xlf) and MVAPICH2.
    """
    # NOTE(review): the class name suggests OpenMPI and nearby comments say
    # MPICH, but the MPI mixin and NAME below are MVAPICH2 -- confirm the
    # intended MPI library upstream before renaming anything.
    NAME = 'xlmvapich2'
|
sourceConformity
classes = ('collapse closed',)
extra = 1
class ResponsiblePartyRoleInline(admin.TabularInline):
    # Inline editor for ResponsiblePartyRole rows, collapsed by default.
    model = ResponsiblePartyRole
    classes = ('collapse closed',)
    extra = 1  # one blank row for quick additions
class ResourceResponsiblePartyRoleInline(admin.TabularInline):
    # Inline editor for ResourceResponsiblePartyRole rows, collapsed by default.
    model = ResourceResponsiblePartyRole
    classes = ('collapse closed',)
    extra = 1  # one blank row for quick additions
class MdResponsiblePartyRoleInline(admin.TabularInline):
    # Metadata responsible-party inline; the role itself is not editable.
    model = MdResponsiblePartyRole
    #exclude = ('role',)
    readonly_fields = ('role',)
    classes = ('collapse closed',)
    extra = 1  # one blank row for quick additions
class ResourceMdResponsiblePartyRoleInline(admin.TabularInline):
    # Resource-level metadata responsible-party inline; role not editable.
    model = ResourceMdResponsiblePartyRole
    #exclude = ('role',)
    readonly_fields = ('role',)
    classes = ('collapse closed',)
    extra = 1  # one blank row for quick additions
class BaseCodeAdmin(TranslationAdmin):
    # Generic admin for code tables: labels editable directly in the list.
    list_editable = ['label',]
    list_display = ['id', 'label']

    class Media:
        js = translation_js
        css = translation_css
class BaseCodeIsoAdmin(TranslationAdmin):
    # Code tables that additionally carry an ISO identifier.
    list_editable = ['label','isoid']
    list_display = ['id', 'label', 'isoid']

    class Media:
        js = translation_js
        css = translation_css
class CodeRefSysAdmin(TranslationAdmin):
    # Reference-system codes with their spatial reference id (SRID).
    list_editable = ['label', 'srid']
    list_display = ['id', 'label', 'srid']

    class Media:
        js = translation_js
        css = translation_css
class CodeLicenseAdmin(TranslationAdmin):
    # License codes with an editable abstract.
    list_editable = ['label', 'abstract']
    list_display = ['id', 'label', 'abstract']

    class Media:
        js = translation_js
        css = translation_css
class CodeDistributionFormatAdmin(TranslationAdmin):
    # Distribution formats: everything editable from the list view.
    list_editable = ['format','label', 'version', 'mimetype', 'ordering']
    list_display = ['id', 'format', 'label', 'version', 'mimetype', 'ordering']

    class Media:
        js = translation_js
        css = translation_css
class ResponsiblePartyAdmin(TranslationAdmin):
    # list_editable = ['label', 'version', 'ordering']
    # list_display = ['id', 'label', 'version', 'ordering']
    # NOTE(review): with the options above commented out this is a plain
    # TranslationAdmin apart from the translation media below.

    class Media:
        js = translation_js
        css = translation_css
class LayerExtAdmin(TranslationAdmin):
    """Admin for extended layer metadata with row-level permissions.

    Non-superusers only see layers on which they hold a readwrite or
    admin role (see queryset()).
    """
    # row-level permissions
    # http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html
    def queryset(self, request):
        qs = super(LayerExtAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        # Restrict to objects on which the user holds an editing role.
        return qs.filter(
            id__in=UserObjectRoleMapping.objects.filter(
                user=request.user,
                role__codename__in=('layer_readwrite', 'layer_admin'),
            ).values_list('object_id', flat=True)
        )

    inlines = [  # OnlineResourceInline,
        TemporalExtentInline,
        ReferenceDateInline,
        ConformityInline,
        ResponsiblePartyRoleInline,
        MdResponsiblePartyRoleInline,
        # ConnectionInline,
        # InverseConnectionInline,
    ]
    #raw_id_fields = ("parent_identifier",)
    filter_horizontal = ['presentation_form','spatial_representation_type_ext','topic_category_ext','responsible_party_role','distribution_format','md_responsible_party_role']
    # filter_horizontal
    #readonly_fields = ['uuid', 'geographic_bounding_box']
    # readonly_fields = ['uuid', 'md_uuid', 'geographic_bounding_box', 'md_standard_name', 'md_version_name', 'md_character_set']
    search_fields = ['titleml', 'abstractml']
    search_fields_verbose = ['Titolo', 'Descrizione'] #GRAPPELLI
    list_filter = ('resource_type', 'spatial_representation_type_ext', 'topic_category', 'distribution_format')
    # NOTE: an earlier duplicate assignment list_display = ('titleml',) was
    # immediately shadowed by this one and has been removed.
    list_display = ('id', 'titleml', 'inspire', 'completeness_bar')
    fieldsets = (
        (_('Metadata'), {
            'classes': ('collapse closed',),
            'fields': (
                'md_uuid',
                #'lingua_metadata',
                'md_date_stamp',
                ('md_character_set', 'md_standard_name', 'md_version_name')
            )
        }),
        (_('Identification'), {
            'classes': ('collapse closed',),
            'fields': (
                'titleml', 'abstractml', # 'source_document', # override by resources connections
                #'resource_type', 'parent_identifier', 'other_citation_details',
                'other_citation_details',
                'presentation_form',
                'distribution_format'
            )
        }),
        (_('Identification2'), {
            'classes': ('collapse closed',),
            'fields': (
                ('resource_type', 'uuid'),
                ('language', 'character_set'),
                'supplemental_information_ml',
                'update_frequency',
                'spatial_representation_type_ext'
            )
        }),
        (_('Responsible Party'), {
            'classes': ('collapse closed',),
            'fields': []
        }),
        (_('Classification e Keywords'), {
            'classes': ('collapse closed',),
            'fields': (
                'inspire', 'topic_category_ext', 'gemetkeywords'
            )
        }),
        (_('Geographic extent'), {
            'classes': ('collapse',),
            'fields': (
                ('ref_sys', 'geographic_bounding_box'),
                #'geo',
                ('vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent')
            )
        }),
        (_('Temporal extent'), {
            'classes': ('collapse',),
            'fields': []
        }),
        (_('DataQuality'), {
            'classes': ('collapse closed',),
            'fields': (
                'lineage', ('equivalent_scale', 'distance', 'uom_distance')
            )
        }),
        (_('Conformity'), {
            'classes': ('collapse closed',),
            'fields': []
        }),
        # ('Distribution', {
        #     'classes': ('collapse closed',),
        #     'fields': (
        #     )
        # }),
        (_('Constraints'), {
            'classes': ('collapse closed',),
            'fields': (
                'license',
                'use_limitation',
                ('access_constraints', 'use_constraints'),
                'other_constraints',
                'security_constraints',
            )
        }),
        # ('Relations', {
        #     'classes': ('collapse closed',),
        #     'fields': []
        # }),
        #('Sezione sistema - non compilabile', {
        #    'classes': ('collapse closed',),
        #    'fields': (
        #        'geonode_tipo_layer',
        #        )
        #    }),
    )

    class Media:
        js = translation_js
        css = translation_css

    def response_change(self, request, obj):
        res = super(LayerExtAdmin, self).response_change(request, obj)
        # After "Save", go to the object's public page rather than the
        # admin changelist. ("x in request.POST" replaces the Py2-only
        # dict.has_key() and behaves identically.)
        if "_save" in request.POST:
            return HttpResponseRedirect(obj.get_absolute_url())
        else:
            return res
class ResourceAdmin(TranslationAdmin):
# row-level permissions
# http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html
def queryset(self, request):
qs = super(ResourceAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(id__in = UserObjectRoleMapping.objects.filter(user=request.user,
role__codename__in =('resource_readwrite','resource_admin')
).values_list('object_id',flat=True)
)
list_display = ('titleml',)
inlines = [ # OnlineR |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio Previewer Utilities."""
import cchardet
from flask import current_app
def detect_encoding(fp, default=None):
    """Detect the character encoding of a file.
    :param fp: Open Python file pointer.
    :param default: Fallback encoding to use.
    :returns: The detected encoding.
    .. note:: The file pointer is returned at its original read position.
    """
    init_pos = fp.tell()
    try:
        # Sample only the first N bytes (1024 by default) for speed.
        sample = fp.read(
            current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
        # Result contains 'confidence' and 'encoding'
        result = cchardet.detect(sample)
        threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
        # Only trust detections strictly above the confidence threshold.
        if result.get('confidence', 0) > threshold:
            return result.get('encoding', default)
        else:
            return default
    except Exception:
        # Best effort only: never let detection errors break previewing.
        current_app.logger.warning('Encoding detection failed.', exc_info=True)
        return default
    finally:
        # Restore the caller's read position.
        fp.seek(init_pos)
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
from cloudify_rest_client import exceptions
from base_test import BaseServerTestCase
class ProviderContextTestCase(BaseServerTestCase):
    """REST tests for the provider context endpoints."""

    def test_post_provider_context(self):
        # Also reused as a fixture by the other tests below.
        payload = {
            'name': 'test_provider',
            'context': {'key': 'value'},
        }
        result = self.post('/provider/context', data=payload)
        self.assertEqual(result.status_code, 201)
        self.assertEqual(result.json['status'], 'ok')

    def test_get_provider_context(self):
        self.test_post_provider_context()
        result = self.get('/provider/context').json
        self.assertEqual(result['context']['key'], 'value')
        self.assertEqual(result['name'], 'test_provider')

    def test_post_provider_context_twice_fails(self):
        self.test_post_provider_context()
        # A second POST must make the fixture's own assertions fail.
        self.assertRaises(self.failureException,
                          self.test_post_provider_context)

    def test_update_provider_context(self):
        self.test_post_provider_context()
        new_context = {'key': 'new-value'}
        self.client.manager.update_context(
            'test_provider', new_context)
        context = self.client.manager.get_context()
        self.assertEqual(context['context'], new_context)

    def test_update_empty_provider_context(self):
        # Updating when no context exists yet must yield a 404.
        try:
            self.client.manager.update_context(
                'test_provider',
                {'key': 'value'})
            self.fail('Expected failure due to existing context')
        except exceptions.CloudifyClientError as e:
            self.assertEqual(e.status_code, 404)
            self.assertEqual(e.message, 'Provider Context not found')
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you | under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtai | n a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.gcp.hooks.text_to_speech import CloudTextToSpeechHook
from tests.compat import PropertyMock, patch
from tests.gcp.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
# Sample request payloads; shapes mirror the GCP Text-to-Speech API dicts
# the hook forwards verbatim (see test_synthesize_speech below).
INPUT = {"text": "test text"}
VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"}
AUDIO_CONFIG = {"audio_encoding": "MP3"}
class TestTextToSpeechHook(unittest.TestCase):
    """Unit tests for CloudTextToSpeechHook with all GCP calls mocked out."""

    def setUp(self):
        # Patch the base hook constructor so no real credentials/project id
        # are needed to build the hook under test.
        with patch(
            "airflow.gcp.hooks.base.CloudBaseHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.gcp_text_to_speech_hook = CloudTextToSpeechHook(gcp_conn_id="test")

    @patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook.client_info", new_callable=PropertyMock)
    @patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook._get_credentials")
    @patch("airflow.gcp.hooks.text_to_speech.TextToSpeechClient")
    def test_text_to_speech_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        # get_conn() must build the client from credentials + client_info
        # exactly once and cache the result on the hook.
        conn = self.gcp_text_to_speech_hook.get_conn()
        mock_client.assert_called_once_with(
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value
        )
        self.assertEqual(mock_client.return_value, conn)
        self.assertEqual(self.gcp_text_to_speech_hook._client, conn)

    @patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook.get_conn")
    def test_synthesize_speech(self, get_conn):
        # The hook should forward its arguments verbatim to the client,
        # with retry/timeout defaulting to None.
        synthesize_method = get_conn.return_value.synthesize_speech
        synthesize_method.return_value = None
        self.gcp_text_to_speech_hook.synthesize_speech(
            input_data=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG
        )
        synthesize_method.assert_called_once_with(
            input_=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG, retry=None, timeout=None
        )
|
# -*- coding: utf-8 -*-
from | __future__ import unicode_literals
from .completion import Person
def test_person_suggests_on_all_variants_of_name(write_client):
    """A saved Person should be completable from a partial surname."""
    Person.init(using=write_client)
    Person(name='Honza Král', popularity=42).save(refresh=True)

    search = Person.search().suggest('t', 'kra', completion={'field': 'suggest'})
    suggestions = search.execute().suggest.t[0].options

    assert len(suggestions) == 1
    assert suggestions[0]._score == 42
    assert suggestions[0]._source.name == 'Honza Král'
|
#this mod | ule here is to compute the formula to calculate the new means and
#new variance.
def update(mean1, var1, mean2, var2):
new_mean = ((mean1 * var2) + (mean2*var1))/(var1 + var2)
new_var = 1/(1/var1 + 1/var2)
return [new_mean, new_var]
def predict(mean1, var1, mean2, var2):
new_mean = mean1 + mean2
new_var = var1 + var2
return [new_mean, new_var]
print 'update : 'update(10.,4., 12.,4.)
print predict(10.,4., | 12.,4.) |
indentation = _remove_ansi_escape_sequences(formatted).find(
lines[0]
)
else:
# Optimizes logging by allowing a fixed indentation.
indentation = auto_indent
lines[0] = formatted
return ("\n" + " " * indentation).join(lines)
return self._fmt | % record.__dict__
def get_option_ini(config: Config, *names: str):
    """Return the first truthy value among *names*, preferring the
    command-line value and falling back to the ini file."""
    for option_name in names:
        value = config.getoption(option_name)  # 'default' arg won't work as expected
        if value is None:
            value = config.getini(option_name)
        if value:
            return value
def pytes | t_addoption(parser: Parser) -> None:
"""Add options to control log capturing."""
group = parser.getgroup("logging")
def add_option_ini(option, dest, default=None, type=None, **kwargs):
parser.addini(
dest, default=default, type=type, help="default value for " + option
)
group.addoption(option, dest=dest, **kwargs)
add_option_ini(
"--log-level",
dest="log_level",
default=None,
metavar="LEVEL",
help=(
"level of messages to catch/display.\n"
"Not set by default, so it depends on the root/parent log handler's"
' effective level, where it is "WARNING" by default.'
),
)
add_option_ini(
"--log-format",
dest="log_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-date-format",
dest="log_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
parser.addini(
"log_cli",
default=False,
type="bool",
help='enable log display during test run (also known as "live logging").',
)
add_option_ini(
"--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
)
add_option_ini(
"--log-cli-format",
dest="log_cli_format",
default=None,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-cli-date-format",
dest="log_cli_date_format",
default=None,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-file",
dest="log_file",
default=None,
help="path to a file when logging will be written to.",
)
add_option_ini(
"--log-file-level",
dest="log_file_level",
default=None,
help="log file logging level.",
)
add_option_ini(
"--log-file-format",
dest="log_file_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-file-date-format",
dest="log_file_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-auto-indent",
dest="log_auto_indent",
default=None,
help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.",
)
# Type variable preserving the concrete handler subclass through catching_logs.
_HandlerType = TypeVar("_HandlerType", bound=logging.Handler)
# Not using @contextmanager for performance reasons.
class catching_logs:
    """Context manager that prepares the whole logging machinery properly."""

    __slots__ = ("handler", "level", "orig_level")

    def __init__(self, handler: _HandlerType, level: Optional[int] = None) -> None:
        self.handler = handler
        self.level = level

    def __enter__(self):
        root = logging.getLogger()
        if self.level is not None:
            self.handler.setLevel(self.level)
        root.addHandler(self.handler)
        if self.level is not None:
            # Remember the previous root level so __exit__ can restore it,
            # then lower the root level if needed so records reach us.
            self.orig_level = root.level
            root.setLevel(min(self.orig_level, self.level))
        return self.handler

    def __exit__(self, type, value, traceback):
        root = logging.getLogger()
        if self.level is not None:
            root.setLevel(self.orig_level)
        root.removeHandler(self.handler)
class LogCaptureHandler(logging.StreamHandler):
    """A logging handler that stores log records and the log text."""

    stream: StringIO

    def __init__(self) -> None:
        """Create a new log handler backed by an in-memory stream."""
        super().__init__(StringIO())
        self.records: List[logging.LogRecord] = []

    def emit(self, record: logging.LogRecord) -> None:
        """Keep the log records in a list in addition to the log text."""
        self.records.append(record)
        super().emit(record)

    def reset(self) -> None:
        """Forget every captured record and all captured text."""
        self.records = []
        self.stream = StringIO()

    def handleError(self, record: logging.LogRecord) -> None:
        if logging.raiseExceptions:
            # An error during emit() means the log call itself was broken
            # (e.g. bad format string). Re-raise so the problem is visible
            # during testing instead of logging's default stderr printout.
            raise
@final
class LogCaptureFixture:
"""Provides access and control of log capturing."""
    def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
        """Create the fixture bound to a test item; pytest-internal only."""
        check_ispytest(_ispytest)
        self._item = item
        # Handler level before set_level() changed it; restored in _finalize().
        self._initial_handler_level: Optional[int] = None
        # Dict of log name -> log level.
        self._initial_logger_levels: Dict[Optional[str], int] = {}
def _finalize(self) -> None:
"""Finalize the fixture.
This restores the log levels changed by :meth:`set_level`.
"""
# Restore log levels.
if self._initial_handler_level is not None:
self.handler.setLevel(self._initial_handler_level)
for logger_name, level in self._initial_logger_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
@property
def handler(self) -> LogCaptureHandler:
"""Get the logging handler used by the fixture.
:rtype: LogCaptureHandler
"""
return self._item._store[caplog_handler_key]
def get_records(self, when: str) -> List[logging.LogRecord]:
"""Get the logging records for one of the possible test phases.
:param str when:
Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
:returns: The list of captured records at the given stage.
:rtype: List[logging.LogRecord]
.. versionadded:: 3.4
"""
return self._item._store[caplog_records_key].get(when, [])
@property
def text(self) -> str:
"""The formatted log text."""
return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
@property
def records(self) -> List[logging.LogRecord]:
"""The list of log records."""
return self.handler.records
@property
def record_tuples(self) -> List[Tuple[str, int, str]]:
"""A list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
"""
return [(r.name, r.levelno, r.getMessage()) for r in self.records]
@property
def messages(self) -> List[str]:
"""A list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for
interpolation, log messages in this list are all interpolated.
Unlike 'text', which contains the output from the handler, log
messages in this list are unadorned with levels, timestamps, etc,
making exact comparisons more reliable.
Note that traceback or stack info (from :func:`logging.exception` or
the `exc_info` or `stack_info` arguments to the logging functions) is
not included, as this is added by the formatter in the handler.
.. versio |
#! /usr/bin/env python
"""
this file converts simple html text into a docbook xml variant.
The mapping of markups and links is far from perfect. But all we
want is the docbook-to-pdf converter and similar technology being
present in the world of docbook-to-anything converters. """
from datetime import date
import match
import sys
m = match.Match
class htm2dbk_conversion_base:
    """Regex rule tables (built on the custom match.Match `>>` DSL) mapping
    simple html markup onto docbook elements. `regexlist` is the full-page
    rule set; `regexlist2` handles small html fragments (e.g. C comments).
    Replacement placeholders: --filename-- is substituted by the caller."""
    # Rules are applied in order; flags "m"/"s" are the usual re MULTILINE /
    # DOTALL spellings accepted by match.Match.
    regexlist = [
        m()("</[hH]2>(.*)", "m") >> "</title>\n<subtitle>\\1</subtitle>",
        m()("<[hH]2>") >> "<sect1 id=\"--filename--\"><title>",
        m()("<[Pp]([> ])","m") >> "<para\\1",
        m()("</[Pp]>") >> "</para>",
        m()("<(pre|PRE)>") >> "<screen>",
        m()("</(pre|PRE)>") >> "</screen>",
        m()("<[hH]3>") >> "<sect2><title>",
        m()("</[hH]3>((?:.(?!<sect2>))*.?)", "s") >> "</title>\\1</sect2>",
        m()("<!doctype [^<>]*>","s") >> "",
        m()("<!DOCTYPE [^<>]*>","s") >> "",
        m()("(<\w+\b[^<>]*\swidth=)(\d+\%)","s") >> "\\1\"\\2\"",
        m()("(<\w+\b[^<>]*\s\w+=)(\d+)","s") >> "\\1\"\\2\"",
        m()("&&") >> "\&\;\&\;",
        m()("\$\<") >> "\$\<\;",
        m()("&(\w+[\),])") >> "\&\;\\1",
        m()("(</?)span(\s[^<>]*)?>","s") >> "\\1phrase\\2>",
        m()("(</?)small(\s[^<>]*)?>","s") >> "\\1note\\2>",
        m()("(</?)(b|em|i)>")>> "\\1emphasis>",
        m()("(</?)(li)>") >> "\\1listitem>",
        m()("(</?)(ul)>") >> "\\1itemizedlist>",
        m()("(</?)(ol)>") >> "\\1orderedlist>",
        m()("(</?)(dl)>") >> "\\1variablelist>",
        m()("<dt\b([^<>]*)>","s") >> "<varlistentry\\1><term>",
        m()("</dt\b([^<>]*)>","s") >> "</term>",
        m()("<dd\b([^<>]*)>","s") >> "<listitem\\1>",
        m()("</dd\b([^<>]*)>","s") >> "</listitem></varlistentry>",
        # Tables become informaltable/tgroup/tbody structures; the rules
        # after them collapse accidental nesting from the html source.
        m()("<table\b([^<>]*)>","s")
        >> "<informaltable\\1><tgroup cols=\"2\"><tbody>",
        m()("</table\b([^<>]*)>","s") >> "</tbody></tgroup></informaltable>",
        m()("(</?)tr(\s[^<>]*)?>","s") >> "\\1row\\2>",
        m()("(</?)td(\s[^<>]*)?>","s") >> "\\1entry\\2>",
        m()("<informaltable\b[^<>]*>\s*<tgroup\b[^<>]*>\s*<tbody>"+
            "\s*<row\b[^<>]*>\s*<entry\b[^<>]*>\s*<informaltable\b","s")
        >> "<informaltable",
        m()("</informaltable>\s*</entry>\s*</row>"+
            "\s*</tbody>\s*</tgroup>\s*</informaltable>", "s")
        >> "</informaltable>",
        m()("(<informaltable[^<>]*\swidth=\"100\%\")","s") >> "\\1 pgwide=\"1\"",
        m()("(<tbody>\s*<row[^<>]*>\s*<entry[^<>]*\s)(width=\"50\%\")","s")
        >> "<colspec colwidth=\"1*\" /><colspec colwidth=\"1*\" />\n\\1\\2",
        # <nobr><tt>…</tt></nobr> spans are treated as command synopses.
        m()("<nobr>([\'\`]*)<tt>") >> "<cmdsynopsis>\\1",
        m()("</tt>([\'\`]*)</nobr>") >> "\\1</cmdsynopsis>",
        m()("<nobr><(?:tt|code)>([\`\"\'])") >> "<cmdsynopsis>\\1",
        m()("<(?:tt|code)><nobr>([\`\"\'])") >> "<cmdsynopsis>\\1",
        m()("([\`\"\'])</(?:tt|code)></nobr>") >> "\\1</cmdsynopsis>",
        m()("([\`\"\'])</nobr></(?:tt|code)>") >> "\\1</cmdsynopsis>",
        m()("(</?)tt>") >> "\\1constant>",
        m()("(</?)code>") >> "\\1literal>",
        m()(">([^<>]+)<br>","s") >> "><highlights>\\1</highlights>",
        m()("<br>") >> "<br />",
        # m()("<date>") >> "<sect1info><date>",
        # m()("</date>") >> "</date></sect1info>",
        m()("<reference>") >> "<reference id=\"reference\">" >> 1,
        # Hyperlinks: external schemes become ulink, local html pages become
        # intra-document link elements keyed by filename.
        m()("<a\s+href=\"((?:http|ftp|mailto):[^<>]+)\"\s*>((?:.(?!</a>))*.)</a>"
            ,"s") >> "<ulink url=\"\\1\">\\2</ulink>",
        m()("<a\s+href=\"zziplib.html\#([\w_]+)\"\s*>((?:.(?!</a>))*.)</a>","s")
        >> "<link linkend=\"$1\">$2</link>",
        m()("<a\s+href=\"(zziplib.html)\"\s*>((?:.(?!</a>))*.)</a>","s")
        >> "<link linkend=\"reference\">$2</link>",
        m()("<a\s+href=\"([\w-]+[.]html)\"\s*>((?:.(?!</a>))*.)</a>","s")
        >> "<link linkend=\"\\1\">\\2</link>",
        m()("<a\s+href=\"([\w-]+[.](?:h|c|am|txt))\"\s*>((?:.(?!</a>))*.)</a>"
            ,"s") >> "<ulink url=\"file:\\1\">\\2</ulink>",
        m()("<a\s+href=\"([A-Z0-9]+[.][A-Z0-9]+)\"\s*>((?:.(?!</a>))*.)</a>","s")
        >> "<ulink url=\"file:\\1\">\\2</ulink>"
        # m()("(</?)subtitle>") >> "\\1para>"
        # $_ .= "</sect1>" if /<sect1[> ]/
    ]
    # Lighter rule set for html fragments embedded in C comments.
    regexlist2 = [
        m()(r"<br\s*/?>") >> "",
        m()(r"(</?)em>") >> r"\1emphasis>",
        m()(r"<code>") >> "<userinput>",
        m()(r"</code>") >> "</userinput>",
        m()(r"<link>") >> "<function>",
        m()(r"</link>") >> "</function>",
        m()(r"(?s)\s*</screen>") >> "</screen>",
        # m()(r"<ul>") >> "</para><programlisting>\n",
        # m()(r"</ul>") >> "</programlisting><para>",
        m()(r"<ul>") >> "<itemizedlist>",
        m()(r"</ul>") >> "</itemizedlist>",
        # m()(r"<li>") >> "",
        # m()(r"</li>") >> ""
        m()(r"<li>") >> "<listitem><para>",
        m()(r"</li>") >> "</para></listitem>\n",
    ]
class htm2dbk_conversion(htm2dbk_conversion_base):
    """Applies the html-to-docbook rule tables to a text."""
    def __init__(self):
        self.version = "" # str(date.today)
        self.filename = "."
    def convert(self,text): # $text
        """Full-page conversion: apply `regexlist` and substitute the
        version and filename placeholders."""
        txt = text.replace("<!--VERSION-->", self.version)
        for conv in self.regexlist:
            txt &= conv
        return txt.replace("--filename--", self.filename)
    def convert2(self,text): # $text
        """Fragment conversion (html embedded in C comments).
        BUG FIX: iterate `regexlist2` — the fragment rule table defined in
        the base class — instead of `regexlist`; regexlist2 was previously
        defined but never used anywhere."""
        txt = text.replace("<!--VERSION-->", self.version)
        for conv in self.regexlist2:
            txt &= conv
        return txt
class htm2dbk_document(htm2dbk_conversion):
    """ create document, add(text) and get the value() """
    # Fixed docbook boilerplate wrapped around the converted chapters.
    doctype = (
        '<!DOCTYPE book PUBLIC "-//OASIS//DTD'+
        ' DocBook XML V4.1.2//EN"'+"\n"+
        ' "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd">'+
        "\n")
    book_start = '<book><chapter><title>Documentation</title>'+"\n"
    book_end_chapters = '</chapter>'+"\n"
    book_end = '</book>'+"\n"
    def __init__(self):
        htm2dbk_conversion.__init__(self)
        self.text = self.doctype + self.book_start
    def add(self,text):
        # Once a <reference> section has been emitted, close the chapter
        # (book_end_chapters is blanked so it is only inserted once).
        if self.text & m()("<reference"):
            self.text += self.book_end_chapters ; self.book_end_chapters = ""
        self.text += self.convert(text).replace(
            "<br />","") & (
            m()("<link>([^<>]*)</link>") >> "<function>\\1</function>") & (
            m()("(?s)(<refentryinfo>\s*)<sect1info>" +
            "(<date>[^<>]*</date>)</sect1info>") >> "\\1\\2")
    def value(self):
        # Close any still-open chapter and the book element.
        return self.text + self.book_end_chapters + self.book_end
def htm2dbk_files(args):
    """Convert each html file named in `args` into one combined docbook
    document and return it as a string. (Python 2 module: note the old
    except/print syntax below — unreadable files are skipped with a note
    on stderr.)"""
    doc = htm2dbk_document()
    for filename in args:
        try:
            f = open(filename, "r")
            doc.filename = filename
            doc.add(f.read())
            f.close()
        except IOError, e:
            print >> sys.stderr, "can not open "+filename
    return doc.value()
def html2docbook(text):
    """ the C comment may contain html markup - simulate with docbook tags """
    converter = htm2dbk_conversion()
    return converter.convert2(text)
# Script entry point: convert the html files given on the command line and
# write the combined docbook document to stdout (Python 2 print statement).
if __name__ == "__main__":
    print htm2dbk_files(sys.argv[1:])
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import pkg_resources
import setuptools
import clint
import requests
import requests_toolbelt
import pkginfo
import twine
from twine._installed import Installed
def _registered_commands(group='twine.registered_commands'):
    """Return a mapping of command name -> entry point for the given
    setuptools entry-point group."""
    registered_commands = pkg_resources.iter_entry_points(group=group)
    # Idiom: dict comprehension instead of dict() over a generator of pairs.
    return {command.name: command for command in registered_commands}
def list_dependencies_and_versions():
    """Return (name, version) pairs for twine's runtime dependencies."""
    return [
        ('pkginfo', Installed(pkginfo).version),
        ('requests', requests.__version__),
        ('setuptools', setuptools.__version__),
        ('requests-toolbelt', requests_toolbelt.__version__),
        ('clint', clint.__version__),
    ]
def dep_versions():
    """Render the dependency versions as a single comma-separated string."""
    pairs = list_dependencies_and_versions()
    return ', '.join('{0}: {1}'.format(name, version) for name, version in pairs)
def dispatch(argv):
    """Parse ``argv`` and invoke the selected twine sub-command with the
    remaining arguments."""
    commands = _registered_commands()
    parser = argparse.ArgumentParser(prog="twine")
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s version {0} ({1})".format(twine.__version__,
                                                    dep_versions()),
    )
    parser.add_argument(
        "command",
        choices=commands.keys(),
    )
    parser.add_argument(
        "args",
        help=argparse.SUPPRESS,
        nargs=argparse.REMAINDER,
    )
    parsed = parser.parse_args(argv)
    entry_point = commands[parsed.command].load()
    entry_point(parsed.args)
|
t. """
def solve_poly_system(seq, *gens, **args):
    """
    Solve a system of polynomial equations.
    Examples
    ========
    >>> from sympy import solve_poly_system
    >>> from sympy.abc import x, y
    >>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
    [(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
    """
    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    # Python 2 except syntax: input could not be turned into polynomials.
    except PolificationFailed, exc:
        raise ComputationFailed('solve_poly_system', len(seq), exc)
    # Two bivariate polynomials of degree <= 2 in each variable may admit
    # the faster biquadratic solver; otherwise fall through to generic.
    if len(polys) == len(opt.gens) == 2:
        f, g = polys
        a, b = f.degree_list()
        c, d = g.degree_list()
        if a <= 2 and b <= 2 and c <= 2 and d <= 2:
            try:
                return solve_biquadratic(f, g, opt)
            except SolveFailed:
                # Biquadratic shortcut did not apply; use the generic path.
                pass
    return solve_generic(polys, opt)
def solve_biquadratic(f, g, opt):
    """Solve a system of two bivariate quadratic polynomial equations.
    Examples
    ========
    >>> from sympy.polys import Options, Poly
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.polysys import solve_biquadratic
    >>> NewOption = Options((x, y), {'domain': 'ZZ'})
    >>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')
    >>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')
    >>> solve_biquadratic(a, b, NewOption)
    [(1/3, 3), (41/27, 11/9)]
    >>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')
    >>> b = Poly(-y + x - 4, y, x, domain='ZZ')
    >>> solve_biquadratic(a, b, NewOption)
    [(-sqrt(29)/2 + 7/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + sqrt(29)/2)]
    """
    G = groebner([f, g])
    # A single ground basis element means the ideal is trivial: no solutions.
    if len(G) == 1 and G[0].is_ground:
        return None
    # A solvable biquadratic system yields exactly two basis elements;
    # anything else is handed back to the generic solver via SolveFailed.
    if len(G) != 2:
        raise SolveFailed
    p, q = G
    x, y = opt.gens
    p = Poly(p, x, expand=False)
    q = q.ltrim(-1)
    # NOTE(review): roots(...).keys() used as a list assumes Python 2 dicts;
    # on Python 3 this would be a view — confirm the target interpreter.
    p_roots = [ rcollect(expr, y) for expr in roots(p).keys() ]
    q_roots = roots(q).keys()
    solutions = []
    # Combine every root of q with every root of p (substituting y first).
    for q_root in q_roots:
        for p_root in p_roots:
            solution = (p_root.subs(y, q_root), q_root)
            solutions.append(solution)
    return sorted(solutions)
def solve_generic(polys, opt):
    """
    Solve a generic system of polynomial equations.
    Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
    set F = { f_1, f_2, ..., f_n } of polynomial equations, using
    Groebner basis approach. For now only zero-dimensional systems
    are supported, which means F can have at most a finite number
    of solutions.
    The algorithm works by the fact that, supposing G is the basis
    of F with respect to an elimination order (here lexicographic
    order is used), G and F generate the same ideal, they have the
    same set of solutions. By the elimination property, if G is a
    reduced, zero-dimensional Groebner basis, then there exists an
    univariate polynomial in G (in its last variable). This can be
    solved by computing its roots. Substituting all computed roots
    for the last (eliminated) variable in other elements of G, new
    polynomial system is generated. Applying the above procedure
    recursively, a finite number of solutions can be found.
    The ability of finding all solutions by this procedure depends
    on the root finding algorithms. If no solutions were found, it
    means only that roots() failed, but the system is solvable. To
    overcome this difficulty use numerical algorithms instead.
    References
    ==========
    .. [Buchberger01] B. Buchberger, Groebner Bases: A Short
    Introduction for Systems Theorists, In: R. Moreno-Diaz,
    B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
    February, 2001
    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
    and Algorithms, Springer, Second Edition, 1997, pp. 112
    Examples
    ========
    >>> from sympy.polys import Poly, Options
    >>> from sympy.solvers.polysys import solve_generic
    >>> from sympy.abc import x, y
    >>> NewOption = Options((x, y), {'domain': 'ZZ'})
    >>> a = Poly(x - y + 5, x, y, domain='ZZ')
    >>> b = Poly(x + y - 3, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(-1, 4)]
    >>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
    >>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(11/3, 13/3)]
    >>> a = Poly(x**2 + y, x, y, domain='ZZ')
    >>> b = Poly(x + y*4, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(0, 0), (1/4, -1/16)]
    """
    def _is_univariate(f):
        """Returns True if 'f' is univariate in its last variable. """
        for monom in f.monoms():
            if any(m > 0 for m in monom[:-1]):
                return False
        return True
    def _subs_root(f, gen, zero):
        """Replace generator with a root so that the result is nice. """
        p = f.as_expr({gen: zero})
        if f.degree(gen) >= 2:
            p = p.expand(deep=False)
        return p
    def _solve_reduced_system(system, gens, entry=False):
        """Recursively solves reduced polynomial systems. """
        # Base case: one equation in one variable — just take its roots.
        if len(system) == len(gens) == 1:
            zeros = roots(system[0], gens[-1]).keys()
            return [ (zero,) for zero in zeros ]
        basis = groebner(system, gens, polys=True)
        if len(basis) == 1 and basis[0].is_ground:
            # Basis {1}: the system is inconsistent, no solutions.
            if not entry:
                return []
            else:
                return None
        # NOTE(review): filter() used as a list assumes Python 2.
        univariate = filter(_is_univariate, basis)
        if len(univariate) == 1:
            f = univariate.pop()
        else:
            raise NotImplementedError("only zero-dimensional systems supported (finite number of solutions)")
        gens = f.gens
        gen = gens[-1]
        zeros = roots(f.ltrim(gen)).keys()
        if not zeros:
            return []
        if len(basis) == 1:
            return [ (zero,) for zero in zeros ]
        solutions = []
        # Back-substitute each root and recurse on the reduced system.
        for zero in zeros:
            new_system = []
            new_gens = gens[:-1]
            for b in basis[:-1]:
                eq = _subs_root(b, gen, zero)
                if eq is not S.Zero:
                    new_system.append(eq)
            for solution in _solve_reduced_system(new_system, new_gens):
                solutions.append(solution + (zero,))
        return solutions
    try:
        result = _solve_reduced_system(polys, opt.gens, entry=True)
    except CoercionFailed:
        raise NotImplementedError
    if result is not None:
        return sorted(result)
    else:
        return None
def solve_triangulated(polys, *gens, **args):
"""
Solve a polynomial system using Gianni-Kalkbrenner algorithm.
The algorithm proceeds by computing one Groebner basis in the ground
domain and then by iteratively computing polynomial factorizations in
appropriately constructed algebraic extensions of the ground domain.
Examples
========
>>> from sympy.solvers.polysys import solve_triangulated
>>> from sympy.abc import x, y, z
>>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]
>>> solve_triangulated(F, x, y, z)
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
References
==========
1. Patrizia Gianni, Teo Mora, Algebraic Solution of System of
Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989
"""
G = groebner(polys, gens, polys=True)
G = list(reversed(G))
domain = args.get('domain')
if domain is not None:
for i, g in enumerate(G):
G[i] = g.set_domain(domain)
f, G = G[0].ltrim(-1), G[1:]
dom = f.get_domain()
zeros = f.ground_roots()
solutions = set([])
for zero in zeros:
solutions.add(((zero,), dom))
var_seq = reversed(gens[:-1])
vars_seq = postfixes(gens[1:])
for var, vars in zip(var_seq, vars_seq):
_solutions = set([])
for values, dom in solutions:
H, mapping = [], zip(vars, values)
for g in G:
_vars = (var,) + vars
if g.has_only_gens(*_vars) and g.degree(var) != 0:
h = g.ltrim(var).eval(dict(mapping))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated migration creating the material-list models of the
    `almoxarifado` app and wiring their foreign keys into rh, estoque and
    comercial. NOTE(review): passing default=datetime.datetime.now together
    with auto_now_add/auto_now looks redundant — Django overrides the
    default for such fields; confirm intended before hand-editing."""
    dependencies = [
        ('rh', '0001_initial'),
        ('estoque', '0005_auto_20141001_0953'),
        ('comercial', '0007_auto_20141006_1852'),
        ('almoxarifado', '0003_auto_20140917_0843'),
    ]
    operations = [
        migrations.CreateModel(
            name='LinhaListaMaterial',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade_requisitada', models.DecimalField(max_digits=10, decimal_places=2)),
                ('quantidade_ja_atendida', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LinhaListaMaterialCompra',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LinhaListaMaterialEntregue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ListaMaterialCompra',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ativa', models.BooleanField(default=True)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ListaMaterialDoContrato',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ativa', models.BooleanField(default=True)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.OneToOneField(null=True, blank=True, to='comercial.ContratoFechado')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ListaMaterialEntregue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entregue', models.BooleanField(default=False)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
                ('entregue_para', models.ForeignKey(related_name=b'entregue_para_set', to='rh.Funcionario')),
                ('entregue_por', models.ForeignKey(related_name=b'entregue_por_set', to='rh.Funcionario')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Foreign keys added after model creation to avoid ordering issues.
        migrations.AddField(
            model_name='linhalistamaterialentregue',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialEntregue'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialentregue',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialcompra',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialCompra'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialcompra',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterial',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialDoContrato'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterial',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
    ]
|
__author__ = "Martin Jakomin, Mateja Rojko"
"""
Classes for boolean operators:
- Var
- Neg
- Or
- And
- Const
Functions:
- nnf
- simplify
- cnf
- solve
- simplify_cnf
"""
import itertools
# functions
def nnf(f):
    """ Returns negation normal form """
    return f.nnf()
def simplify(f):
    """ Simplifies the expression """
    normalized = nnf(f)
    return normalized.simplify()
def cnf(f):
    """ Returns conjunctive normal form """
    normalized = nnf(f)
    return normalized.cnf().simplify()
def solve(f, v):
    """ Solves the expression using the variable values v """
    return f.solve(v)
def simplify_cnf(f, v):
    """ Simplifies the cnf form using the variable values v """
    reduced = cnf(f).simplify_cnf(v)
    return reduced.simplify()
# classes
class Var():
    """
    Variable
    """
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
    def solve(self, v):
        # Look the variable up in the assignment mapping.
        return v[self.name]
    def simplify_cnf(self, v):
        # Substitute a known value; otherwise keep the variable as-is.
        return Const(v[self.name]) if self.name in v else self
    def nnf(self):
        # A bare variable is already in negation normal form.
        return self
    def simplify(self):
        return self
    def cnf(self):
        return self
    def length(self):
        # A variable is a single literal.
        return 1
class Neg():
    """
    Negation operator
    """
    def __init__(self,v):
        self.value = v
    def __str__(self):
        return "~" + str(self.value.__str__())
    def solve(self, v):
        return not self.value.solve(v)
    def simplify_cnf(self, v):
        # In CNF a negation wraps a plain variable; substitute if known.
        if self.value.name in v:
            return Const(not v[self.value.name])
        return self
    def nnf(self):
        # Push the negation inward (De Morgan) until it sits on a literal.
        inner = self.value
        if isinstance(inner, Var):
            return Neg(inner)
        if isinstance(inner, Neg):
            return inner.value.nnf()
        if isinstance(inner, And):
            return Or([Neg(x) for x in inner.value]).nnf()
        if isinstance(inner, Or):
            return And([Neg(x) for x in inner.value]).nnf()
        if isinstance(inner, Const):
            return inner.negate()
    def simplify(self):
        return self
    def cnf(self):
        return self
    def length(self):
        return self.value.length()
class And():
    """
    And operator: conjunction over a list of sub-expressions.
    """
    def __init__(self,lst):
        self.value = lst
    def __str__(self):
        # join() also renders the empty conjunction correctly as "()";
        # the previous manual +=/slice loop produced a bare ")".
        return "(" + " & ".join(str(i) for i in self.value) + ")"
    def solve(self, v):
        # Short-circuit: false as soon as one conjunct is false.
        for l in self.value:
            if l.solve(v) is False:
                return False
        return True
    def simplify_cnf(self, v):
        return And([x.simplify_cnf(v) for x in self.value])
    def nnf(self):
        return And([x.nnf() for x in self.value])
    def simplify(self):
        s = [x.simplify() for x in self.value]
        # Flatten nested And terms into a single list.
        ns = []
        for x in s:
            if isinstance(x, And):
                ns.extend(x.value)
            else:
                ns.append(x)
        s = ns
        snames = [x.simplify().__str__() for x in s]
        s2 = []
        for i, x in enumerate(s):
            if Neg(x).nnf().__str__() in snames[i+1:]:
                # x and ~x both present: contradiction.
                return Const(False)
            elif isinstance(x, Const):
                if x.value is False:
                    return Const(False)
            elif snames[i] not in snames[i+1:]:
                # Drop duplicates, keeping the last occurrence.
                s2.append(x)
        if len(s2) < 1:
            return Const(True)
        elif len(s2) == 1:
            # BUG FIX: was "len(s2) is 1" — identity comparison on ints only
            # works via CPython's small-int cache (SyntaxWarning on 3.8+).
            return s2[0]
        return And(s2)
    def cnf(self):
        return And([x.cnf().simplify() for x in self.value])
    def length(self):
        return sum(x.length() for x in self.value)
class Or():
    """
    Or operator: disjunction over a list of sub-expressions.
    """
    def __init__(self, lst):
        self.value = lst
    def __str__(self):
        s = "("
        for i in self.value:
            s += str(i) + " | "
        # Drop the trailing " | " separator.
        s = s[:len(s)-3]
        return s + ")"
    def solve(self, v):
        # Disjunction: true as soon as any disjunct evaluates to True.
        for l in self.value:
            if l.solve(v) is True:
                return True
        return False
    def simplify_cnf(self, v):
        return Or([x.simplify_cnf(v) for x in self.value])
    def nnf(self):
        return Or([x.nnf() for x in self.value])
    def simplify(self):
        s = [x.simplify() for x in self.value]
        # Flatten nested Or nodes into a single list of disjuncts.
        ns = []
        for x in s:
            if isinstance(x, Or):
                ns.extend(x.value)
            else:
                ns.append(x)
        s = ns
        snames = [x.simplify().__str__() for x in s]
        s2 = []
        for i, x in enumerate(s):
            if Neg(x).nnf().__str__() in snames[i+1:]:
                # x | ~x is a tautology.
                return Const(True)
            elif isinstance(x, Const):
                if x.value is True:
                    return Const(True)
            elif snames[i] not in snames[i+1:]:
                # Keep only the last occurrence of duplicate disjuncts.
                s2.append(x)
        if len(s2) < 1:
            return Const(False)
        elif len(s2) == 1:
            # BUG FIX: was `len(s2) is 1` — identity comparison against an
            # int literal; works only via CPython small-int caching.
            return s2[0]
        return Or(s2)
    def cnf(self):
        # Distribute Or over And: cross product of the conjunct lists.
        s = [x.cnf().simplify() for x in self.value]
        s1 = [x.value if isinstance(x, And) else [x] for x in s]
        s2 = []
        for e in itertools.product(*s1):
            s3 = []
            for x in e:
                if isinstance(x, Or):
                    s3.extend(x.value)
                else:
                    s3.append(x)
            s2.append(Or(s3))
        if len(s2) == 1:
            # BUG FIX: was `len(s2) is 1` (same identity-comparison defect).
            return s2[0]
        return And(s2)
    def length(self):
        return sum([x.length() for x in self.value])
class Const():
    """A boolean constant (True/False leaf of the expression tree)."""
    def __init__(self, c):
        self.value = c
    def __str__(self):
        return str(self.value)
    def solve(self, v):
        # Constants ignore the assignment entirely.
        return self.value
    def simplify_cnf(self, v):
        return self
    def nnf(self):
        return self
    def negate(self):
        # Build a fresh constant; never mutates self.
        return Const(False) if self.value is True else Const(True)
    def simplify(self):
        return self
    def cnf(self):
        return self
    def length(self):
        return 1
|
from time import sleep
from tqdm import tqdm
import requests

# Download a large PDF in 1 KB chunks with a tqdm progress bar.
url = "http://raw.githubusercontent.com/Alafazam/lecture_notes/master/Cormen%20.pdf"
response = requests.get(url, stream=True)
with open("10MB", "wb") as handle:
    # BUG FIX: the old code did int(headers.get('content-length'))/1024,
    # which crashes with TypeError when the server sends no Content-Length
    # and passes a float total to tqdm. Fall back to an indeterminate bar.
    content_length = response.headers.get('content-length')
    total_length = int(content_length) // 1024 if content_length else None
    for data in tqdm(response.iter_content(chunk_size=1024),
                     total=total_length, leave=True, unit='KB'):
        handle.write(data)
from mock import patch
from tests import BaseTestCase
from redash.tasks import refresh_schemas
class TestRefreshSchemas(BaseTestCase):
    """refresh_schemas() should queue a refresh job per active data source."""

    # Single definition of the patch target used by every test below.
    REFRESH_TARGET = "redash.tasks.queries.maintenance.refresh_schema.delay"

    def test_calls_refresh_of_all_data_sources(self):
        self.factory.data_source  # trigger creation
        with patch(self.REFRESH_TARGET) as refresh_job:
            refresh_schemas()
            refresh_job.assert_called()

    def test_skips_paused_data_sources(self):
        self.factory.data_source.pause()
        with patch(self.REFRESH_TARGET) as refresh_job:
            refresh_schemas()
            refresh_job.assert_not_called()
        self.factory.data_source.resume()
        with patch(self.REFRESH_TARGET) as refresh_job:
            refresh_schemas()
            refresh_job.assert_called()
|
from pymongo import MongoClient
# BUG FIX: the passlib module is `passlib.apps` (plural); importing
# `passlib.app` raises ImportError.
from passlib.apps import custom_app_context as pwd

# Seed the ride_sharing.users collection with one driver account.
client = MongoClient(host="db")
ride_sharing = client.ride_sharing
users = ride_sharing.users
users.insert_one({
    'username': 'sid',
    'password_hash': pwd.encrypt('test'),
    'role': 'driver'})
|
"""This module prints lists that may or may not contain ne | sted lists"""
def print_lol(the_list):
    """Recursively print every item of a possibly-nested list.

    Each non-list item is printed on its own line; nested lists are
    descended into depth-first, preserving order.
    """
    for item in the_list:
        if isinstance(item, list):
            print_lol(item)
        else:
            print(item)
|
import os
import PIL
import math
import PIL
from PIL import Image
class MandelbrotImage:
    """Turn raw escape-time data files into PNG renderings.

    Expects `<folder>/data/<n>.data` input files and writes images into
    `<folder>/image/` (created on demand).
    """
    def __init__(self, folder):
        self.folder = folder
        self.data_folder = os.path.join(folder, 'data')
        self.image_folder = os.path.join(folder, 'image')
        if not os.path.isdir(self.image_folder):
            os.makedirs(self.image_folder)
    def list_data_files(self):
        """Return the '.data' file names sorted by their numeric prefix."""
        fnames = [fname for fname in os.listdir(self.data_folder)
                  if fname.endswith('.data')]
        fnames.sort(key=lambda x: int(x.split(".")[0]))
        return fnames
    def data_file_to_data(self, filepath):
        """Parse one data file: a 4-int header followed by raw samples.

        Returns (width, height, max_iterations, precision, samples).
        """
        with open(os.path.join(self.data_folder, filepath)) as file:
            data = file.read()
        data = data.split(" ")
        width, height, max_iterations, precision = data[:4]
        data = data[4:]
        return int(width), int(height), int(max_iterations), int(precision), data
    def data_to_pixel_data(self, data, coloring_scheme, max_iterations=None):
        """Map (escape_time, z_real, z_imag) triples to RGB tuples.

        BUG FIX: the original body referenced the global `max_iter`, which
        only existed because the driver script happened to define it. The
        iteration limit is now an explicit parameter; when omitted it falls
        back to the legacy global for backward compatibility.
        """
        if max_iterations is None:
            max_iterations = max_iter  # legacy module-global fallback
        pixel_data = []
        # `xrange` replaced with `range` (works on both Python 2 and 3).
        for i in range(0, len(data), 3):
            escape_time = data[i]
            z_real = data[i+1]
            z_imag = data[i+2]
            pixel_data.append(
                coloring_scheme(escape_time, z_real, z_imag, max_iterations))
        return pixel_data
    def pixel_data_to_image(self, filename, pixel_data, width, height):
        """Write pixel_data as a width x height RGB image."""
        image = Image.new('RGB', (width, height))
        image.putdata(pixel_data)
        image.save(os.path.join(self.image_folder, filename))
def coloring(escape_time, z_real, z_imag, max_iterations):
    """Smooth red-shade coloring scheme; interior points render white.

    All arguments arrive as strings (parsed from the data file) and are
    converted here.
    """
    escape_time = int(escape_time)
    z_real = float(z_real)
    z_imag = float(z_imag)
    max_iterations = int(max_iterations)
    if escape_time == max_iterations + 1:
        # Never escaped: point is (assumed) inside the set.
        return (255, 255, 255)
    modulus_sq = z_real ** 2 + z_imag ** 2
    q = escape_time - math.log(math.log(modulus_sq) / (2 * math.log(2)))
    return (int(q * 255. / max_iterations), 0, 0)
f = "1"
A = MandelbrotImage("1")
for idx, file in enumerate(A.list_data_files()):
width, height, max_iter, precision, data = A.data_file_to_data(file)
pixel_data = A.data_to_pixel_data(data, coloring)
A.pixel_data_to_image("%s.png" % idx, pixel_data, width, height)
print "Done with file %s" % file
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICA | TIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.obj | ect import *
def create(kernel):
    """Factory for the shared bio-component food-duration-2 draft schematic.

    Autogenerated boilerplate: builds an Intangible, points it at the .iff
    template and returns it. `kernel` is not used in this particular
    template but is part of the uniform factory signature.
    """
    result = Intangible()
    result.template = "object/draft_schematic/bio_engineer/bio_component/shared_bio_component_food_duration_2.iff"
    result.attribute_template_id = -1
    result.stfName("","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
"WandererActionsPlanned"
# Keys for the robot's shared memory store.
MEM_CURRENT_ACTIONS = "WandererActionsInProgress"
MEM_COMPLETED_ACTIONS = "WandererActionsCompleted"
MEM_CURRENT_EVENT = "WandererEvent"
MEM_MAP = "WandererMap"
MEM_LOCATION = "WandererLocation"
# Event raised when people-detection is toggled (see init_state below).
EVENT_LOOK_FOR_PEOPLE = "WandererEventLookForPeople"
# Name of the configuration file that holds the properties below.
DEFAULT_CONFIG_FILE = "wanderer"
# Pluggable implementation classes (dotted import paths) and their defaults.
PROPERTY_PLANNER_CLASS = "plannerClass"
DEFAULT_PLANNER_CLASS = "wanderer.randomwalk.RandomWalk"
PROPERTY_EXECUTOR_CLASS = "executorClass"
DEFAULT_EXECUTOR_CLASS = "wanderer.wanderer.PlanExecutor"
PROPERTY_MAPPER_CLASS = "mapperClass"
DEFAULT_MAPPER_CLASS = "wanderer.wanderer.NullMapper"
PROPERTY_UPDATER_CLASSES = "updaterClasses"
# Embedded HTTP server / data-collector configuration.
PROPERTY_HTTP_PORT = "httpPort"
DEFAULT_HTTP_PORT = 8080
PROPERTY_DATA_COLLECTOR_HOST = "dataCollectorHost"
PROPERTY_DATA_COLLECTOR_PORT = "dataCollectorPort"
PROPERTY_LOOK_FOR_PEOPLE = "lookForPeople"
STATIC_WEB_DIR = "web"
CENTRE_BIAS = False
HEAD_HORIZONTAL_OFFSET = 0
WANDERER_NAME = "wanderer"
# START GLOBALS
# We put instances of planners, executors and mappers here so we don't need to continually create
# new instances
planner_instance = None
executor_instance = None
mapper_instance = None
updater_instances = None
# END GLOBALS
# Module-level logger (`logging` is imported earlier in this file).
wanderer_logger = logging.getLogger("wanderer.wanderer")
def init_state(env, startPos):
    """Initialise the wanderer's memory store and events at start-up.

    :param env: environment facade exposing ``memory``, ``get_property`` and
        ``log`` — project API, assumed from usage here.
    :param startPos: initial robot position; stored as the first waypoint.

    NOTE(review): MEM_SECURITY_DISTANCE, MEM_WALK_PATH and
    MEM_PLANNED_ACTIONS are expected to be defined earlier in this module
    (outside this excerpt) — verify.
    """
    # declare events
    env.memory.declareEvent(EVENT_LOOK_FOR_PEOPLE);
    # getData & removeData throw errors if the value is not set,
    # so ensure all the memory locations we want to use are initialised
    env.memory.insertData(MEM_CURRENT_EVENT, None)
    # set "security distance"
    env.memory.insertData(MEM_SECURITY_DISTANCE, "0.25")
    # should we look for people as we go?
    lookForPeople = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_LOOK_FOR_PEOPLE)
    if lookForPeople:
        env.memory.raiseEvent(EVENT_LOOK_FOR_PEOPLE, True)
        env.log("Looking for people")
    else:
        env.memory.raiseEvent(EVENT_LOOK_FOR_PEOPLE, False)
        env.log("Not looking for people")
    # set initial position (in list of positions)
    env.memory.insertData(MEM_WALK_PATH, [startPos])
    # current actions and completed actions
    env.memory.insertData(MEM_PLANNED_ACTIONS, "")
    env.memory.insertData(MEM_CURRENT_ACTIONS, "")
    env.memory.insertData(MEM_COMPLETED_ACTIONS, "")
def shutdown(env):
    """Shut down the planner, executor, mapper, then every updater in turn."""
    get_planner_instance(env).shutdown()
    get_executor_instance(env, None).shutdown()
    get_mapper_instance(env).shutdown()
    for updater in get_updaters(env):
        updater.shutdown()
'''
Base class for wanderer planning.
Handles generating plans and reacting to events
'''
class Planner(object):
    """Base class for wanderer planning: builds plans and reacts to events."""
    def __init__(self, env_):
        super(Planner, self).__init__()
        self.env = env_
    def handleEvent(self, event, state):
        """Dispatch *event*, then persist and log the resulting plan."""
        plan = self.dispatch(event, state)
        save_plan(self.env, plan)
        log_plan(self.env, "New plan", plan)
        return plan
    def does_event_interrupt_plan(self, event, state):
        """Whether *event* should interrupt the current plan (default: yes)."""
        return True
    def dispatch(self, event, state):
        """Route *event* to a `handle<EventName>` method, if one exists."""
        methodName = 'handle' + event.name()
        try:
            # NOTE: the try also covers the handler call itself, so an
            # AttributeError raised inside a handler is logged the same way.
            return getattr(self, methodName)(event, state)
        except AttributeError:
            self.env.log("Unimplemented event handler for: {}".format(event.name()))
    def shutdown(self):
        """Hook for subclasses; the base planner holds no resources."""
        pass
'''
Base class for executing plans. Since we may need to trigger choreographe
boxes we delegate actually performing a single action to an actionExecutor
which in most cases will be the choreographe box that called us.
The actionExecutor must implement do_action(action) and all_done()
'''
class PlanExecutor(object):
    """Executes a stored plan one action at a time.

    Delegates each action to `actionExecutor` (typically the Choregraphe box
    that called us), which must implement do_action(action) and all_done().
    """
    def __init__(self, env, actionExecutor):
        super(PlanExecutor, self).__init__()
        self.env = env
        self.actionExecutor = actionExecutor
    def perform_next_action(self):
        """Archive the just-finished action, then pop and start the next.

        Calls actionExecutor.do_action() for the next action, or
        actionExecutor.all_done() when the plan is exhausted.
        """
        self.env.log("perform next action")
        # save completed action to history if there is one
        completedAction = get_current_action(self.env)
        self.env.log("Completed action = {}".format(repr(completedAction)))
        if not completedAction is None:
            if not isinstance(completedAction, NullAction):
                push_completed_action(self.env, completedAction)
            # if we have moved, then save current location
            if isinstance(completedAction, Move):
                self._have_moved_wrapper()
        self.env.log("set current action to NullAction")
        # ensure that current action is cleared until we have another one
        set_current_action(self.env, NullAction())
        self.env.log("pop from plan")
        # pop first action from plan
        action = pop_planned_action(self.env)
        if action is None:
            self.env.log("No next action")
            self.actionExecutor.all_done()
        else:
            self.env.log("Next action = {}".format(repr(action)))
            set_current_action(self.env, action)
            self.actionExecutor.do_action(action)
        self.env.log("perform_next_action done")
    # get current and previous positions and call have_moved
    # it's not intended that this method be overridden
    def _have_moved_wrapper(self):
        """Notify have_moved() with old/new positions and save the waypoint."""
        self.env.log("Have moved")
        pos = get_position(self.env)
        lastPos = get_last_position(self.env)
        self.have_moved(lastPos, pos)
        save_waypoint(self.env, pos)
    # hook for base classes to implement additional functionality
    # after robot has moved
    def have_moved(self, previousPos, currentPos):
        """Subclass hook invoked after each completed Move action."""
        pass
    def save_position(self):
        """Persist the robot's current position as a waypoint."""
        pos = get_position(self.env)
        save_waypoint(self.env, pos)
    def shutdown(self):
        """Hook for subclasses; the base executor holds no resources."""
        pass
'''
Abstract mapping class
'''
class AbstractMapper(object):
    """Abstract mapping interface: subclasses build maps from sensor data."""
    def __init__(self, env):
        super(AbstractMapper, self).__init__()
        self.env = env
    def update(self, position, sensors):
        """Fold new sensor readings at *position* into the map (no-op here)."""
        pass
    def get_map(self):
        """Return the current map; the abstract base has none."""
        return None
    def shutdown(self):
        """Release resources; nothing to do in the base class."""
        pass
'''
Null mapper - does nothing, just a place holder for when no mapping is actually required
'''
class NullMapper(AbstractMapper):
    """Do-nothing mapper: placeholder for when no mapping is required."""
    def __init__(self, env):
        # Pure delegation; inherits the no-op update/get_map/shutdown.
        super(NullMapper, self).__init__(env)
'''
Mapper that does no actual mapping, but logs all data to file for future analysis
'''
class FileLoggingMapper(AbstractMapper):
def __init__(self, env, save_data=True):
super(FileLoggingMapper, self).__init__(env)
self.save_data = save_data
if self.save_data:
self.open_data_file()
# save the data to file
def update(self, position, sensors):
if self.save_data:
self.save_update_data(position, sensors)
def open_data_file(self):
self.logFilename = tempfile.mktemp()
self.env.log("Saving sensor data to {}".format(self.logFilename))
self.first_write = True
try:
self.logFile = open(self.logFilename, 'r+')
except IOError:
self.env.log("Failed to open file: {}".format(self.logFilename))
self.logFile = None
def save_update_data(self, position, sensors):
if self.logFile:
data = { 'timestamp' : self.timestamp(),
'position' : position,
'leftSonar' : sensors.get_sensor('LeftSonar'),
'rightSonar' : sensors.get_sensor('RightSonar') }
jstr = json.dumps(data)
#self.env.log("Mapper.update: "+jstr)
if not self.first_write:
self.logFile.write(",\n")
self.logFile.write(jstr)
self.first_write = False
self.logFile.flush()
def timestamp(self):
return datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
# TODO should really block write access while doing this
def write_sensor_data_to_file(self, |
# -*- coding: utf-8 -*-
from | __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Autogenerated: adds the indexed `centrale_operativa` flag to the
    # `partecipazione` model, defaulting to False for existing rows.
    dependencies = [
        ('attivita', '0012_attivita_centrale_operativa'),
    ]
    operations = [
        migrations.AddField(
            model_name='partecipazione',
            name='centrale_operativa',
            field=models.BooleanField(default=False, db_index=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
# Copyright (C) 2020 UCLouvain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""API for manipulating organisation."""
from functools import partial
from elasticsearch.exceptions import NotFoundError
from .models import OrganisationIdentifier, OrganisationMetadata
from ..api import IlsRecord, IlsRecordsIndexer, IlsRecordsSearch
from ..fetchers import id_fetcher
from ..item_types.api import ItemTypesSearch
from ..libraries.api import LibrariesSearch, Library
from ..minters import id_minter
from ..providers import Provider
from ..utils import sorted_pids
from ..vendors.api import Vendor, VendorsSearch
# provider
# Provider subclass built dynamically and bound to the 'org' PID type.
OrganisationProvider = type(
    'OrganisationProvider',
    (Provider,),
    dict(identifier=OrganisationIdentifier, pid_type='org')
)
# minter
organisation_id_minter = partial(id_minter, provider=OrganisationProvider)
# fetcher
organisation_id_fetcher = partial(id_fetcher, provider=OrganisationProvider)
class OrganisationsSearch(IlsRecordsSearch):
    """Organisation search."""

    class Meta:
        """Meta class."""

        index = 'organisations'
        doc_types = None
        fields = ('*', )
        facets = {}

        default_filter = None

    def get_record_by_viewcode(self, viewcode, fields=None):
        """Return the single organisation whose code matches *viewcode*.

        :param viewcode: organisation code to look up.
        :param fields: optional list of source fields to restrict the hit to.
        :raises NotFoundError: when exactly one hit is not found.
        """
        search = self.filter('term', code=viewcode).extra(size=1)
        if fields:
            search = search.source(includes=fields)
        result = search.execute()
        if result.hits.total.value != 1:
            raise NotFoundError(
                f'Organisation viewcode {viewcode}: Result not found.')
        return result.hits.hits[0]._source
class Organisation(IlsRecord):
    """Organisation record: top-level tenant owning libraries and vendors."""
    minter = organisation_id_minter
    fetcher = organisation_id_fetcher
    provider = OrganisationProvider
    model_cls = OrganisationMetadata
    @classmethod
    def get_all(cls):
        """Get all organisations, sorted by their 'name' field."""
        return sorted([
            Organisation.get_record_by_id(_id)
            for _id in Organisation.get_all_ids()
        ], key=lambda org: org.get('name'))
    @classmethod
    def all_code(cls):
        """Get the 'code' of every organisation."""
        return [org.get('code') for org in cls.get_all()]
    @classmethod
    def get_record_by_viewcode(cls, viewcode):
        """Get record by view code.

        :raises Exception: when not exactly one record matches.
        """
        result = OrganisationsSearch().filter(
            'term',
            code=viewcode
        ).execute()
        if result['hits']['total']['value'] != 1:
            raise Exception(
                'Organisation (get_record_by_viewcode): Result not found.')
        return result['hits']['hits'][0]['_source']
    @classmethod
    def get_record_by_online_harvested_source(cls, source):
        """Get record by online harvested source.

        :param source: the record source
        :return: Organisation record or None.
        """
        results = OrganisationsSearch().filter(
            'term', online_harvested_source=source).scan()
        try:
            # scan() is lazy; next() pulls the first (and assumed only) hit.
            return Organisation.get_record_by_pid(next(results).pid)
        except StopIteration:
            return None
    @property
    def organisation_pid(self):
        """Get organisation pid (an organisation is its own organisation)."""
        return self.pid
    def online_circulation_category(self):
        """Get the default circulation category for online resources.

        :return: the pid of the 'online' item type, or None if none exists.
        """
        results = ItemTypesSearch().filter(
            'term', organisation__pid=self.pid).filter(
            'term', type='online').source(['pid']).scan()
        try:
            return next(results).pid
        except StopIteration:
            return None
    def get_online_locations(self):
        """Get list of online locations across all libraries."""
        return [library.online_location
                for library in self.get_libraries() if library.online_location]
    def get_libraries_pids(self):
        """Get all libraries pids related to the organisation (generator)."""
        results = LibrariesSearch().source(['pid'])\
            .filter('term', organisation__pid=self.pid)\
            .scan()
        for result in results:
            yield result.pid
    def get_libraries(self):
        """Get all libraries related to the organisation (generator)."""
        pids = self.get_libraries_pids()
        for pid in pids:
            yield Library.get_record_by_pid(pid)
    def get_vendor_pids(self):
        """Get all vendor pids related to the organisation (generator)."""
        results = VendorsSearch().source(['pid'])\
            .filter('term', organisation__pid=self.pid)\
            .scan()
        for result in results:
            yield result.pid
    def get_vendors(self):
        """Get all vendors related to the organisation (generator)."""
        pids = self.get_vendor_pids()
        for pid in pids:
            yield Vendor.get_record_by_pid(pid)
    def get_links_to_me(self, get_pids=False):
        """Record links.

        :param get_pids: if True list of linked pids
            if False count of linked records
        """
        # Local import — presumably avoids a circular import; confirm.
        from ..acq_receipts.api import AcqReceiptsSearch
        library_query = LibrariesSearch()\
            .filter('term', organisation__pid=self.pid)
        receipt_query = AcqReceiptsSearch() \
            .filter('term', organisation__pid=self.pid)
        links = {}
        if get_pids:
            libraries = sorted_pids(library_query)
            receipts = sorted_pids(receipt_query)
        else:
            libraries = library_query.count()
            receipts = receipt_query.count()
        if libraries:
            links['libraries'] = libraries
        if receipts:
            links['acq_receipts'] = receipts
        return links
    def reasons_not_to_delete(self):
        """Get reasons not to delete record (non-empty dict blocks deletion)."""
        cannot_delete = {}
        links = self.get_links_to_me()
        if links:
            cannot_delete['links'] = links
        return cannot_delete
    def is_test_organisation(self):
        """Check if this is a test organisation (code 'cypress')."""
        if self.get('code') == 'cypress':
            return True
        return False
class OrganisationsIndexer(IlsRecordsIndexer):
    """Organisations indexing class."""
    # (Docstring fixed: previously said "Holdings", a copy-paste leftover.)
    record_cls = Organisation
    def bulk_index(self, record_id_iterator):
        """Bulk index records under the 'org' doc type.

        :param record_id_iterator: Iterator yielding record UUIDs.
        """
        super().bulk_index(record_id_iterator, doc_type='org')
|
import numpy as np
from rdkit.Chem import MolFromSmiles
from features import atom_features, bond_features
# Atom degrees the model supports; sort_nodes_by_degree buckets by these.
degrees = [0, 1, 2, 3, 4, 5]
class MolGraph(object):
    """Graph of typed nodes (atoms, bonds, molecules) for molecule batches."""
    def __init__(self):
        # Lists of nodes keyed by node type ('atom', 'bond', 'molecule', ...).
        self.nodes = {}
    def new_node(self, ntype, features=None, rdkit_ix=None):
        """Create, register and return a node of the given type."""
        node = Node(ntype, features, rdkit_ix)
        self.nodes.setdefault(ntype, []).append(node)
        return node
    def add_subgraph(self, subgraph):
        """Merge another graph's node lists into this one (in place)."""
        for ntype, extra in subgraph.nodes.items():
            self.nodes.setdefault(ntype, []).extend(extra)
    def sort_nodes_by_degree(self, ntype):
        """Reorder ntype nodes by neighbor count; record per-degree buckets.

        Also stores each bucket under the (ntype, degree) composite key.
        """
        buckets = {d: [] for d in degrees}
        for node in self.nodes[ntype]:
            buckets[len(node.get_neighbors(ntype))].append(node)
        ordered = []
        for degree in degrees:
            self.nodes[(ntype, degree)] = buckets[degree]
            ordered.extend(buckets[degree])
        self.nodes[ntype] = ordered
    def feature_array(self, ntype):
        """Stack the features of all ntype nodes into a numpy array."""
        assert ntype in self.nodes
        return np.array([node.features for node in self.nodes[ntype]])
    def rdkit_ix_array(self):
        """Original RDKit atom indices, aligned with the 'atom' node list."""
        return np.array([node.rdkit_ix for node in self.nodes['atom']])
    def neighbor_list(self, self_ntype, neighbor_ntype):
        """For each self_ntype node, indices of its neighbor_ntype neighbors."""
        assert self_ntype in self.nodes and neighbor_ntype in self.nodes
        index_of = {node: i for i, node in enumerate(self.nodes[neighbor_ntype])}
        return [[index_of[nb] for nb in node.get_neighbors(neighbor_ntype)]
                for node in self.nodes[self_ntype]]
class Node(object):
    """Graph node: type, feature vector, and symmetric neighbor links."""
    # __slots__ keeps per-node memory low for large molecule batches.
    __slots__ = ['ntype', 'features', '_neighbors', 'rdkit_ix']
    def __init__(self, ntype, features, rdkit_ix):
        self.ntype = ntype
        self.features = features
        self._neighbors = []
        self.rdkit_ix = rdkit_ix
    def add_neighbors(self, neighbor_list):
        """Link this node with each given node, in both directions."""
        for other in neighbor_list:
            self._neighbors.append(other)
            other._neighbors.append(self)
    def get_neighbors(self, ntype):
        """Neighbors restricted to the requested node type."""
        return [nb for nb in self._neighbors if nb.ntype == ntype]
def graph_from_smiles_tuple(smiles_tuple):
    """Parse each SMILES string and merge every molecule into one big graph."""
    big_graph = MolGraph()
    for smiles in smiles_tuple:
        big_graph.add_subgraph(graph_from_smiles(smiles))
    # This sorting allows an efficient (but brittle!) indexing later on.
    big_graph.sort_nodes_by_degree('atom')
    return big_graph
def graph_from_smiles(smiles):
    """Build a MolGraph (atom/bond/molecule nodes) from one SMILES string.

    :raises ValueError: when RDKit cannot parse the string.
    """
    mol = MolFromSmiles(smiles)
    if not mol:
        raise ValueError("Could not parse SMILES string:", smiles)
    graph = MolGraph()
    atom_nodes = {}
    for atom in mol.GetAtoms():
        atom_nodes[atom.GetIdx()] = graph.new_node(
            'atom', features=atom_features(atom), rdkit_ix=atom.GetIdx())
    for bond in mol.GetBonds():
        begin = atom_nodes[bond.GetBeginAtom().GetIdx()]
        end = atom_nodes[bond.GetEndAtom().GetIdx()]
        # The bond node links to both endpoints; atoms also link directly.
        bond_node = graph.new_node('bond', features=bond_features(bond))
        bond_node.add_neighbors((begin, end))
        begin.add_neighbors((end,))
    # One molecule-level node connected to every atom.
    graph.new_node('molecule').add_neighbors(graph.nodes['atom'])
    return graph
|
import csv
import operator
import itertools
import math
import logger1
import re
#main piece of code for calculating & wwriting alignments from processed data
def calculateAlignments(utterances, markers, smoothing, outputFile, shouldWriteHeader, corpusType='CHILDES'):
    """Pipeline: group utterances, count markers, score alignment, write CSV.

    Returns the list of per-(speaker, replier, marker) rows that was written
    to *outputFile*.
    """
    normalized = checkMarkers(markers)
    conversations = group(utterances)
    counts = metaDataExtractor(conversations, normalized, corpusType)
    results = runFormula(counts, normalized, smoothing, corpusType)
    writeFile(results, outputFile, shouldWriteHeader)
    return results
# Converts list of markers in a message to categories
def determineCategories(msgMarkers, catdict, useREs=False):
    """Return the categories whose word (or regex) list hits the message.

    :param msgMarkers: tokens found in a message.
    :param catdict: {category: [words]} or, with useREs, {category: [regexes]}.
    """
    matched = []
    for category, patterns in catdict.items():
        if useREs:
            # Regex mode: any compiled pattern matching any token counts.
            hit = any(p.match(tok) for p in patterns for tok in msgMarkers)
        else:
            # Word mode: any category word appearing among the tokens counts.
            hit = any(word in msgMarkers for word in patterns)
        if hit:
            matched.append(category)
    return matched
# Groups tweets by conversation ids
def group(utterances):
    """Sort utterances by conversation id and bucket them per conversation.

    Note: sorts *utterances* in place (required for groupby to work).
    """
    utterances.sort(key=operator.itemgetter('convId'))
    return [list(convo) for _, convo in
            itertools.groupby(utterances, operator.itemgetter('convId'))]
#code to convert marker list structure to {category: [words]} structure
def makeCatDict(markers,useREs=False):
mdict = {}
for m in markers:
marker = re.compile(''.join([m["marker"], '$'])) if useREs else m["marker"]
if m["category"] in mdict:
mdict[m["category"]].append(marker)
else:
mdict[m["category"]] = [marker]
#mdict[m["category"]] = mdict.get(m["category"],[]).append(m["marker"]) #Need to swap marker and category labels
#mdict[m["marker"]] = mdict.get(m["marker"],[]).append(m["category"])
return(mdict)
#Given a conversation & the list of markers, extract counts of speaker & replier using each marker
def findMarkersInConvo(markers, convo):
    """Count, per marker word, the four message/reply co-occurrence cells.

    Returns {'ba', 'bna', 'nba', 'nbna'} dicts where e.g. 'ba' counts
    utterances in which BOTH the message and the reply used the word.
    """
    both = {}          # word in message and reply
    reply_only = {}    # word in reply only
    msg_only = {}      # word in message only
    neither = {}       # word in neither
    for utterance in convo:
        for marker in markers:
            word = marker["marker"]
            in_msg = word in utterance["msgMarkers"]
            in_reply = word in utterance["replyMarkers"]
            if in_msg and in_reply:
                both[word] = both.get(word, 0) + 1
            elif in_reply:
                reply_only[word] = reply_only.get(word, 0) + 1
            elif in_msg:
                msg_only[word] = msg_only.get(word, 0) + 1
            else:
                neither[word] = neither.get(word, 0) + 1
    return {'ba': both, 'bna': reply_only, 'nba': msg_only, 'nbna': neither}
#Copying portions of one dictionary to another (faster than copy(), if you can believe it!)
def addFeats(toAppend,utterance,renameIds=True,corpusType=''):
if renameIds:
toAppend["speakerId"] = utterance["msgUserId"]
toAppend["replierId"] = utterance["replyUserId"]
else:
toAppend["speakerId"] = utterance["speakerId"]
toAppend["replierId"] = utterance["replierId"]
if(corpusType=='Twitter'):
toAppend["reciprocity"] = utterance["reciprocity"]
toAppend["verifiedSpeaker"] = bool(utterance["verifiedSpeaker"])
toAppend["verifiedReplier"] = bool(utterance["verifiedReplier"])
toAppend["speakerFollowers"] = utterance["speakerFollowers"]
toAppend["replierFollowers"] = utterance["replierFollowers"]
elif(corpusType=='CHILDES'):
toAppend["corpus"] = utterance["corpus"]
toAppend["docId"] = utterance["docId"]
return(toAppend)
# calculates the marker usage counts from conversations
def metaDataExtractor(groupedUtterances, markers, corpusType=''):
    """Extract marker usage counts plus metadata for every conversation.

    :param groupedUtterances: list of conversations (lists of utterance dicts).
    :param markers: list of {'marker': ..., 'category': ...} dicts.
    """
    results = []
    for i, convo in enumerate(groupedUtterances):
        # BUG FIX: was `i % 2500 is 10` — `is` compares identity and only
        # works for small cached ints in CPython; use `==` for equality.
        if i % 2500 == 10:
            logger1.log("On " + str(i) + " of " + str(len(groupedUtterances)))
        counts = findMarkersInConvo(markers, convo)
        counts = addFeats(counts, convo[0], True, corpusType)
        results.append(counts)
    return results
# extracts a list of markers from the marker dictionary
def allMarkers(markers):
    """Return the deduplicated list of marker words (order unspecified)."""
    return list({marker["marker"] for marker in markers})
# creates a dictionary corresponding to a single row of the final output (speaker-replier-marker triplet)
def createAlignmentDict(category,result,smoothing,corpusType=''):
    """Build one output row (speaker-replier-category) from a count dict.

    :param result: counts from findMarkersInConvo plus the id fields.
    :param smoothing: additive smoothing constant for the log-odds score.
    :return: the row dict, or None when alignment is undefined (the marker
        appeared in every message or in none).
    """
    toAppend = {}
    ba = int(result["ba"].get(category, 0))
    bna = int(result["bna"].get(category, 0))
    nbna = int(result["nbna"].get(category, 0))
    nba = int(result["nba"].get(category, 0))
    #Calculating alignment only makes sense if we've seen messages with and without the marker
    if (((ba+nba)==0 or (bna+nbna)==0)):
        return(None)
    toAppend = addFeats(toAppend,result,False,corpusType)
    toAppend["category"] = category
    #Calculating Echoes of Power alignment
    # dnmalignment = ba/(ba+nba) - (ba+bna)/(all four cells)
    powerNum = ba
    powerDenom = ba+nba
    baseNum = ba+bna
    baseDenom = ba+nba+bna+nbna
    if(powerDenom != 0 and baseDenom != 0):
        dnmalignment = powerNum/powerDenom - baseNum/baseDenom
        toAppend["dnmalignment"] = dnmalignment
    else:
        toAppend["dnmalignment"] = False
    powerNum = ba
    powerDenom = ba+nba
    baseDenom = bna+nbna
    baseNum = bna
    # Smoothed log-odds: log((ba+s)/(ba+nba+2s)) - log((bna+s)/(bna+nbna+2s))
    powerProb = math.log((powerNum+smoothing)/float(powerDenom+2*smoothing))
    baseProb = math.log((baseNum+smoothing)/float(baseDenom+2*smoothing))
    alignment = powerProb - baseProb
    toAppend["alignment"] = alignment
    toAppend["ba"] = ba
    toAppend["bna"] = bna
    toAppend["nba"] = nba
    toAppend["nbna"] = nbna
    return(toAppend)
# Gets us from the meta-data to the final output file
def runFormula(results, markers, smoothing, corpusType):
    """Turn per-conversation counts into sorted per-category alignment rows."""
    toReturn = []
    categories = allMarkers(markers)
    for i, result in enumerate(results):
        # BUG FIX: was `i % 1000 is 10` — identity comparison against an
        # int literal; works only via CPython small-int caching.
        if i % 1000 == 10:
            logger1.log("On result " + str(i) + " of " + str(len(results)))
        for category in categories:
            row = createAlignmentDict(category, result, smoothing, corpusType)
            if row is not None:
                toReturn.append(row)
    # Stable sort for deterministic output ordering.
    toReturn.sort(key=lambda k: (k["speakerId"], k["replierId"], k["category"]))
    return toReturn
# Writes stuff to the output file
def writeFile(results, outputFile, shouldWriteHeader):
    """Append result dicts to a CSV file.

    Columns are the sorted keys of the first row; a header line is written
    first (truncating the file) when *shouldWriteHeader* is true.
    """
    if not results:
        logger1.log("No results to write =(")
        return
    header = sorted(results[0].keys())
    rows = [[row[key] for key in header] for row in results]
    # FIX: the old code called f.close() inside the `with` blocks (the
    # context manager already closes) and used writerows([header]) for a
    # single header row.
    if shouldWriteHeader:
        with open(outputFile, "w", newline='') as f:
            csv.writer(f).writerow(header)
    with open(outputFile, "a", newline='') as f:
        csv.writer(f).writerows(rows)
# Reads a list of markers from the markersFile
def readMarkers(markersFile, dialect=None):
    """Read a marker list from a CSV file.

    Each row is `marker[,category]`; a missing category defaults to the
    marker itself.

    FIX: the file is now opened in a `with` block — the old code passed
    `open(...)` straight to csv.reader and leaked the handle.
    """
    markers = []
    with open(markersFile, newline='') as f:
        if dialect is None:
            reader = csv.reader(f)
        else:
            reader = csv.reader(f, dialect=dialect)
        for row in reader:
            category = row[1] if len(row) > 1 else row[0]
            markers.append({"marker": row[0], "category": category})
    return markers
# checks & adapts the structure of the marker list to the appropriate one
def checkMarkers(markers):
toReturn = []
for marker in markers:
if isinstance(marker, str):
toReturn.append({"marker": marker, "category": marker})
else:
toReturn.append(marker)
return toReturn |
"
# Template for one %r-formatted field inside the generated __repr__.
_repr_template = '{name}=%r'
# Per-field property definition injected into the generated class source.
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.

    Vendored from the CPython standard library; relies on the module-level
    _class_template / _repr_template / _field_template strings and on
    _iskeyword and _sys imported elsewhere in this file.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Validate the field names. At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = str(typename)
    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            # Replace anything that can't be an attribute with '_<index>'.
            if (not name.isidentifier()
                or _iskeyword(name)
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    for name in [typename] + field_names:
        if type(name) != str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             'identifiers: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)
    # Fill-in the class template
    class_definition = _class_template.format(
        typename = typename,
        field_names = tuple(field_names),
        num_fields = len(field_names),
        arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
        repr_fmt = ', '.join(_repr_template.format(name=name)
                             for name in field_names),
        field_defs = '\n'.join(_field_template.format(index=index, name=name)
                               for index, name in enumerate(field_names))
    )
    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(__name__='namedtuple_%s' % typename)
    exec(class_definition, namespace)
    result = namespace[typename]
    result._source = class_definition
    if verbose:
        print(result._source)
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter() # a new, empty counter
        >>> c = Counter('gallahad') # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2) # a new counter from keyword args
        '''
        super().__init__()
        # All counting logic lives in update(); it also copes with the
        # iterable=None default.
        self.update(iterable, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError;
        # dict.__getitem__ falls back to this hook on a miss.
        return 0
    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            # Full listing: a single descending sort by count.
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        # Partial listing: a heap-based top-n avoids sorting everything.
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Check wallet accounting when a signature-malleated ("equivalent")
    clone of a transaction is confirmed instead of the original."""
    def __init__(self):
        super().__init__()
        # Four nodes on the cached (pre-mined) regtest chain.
        self.num_nodes = 4
        self.setup_clean_chain = False
    def add_options(self, parser):
        # --mineblock additionally exercises the 1-confirmation case.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)
    def run_test(self):
        """Create tx1, clone it with a different sighash type, confirm the
        clone on the other half of the split, then verify balances and
        confirmation counts after the halves reconnect."""
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.005)
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
        clone_locktime = rawtx1["locktime"]
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, None, clone_locktime)
        # createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 40 BTC serialized is 00286bee00000000 (little-endian satoshis)
        pos0 = 2*(4+1+36+1+4+1)
        hex40 = "00286bee00000000"
        # output entry = 8-byte value + 1-byte script length + script
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
            rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
            output0 = clone_raw[pos0 : pos0 + output_len]
            output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner (node2, the other network half)
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1) # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations: the original tx1 is now conflicted
        # (negative confirmations) because its clone confirmed instead.
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
        # less possible orphaned matured subsidy
        expected += 100
        if (self.options.mine_block):
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)
        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                                                      - 1219
                                                      + fund_foo_tx["fee"]
                                                      - 29
                                                      + fund_bar_tx["fee"]
                                                      + 100)
        # Node1's "from0" account balance
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
# Run as a standalone regression test via the shared framework harness.
if __name__ == '__main__':
    TxnMallTest().main()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Create the configuration
definition = ConfigurationDefinition()
# Add optional arguments
definition.add_section("wavelengths")
definition.sections["wavelengths"].add_optional("unit", str, "the unit of the wavelengths", "micron")
definition.sections["wavelengths"].add_optional("min", float, "the minimum wavelength", 0.09)
definition.sections["wavelengths"].add_optional("max", float, "the maximum wavelength", 2000)
definition.sections["wavelengths"].add_optional("npoints", int, "the number of wavelength points", 100)
definition.sections["wavelengths"].add_optional("min_zoom", float, "the minimum wavelength of the zoomed-in grid", 1)
definition.sections["wavelengths"].add_optional("max_zoom", float, "the maximum wavelength of the zoomed-in grid", 30)
definition.sections["wavelengths"].add_optional("npoints_zo | om", int, "the number of wavelength points in the zoomed-in grid", 100)
definition.add_optional("packages", float, "the number of photon packages per wavelength", 2e5)
definition.add_flag("selfabsorption", "enable dust self-absorption")
definition.add_optional("dust_grid", str, "the type of dust grid to use (bintree, octtree or cartesian)", "bintree")
# ---------------------------------- | -------------------------------
|
#!/usr/bin/env python2.7
"""Docker From Scratch Workshop - | Level 4: Add overlay FS.
Goal: Instead of re-extracting the image, use it as a read-only layer
(lowerdir), and create a copy-on-write layer for changes (upperdir).
HINT: Don't forget that overlay fs also requires a workdir.
Read more on overlay FS here:
https://www.kernel.org | /doc/Documentation/filesystems/overlayfs.txt
"""
from __future__ import print_function
import linux
import tarfile
import uuid
import click
import os
import stat
import traceback
def _get_image_path(image_name, image_dir, image_suffix='tar'):
return os.path.join(image_dir, os.extsep.join([image_name, image_suffix]))
def _get_container_path(container_id, container_dir, *subdir_names):
return os.path.join(container_dir, container_id, *subdir_names)
def create_container_root(image_name, image_dir, container_id, container_dir):
    """Extract the image tarball into a fresh per-container rootfs.

    Returns the rootfs path. The TODOs below are this workshop level's
    exercise: share one extracted image as an overlayfs lowerdir instead of
    re-extracting it for every container.
    """
    image_path = _get_image_path(image_name, image_dir)
    assert os.path.exists(image_path), "unable to locate image %s" % image_name
    # TODO: Instead of creating the container_root and extracting to it,
    # create an images_root.
    # keep only one rootfs per image and re-use it
    container_root = _get_container_path(container_id, container_dir, 'rootfs')
    if not os.path.exists(container_root):
        os.makedirs(container_root)
    with tarfile.open(image_path) as t:
        # Fun fact: tar files may contain *nix devices! *facepalm*
        # Filter them out so extraction doesn't need (or abuse) privileges.
        members = [m for m in t.getmembers()
                   if m.type not in (tarfile.CHRTYPE, tarfile.BLKTYPE)]
        t.extractall(container_root, members=members)
    # TODO: create directories for copy-on-write (uppperdir), overlay workdir,
    # and a mount point
    # TODO: mount the overlay (HINT: use the MS_NODEV flag to mount)
    return container_root  # return the mountpoint for the mounted overlayfs
# Top-level click command group; `run` below registers itself as a subcommand.
# (Kept docstring-free so the CLI help text stays unchanged.)
@click.group()
def cli():
    pass
def makedev(dev_path):
    """Populate *dev_path* with the basic device nodes a container expects.

    Creates the std-stream symlinks (/dev/stdin -> /proc/self/fd/0, ...) and
    the usual character devices (null, zero, random, urandom, console, tty,
    full) via mknod. mknod of char devices typically requires root.
    """
    # Standard I/O streams are just symlinks into /proc/self/fd.
    for i, dev in enumerate(['stdin', 'stdout', 'stderr']):
        os.symlink('/proc/self/fd/%d' % i, os.path.join(dev_path, dev))
    os.symlink('/proc/self/fd', os.path.join(dev_path, 'fd'))
    # Add extra devices. Use .items() rather than the Python-2-only
    # .iteritems(): the module already opts into Python-3 behaviour with
    # `from __future__ import print_function`, and .items() works on both.
    DEVICES = {'null': (stat.S_IFCHR, 1, 3), 'zero': (stat.S_IFCHR, 1, 5),
               'random': (stat.S_IFCHR, 1, 8), 'urandom': (stat.S_IFCHR, 1, 9),
               'console': (stat.S_IFCHR, 136, 1), 'tty': (stat.S_IFCHR, 5, 0),
               'full': (stat.S_IFCHR, 1, 7)}
    for device, (dev_type, major, minor) in DEVICES.items():
        os.mknod(os.path.join(dev_path, device),
                 0o666 | dev_type, os.makedev(major, minor))
def _create_mounts(new_root):
    """Mount /proc, /sys and a fresh /dev inside *new_root*."""
    # Create mounts (/proc, /sys, /dev) under new_root
    linux.mount('proc', os.path.join(new_root, 'proc'), 'proc', 0, '')
    linux.mount('sysfs', os.path.join(new_root, 'sys'), 'sysfs', 0, '')
    # /dev is a small tmpfs we populate ourselves, so no host devices leak in.
    linux.mount('tmpfs', os.path.join(new_root, 'dev'), 'tmpfs',
                linux.MS_NOSUID | linux.MS_STRICTATIME, 'mode=755')
    # Add some basic devices
    devpts_path = os.path.join(new_root, 'dev', 'pts')
    if not os.path.exists(devpts_path):
        os.makedirs(devpts_path)
    linux.mount('devpts', devpts_path, 'devpts', 0, '')
    makedev(os.path.join(new_root, 'dev'))
def contain(command, image_name, image_dir, container_id, container_dir):
    """Enter a new mount namespace, pivot into the image rootfs, exec *command*.

    Runs in the forked child (see run()); on success execvp replaces the
    process image, so this function never returns.
    """
    linux.unshare(linux.CLONE_NEWNS)  # create a new mount namespace
    # Make our mounts private so they don't propagate back to the host
    # (the host mount table is typically shared by default).
    linux.mount(None, '/', None, linux.MS_PRIVATE | linux.MS_REC, None)
    new_root = create_container_root(
        image_name, image_dir, container_id, container_dir)
    print('Created a new root fs for our container: {}'.format(new_root))
    _create_mounts(new_root)
    # pivot_root needs a place under the new root to park the old root.
    old_root = os.path.join(new_root, 'old_root')
    os.makedirs(old_root)
    linux.pivot_root(new_root, old_root)
    os.chdir('/')
    # Drop the old root so the container can't reach the host filesystem.
    linux.umount2('/old_root', linux.MNT_DETACH)  # umount old root
    os.rmdir('/old_root')  # rmdir the old_root dir
    os.execvp(command[0], command)
@cli.command(context_settings=dict(ignore_unknown_options=True,))
@click.option('--image-name', '-i', help='Image name', default='ubuntu')
@click.option('--image-dir', help='Images directory',
              default='/workshop/images')
@click.option('--container-dir', help='Containers directory',
              default='/workshop/containers')
@click.argument('Command', required=True, nargs=-1)
def run(image_name, image_dir, container_dir, command):
    # Fork; the child enters containment and execs the command, the parent
    # waits and reports the exit status.
    container_id = str(uuid.uuid4())
    pid = os.fork()
    if pid == 0:
        # This is the child, we'll try to do some containment here
        try:
            contain(command, image_name, image_dir, container_id,
                    container_dir)
        except Exception:
            traceback.print_exc()
            # os._exit skips parent cleanup handlers in the forked child.
            os._exit(1)  # something went wrong in contain()
    # This is the parent, pid contains the PID of the forked process
    # wait for the forked child, fetch the exit status
    _, status = os.waitpid(pid, 0)
    print('{} exited with status {}'.format(pid, status))
# Dispatch to the click command group when executed as a script.
if __name__ == '__main__':
    cli()
|
#!/usr/bin/env python
# -*- coding: utf-8! -*-
from collections import Counter, OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster.hierarchy import ward, dendrogram
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.pairwise import pairwise_distances
from librosa.segment import agglomerative
from HACluster import VNClusterer, Clusterer
from sklearn.preprocessing import StandardScaler
from ete3 import Tree, NodeStyle, TreeStyle, AttrFace, faces, TextFace
class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers the order elements are first encountered."""

    def __repr__(self):
        # Render as ClassName(OrderedDict(...)) so the encounter order shows.
        cls_name = type(self).__name__
        return '{}({!r})'.format(cls_name, OrderedDict(self))

    def __reduce__(self):
        # Pickle through an OrderedDict snapshot to keep the encounter order.
        return type(self), (OrderedDict(self),)
def pca_cluster(slice_matrix, slice_names, feature_names, prefix='en',
                nb_clusters=3):
    """
    Run pca on matrix and visualize samples in 1st PCs, with word loadings
    projected on top. The colouring of the samples is provided by running a
    cluster analysis on the samples in these first dimensions.
    Saves the figure to ../outputs/<prefix>_pca.pdf.
    """
    sns.set_style('dark')
    # Fix: call matplotlib's pyplot directly (as the rest of this module does).
    # `sns.plt` was an accident of seaborn's internals, never public API, and
    # is removed in modern seaborn releases.
    plt.rcParams['axes.linewidth'] = 0.2
    fig, ax1 = plt.subplots()
    # Standardize features so PCA is not dominated by high-variance columns.
    slice_matrix = StandardScaler().fit_transform(slice_matrix)
    pca = PCA(n_components=2)
    pca_matrix = pca.fit_transform(slice_matrix)
    pca_loadings = pca.components_.transpose()
    # first plot slices (invisible markers set up the axes; the text labels
    # drawn below carry the information):
    x1, x2 = pca_matrix[:,0], pca_matrix[:,1]
    ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none')
    # clustering on top (for colouring):
    clustering = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=nb_clusters)
    clustering.fit(pca_matrix)
    # add slice names, coloured per cluster. 'nipy_spectral' is the documented
    # replacement for the removed 'spectral' colormap alias.
    for x, y, name, cluster_label in zip(x1, x2, slice_names, clustering.labels_):
        ax1.text(x, y, name.split('_')[0][:3], ha='center', va="center",
                 color=plt.cm.nipy_spectral(cluster_label / 10.),
                 fontdict={'family': 'Arial', 'size': 10})
    # now loadings on twin axis:
    ax2 = ax1.twinx().twiny()
    l1, l2 = pca_loadings[:,0], pca_loadings[:,1]
    ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none')
    for x, y, l in zip(l1, l2, feature_names):
        ax2.text(x, y, l, ha='center', va="center", size=8, color="darkgrey",
                 fontdict={'family': 'Arial', 'size': 9})
    # control aesthetics: strip all ticks/labels (PC axes are unitless here):
    ax1.set_xlabel('')
    ax1.set_ylabel('')
    ax1.set_xticklabels([])
    ax1.set_xticks([])
    ax1.set_yticklabels([])
    ax1.set_yticks([])
    ax2.set_xticklabels([])
    ax2.set_xticks([])
    ax2.set_yticklabels([])
    ax2.set_yticks([])
    plt.tight_layout()
    plt.savefig('../outputs/'+prefix+'_pca.pdf', bbox_inches=0)
    plt.clf()
def natural_cluster(slice_matrix, slice_names, prefix='en'):
    """
    Perform plain cluster analysis on sample matrix, without
    taking into account the chronology of the corpus.
    """
    scaled = StandardScaler().fit_transform(slice_matrix)
    distances = pairwise_distances(scaled, metric='euclidean')
    clusterer = Clusterer(distances, linkage='ward')
    clusterer.cluster(verbose=0)
    # Compact leaf labels: first 5 chars of the name part + the index part.
    labels = [name.split('_')[0][:5] + name.split('_')[1] for name in slice_names]
    tree = clusterer.dendrogram.ete_tree(labels)
    tree.write(outfile='../outputs/'+prefix+'_natural_clustering.newick')
def vnc_cluster(slice_matrix, slice_names, prefix='en'):
    # NOTE(review): VNClusterer appears to be the chronology-constrained
    # variant of Clusterer (see natural_cluster) -- confirm in HACluster docs.
    scaled = StandardScaler().fit_transform(slice_matrix)
    distances = pairwise_distances(scaled, metric='euclidean')
    clusterer = VNClusterer(distances, linkage='ward')
    clusterer.cluster(verbose=0)
    labels = [name.split('_')[0][:5] + name.split('_')[1] for name in slice_names]
    tree = clusterer.dendrogram.ete_tree(labels)
    tree.write(outfile='../outputs/'+prefix+"_vnc_clustering.newick")
def segment_cluster(slice_matrix, slice_names, nb_segments):
    """Segment the (chronologically ordered) slices into nb_segments runs and
    return the names of the slices where a new segment begins."""
    scaled = StandardScaler().fit_transform(slice_matrix)
    # librosa assumes data[1] is the time axis, hence the transpose.
    data = np.asarray(scaled).transpose()
    segment_starts = agglomerative(data=data, k=nb_segments)
    # Index 0 is always a segment start, so it is not an informative boundary.
    return [slice_names[i] for i in segment_starts if i > 0]
def bootstrap_segmentation(n_iter, nb_mfw_sampled, corpus_matrix,
                           slice_names, prefix='en', nb_segments=3, random_state=2015):
    """Bootstrap the segmentation: run segment_cluster() n_iter times on a
    random feature subset of size nb_mfw_sampled, then plot how often each
    slice was picked as each segment boundary (horizontal bar chart saved to
    ../outputs/<prefix>_bootstrap_segment<nb_segments>.pdf)."""
    np.random.seed(random_state)
    corpus_matrix = np.asarray(corpus_matrix)
    # Per-slice counters, one slot per potential boundary position;
    # OrderedCounter keeps the corpus (chronological) order for plotting.
    sample_cnts = OrderedCounter()
    for sn in slice_names:
        sample_cnts[sn] = []
        for i in range(nb_segments):
            sample_cnts[sn].append(0)
    for nb in range(n_iter):
        print('===============\niteration:', nb+1)
        # sample a subset of the features in our matrix:
        rnd_indices = np.random.randint(low=0, high=corpus_matrix.shape[1], size=nb_mfw_sampled)
        sampled_matrix = corpus_matrix[:,rnd_indices]
        # get which breaks are selected and adjust the cnts:
        selected_breaks = segment_cluster(sampled_matrix, slice_names, nb_segments=nb_segments)
        for i, break_ in enumerate(selected_breaks):
            sample_cnts[break_][i] += 1
    # Plot: one bar row per slice, one colour per boundary rank.
    plt.rcParams['font.family'] = 'arial'
    plt.rcParams['font.size'] = 8
    plt.clf()
    plt.figure(figsize=(10,20))
    sample_names, breakpoints_cnts = zip(*sample_cnts.items())
    pos = [i for i, n in enumerate(sample_names)][::-1] # reverse for legibility
    plt.yticks(pos, [n[:3].replace('_', '') if n.endswith(('_1', '_0')) else ' ' for n in sample_names])
    axes = plt.gca()
    axes.set_xlim([0,n_iter])
    colors = sns.color_palette('hls', nb_segments)
    for i in range(nb_segments-1):
        cnts = [c[i] for c in breakpoints_cnts]
        plt.barh(pos, cnts, align='center', color=colors[i], linewidth=0, label="Boundary "+str(i+1))
    plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='on')
    plt.tick_params(axis='x', which='both', top='off')
    plt.legend()
    plt.savefig('../outputs/'+prefix+'_bootstrap_segment'+str(nb_segments)+'.pdf')
|
from pycipher import Vigenere
import unittest
class TestVigenere(unittest.TestCase):
    """Known-answer tests for pycipher's Vigenere cipher."""

    def test_encipher(self):
        cases = (
            ('GERMAN',
             'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
             'gftpesmlzvkysrfbqeyxlhwkedrncqkjxtiwqpdzocwvjfuicbpl'),
            ('CIPHERS',
             'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
             'cjrkiwyjqyrpdfqxfywkmxemfdrteltmkyalsatrfhszhaymozgo'),
        )
        for key, plain, expected in cases:
            enciphered = Vigenere(key).encipher(plain)
            self.assertEqual(enciphered.upper(), expected.upper())

    def test_decipher(self):
        cases = (
            ('GERMAN',
             'gftpesmlzvkysrfbqeyxlhwkedrncqkjxtiwqpdzocwvjfuicbpl',
             'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'),
            ('CIPHERS',
             'cjrkiwyjqyrpdfqxfywkmxemfdrteltmkyalsatrfhszhaymozgo',
             'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'),
        )
        for key, cipher, expected in cases:
            deciphered = Vigenere(key).decipher(cipher)
            self.assertEqual(deciphered.upper(), expected.upper())
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
import unicodecsv
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.utils.encoding import force_text
from six.moves import map
from oioioi.base.permissions import make_request_condition
from oioioi.base.utils import re | quest_cached
from oioioi.participants.controllers import ParticipantsController
from oioioi.participants.models import Participant
def is_contest_with_participants(contest):
    """True when the contest registers users through a ParticipantsController."""
    return isinstance(
        contest.controller.registration_controller(), ParticipantsController
    )
def is_onsite_contest(contest):
    """True when the contest's participant admin is the onsite variant."""
    if not is_contest_with_participants(contest):
        return False
    # Imported here, presumably to dodge a circular import at module load.
    from oioioi.participants.admin import OnsiteRegistrationParticipantAdmin
    admin_cls = contest.controller.registration_controller().participant_admin
    return admin_cls and issubclass(admin_cls, OnsiteRegistrationParticipantAdmin)
@make_request_condition
def contest_has_participants(request):
    # Request-level wrapper around is_contest_with_participants().
    return is_contest_with_participants(request.contest)
@make_request_condition
def has_participants_admin(request):
    # True when the registration controller exposes a participant admin class.
    rcontroller = request.contest.controller.registration_controller()
    return getattr(rcontroller, 'participant_admin', None) is not None
@make_request_condition
def contest_is_onsite(request):
    # Request-level wrapper around is_onsite_contest().
    return is_onsite_contest(request.contest)
@request_cached
def get_participant(request):
    """Return the Participant row for (request.contest, request.user), or None."""
    try:
        return Participant.objects.get(contest=request.contest, user=request.user)
    except Participant.DoesNotExist:
        return None
@make_request_condition
@request_cached
def can_register(request):
    """Whether the user may register: not already a participant, and allowed
    by the contest's registration controller."""
    if get_participant(request) is not None:
        return False
    rcontroller = request.contest.controller.registration_controller()
    return rcontroller.can_register(request)
@make_request_condition
@request_cached
def can_edit_registration(request):
    """Whether the user has a participant entry the controller lets them edit."""
    participant = get_participant(request)
    if participant is None:
        return False
    controller = request.contest.controller.registration_controller()
    return controller.can_edit_registration(request, participant)
@make_request_condition
@request_cached
def can_unregister(request):
    """Whether the user has a participant entry the controller lets them withdraw."""
    participant = get_participant(request)
    if participant is None:
        return False
    controller = request.contest.controller.registration_controller()
    return controller.can_unregister(request, participant)
@make_request_condition
@request_cached
def is_participant(request):
    """True when the registration controller's participant filter keeps the
    current user."""
    rcontroller = request.contest.controller.registration_controller()
    qs = User.objects.filter(id=request.user.id)
    return rcontroller.filter_participants(qs).exists()
def _fold_registration_models_tree(object):
    """Function for serialize_participants_data. Walks over model of
    the object, gets models related to the model and lists
    all their fields."""
    # NOTE(review): the parameter shadows the `object` builtin; renaming it
    # would be cleaner but could break keyword callers, so it is left as-is.
    result = []
    objects_used = [object]
    # https://docs.djangoproject.com/en/1.9/ref/models/meta/#migrating-old-meta-api
    def get_all_related_objects(_meta):
        # Reverse (auto-created one-to-many / one-to-one) relations only.
        return [
            f
            for f in _meta.get_fields()
            if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
        ]
    # Seed the walk with objects reachable through reverse accessors.
    objs = [
        getattr(object, rel.get_accessor_name())
        for rel in get_all_related_objects(object._meta)
        if hasattr(object, rel.get_accessor_name())
    ]
    # Breadth-first walk across forward relations, visiting each object once.
    while objs:
        current = objs.pop(0)
        if current is None:
            continue
        objects_used.append(current)
        for field in current._meta.fields:
            if (
                field.remote_field is not None
                and getattr(current, field.name) not in objects_used
            ):
                objs.append(getattr(current, field.name))
    # Collect the plain (non-auto, non-relational) fields of every object seen.
    for obj in objects_used:
        for field in obj._meta.fields:
            if not field.auto_created:
                if field.remote_field is None:
                    result += [(obj, field)]
    return result
def serialize_participants_data(request, participants):
    """Serializes all personal data of participants to a table.
    :param participants: A QuerySet from table participants.
    :returns: dict with 'keys' (column names) and 'data' (row lists), or
        ``{'no_participants': True}`` when the queryset is empty.
    """
    if not participants.exists():
        return {'no_participants': True}
    # Whether to expose email addresses is a per-contest controller decision.
    display_email = request.contest.controller.show_email_in_participants_data
    keys = ['username', 'user ID', 'first name', 'last name'] + (
        ['email address'] if display_email else []
    )
    def key_name(attr):
        # Column name: "ModelClass: Field Verbose Name".
        (obj, field) = attr
        return str(obj.__class__.__name__) + ": " + field.verbose_name.title()
    # Union of registration-model columns across all participants,
    # preserving first-seen order.
    set_of_keys = set(keys)
    for participant in participants:
        for key in map(key_name, _fold_registration_models_tree(participant)):
            if key not in set_of_keys:
                set_of_keys.add(key)
                keys.append(key)
    def key_value(attr):
        (obj, field) = attr
        return (key_name((obj, field)), field.value_to_string(obj))
    data = []
    for participant in participants:
        values = dict(list(map(key_value, _fold_registration_models_tree(participant))))
        values['username'] = participant.user.username
        values['user ID'] = participant.user.id
        values['first name'] = participant.user.first_name
        values['last name'] = participant.user.last_name
        if display_email:
            values['email address'] = participant.user.email
        # Missing columns for this participant render as empty strings.
        data.append([values.get(key, '') for key in keys])
    return {'keys': keys, 'data': data}
def render_participants_data_csv(request, participants, name):
    """Build an HTTP CSV attachment with the participants' personal data."""
    serialized = serialize_participants_data(request, participants)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
        name,
        "personal-data",
    )
    if 'no_participants' not in serialized:
        writer = unicodecsv.writer(response)
        writer.writerow([force_text(key) for key in serialized['keys']])
        for row in serialized['data']:
            writer.writerow([force_text(cell) for cell in row])
    return response
|
"""
Support for Tellstick lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.tellstick/
"""
from homeassistant.components import tellstick
from homeassistant.components.light import ATTR_BRIGHTNESS, Light
from homeassistant.components.tellstick import (DEFAULT_SIGNAL_REPETITIONS,
| ATTR_DISCOVER_DEVICES,
ATTR_DISCOVER_CONFIG)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup Tellstick lights."""
    # Bail out unless discovery supplied device ids and the tellcore registry
    # has been initialised by the core tellstick component.
    if (discovery_info is None or
            discovery_info[ATTR_DISCOVER_DEVICES] is None or
            tellstick.TELLCORE_REGISTRY is None):
        return
    # Discovery config may override how often each signal is repeated.
    signal_repetitions = discovery_info.get(ATTR_DISCOVER_CONFIG,
                                            DEFAULT_SIGNAL_REPETITIONS)
    add_devices(TellstickLight(
        tellstick.TELLCORE_REGISTRY.get_device(switch_id), signal_repetitions)
                for switch_id in discovery_info[ATTR_DISCOVER_DEVICES])
class TellstickLight(tellstick.TellstickDevice, Light):
    """Representation of a Tellstick light."""
    def __init__(self, tellstick_device, signal_repetitions):
        """Initialize the light."""
        # Last requested brightness; turn_on() without an explicit
        # brightness re-dims to this value (255 = full).
        self._brightness = 255
        tellstick.TellstickDevice.__init__(self,
                                           tellstick_device,
                                           signal_repetitions)
    @property
    def is_on(self):
        """Return true if switch is on."""
        # _state is maintained by set_tellstick_state(); presumably also
        # initialized by the TellstickDevice base class -- not visible here.
        return self._state
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    def set_tellstick_state(self, last_command_sent, last_data_sent):
        """Update the internal representation of the switch."""
        from tellcore.constants import TELLSTICK_TURNON, TELLSTICK_DIM
        if last_command_sent == TELLSTICK_DIM:
            # A dim to 0 counts as "off".
            if last_data_sent is not None:
                self._brightness = int(last_data_sent)
            self._state = self._brightness > 0
        else:
            self._state = last_command_sent == TELLSTICK_TURNON
    def _send_tellstick_command(self, command, data):
        """Handle the turn_on / turn_off commands."""
        from tellcore.constants import (TELLSTICK_TURNOFF, TELLSTICK_DIM)
        if command == TELLSTICK_TURNOFF:
            self.tellstick_device.turn_off()
        elif command == TELLSTICK_DIM:
            self.tellstick_device.dim(self._brightness)
        else:
            raise NotImplementedError(
                "Command not implemented: {}".format(command))
    def turn_on(self, **kwargs):
        """Turn the switch on."""
        from tellcore.constants import TELLSTICK_DIM
        # On is expressed as a DIM to the requested (or remembered) level.
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        if brightness is not None:
            self._brightness = brightness
        self.call_tellstick(TELLSTICK_DIM, self._brightness)
    def turn_off(self, **kwargs):
        """Turn the switch off."""
        from tellcore.constants import TELLSTICK_TURNOFF
        self.call_tellstick(TELLSTICK_TURNOFF)
|
eld(max_length=40, editable=False, unique=True)
commit_time = models.DateTimeField(
db_index=True,
null=True, blank=True
)
commit_message = models.CharField(
max_length=150,
editable=False,
null=True, blank=True
)
travis_raw = models.TextField(null=True, blank=True)
log = models.TextField(null=True, blank=True)
    class Meta:
        # Human-readable names shown in the Django admin.
        verbose_name = _("Site update")
        verbose_name_plural = _("Site updates")
@property
def travis_raw_pretty(self):
if self.travis_raw:
parsed = json.loads(self.travis_raw)
return json.dumps(parsed, indent=4, sort_keys=True)
else:
return ""
@property
def length(self):
if self.finished and self.started:
return self.finished-self.started
else:
return None
    def __str__(self):
        # Updates are identified by their commit sha1.
        return self.sha1
class AddedChanged(models.Model):
    """Abstract base adding indexed created/modified timestamps."""
    # Set once on INSERT.
    added = models.DateTimeField(
        auto_now_add=True,
        db_index=True,
        # default=now,
    )
    # Refreshed on every save().
    changed = models.DateTimeField(
        auto_now=True,
        db_index=True,
        # default=now
    )
    # , editable=False
    class Meta:
        abstract = True
class UserManager(BaseUserManager):
    """Manager with user/superuser creation helpers and random selection."""
    def create_user(self, email, username=None, password=None):
        """Create and save a regular (active, non-staff) user. Email required."""
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=self.normalize_email(email),
            username=username,
            is_staff=False,
            is_active=True,
            is_superuser=False,
            last_login=now(),
            date_joined=now()
        )
        user.set_password(password)
        user.save(using=self._db)
        return user
    def random(self):
        """TODO"""
        # there can be deleted items
        # NOTE(review): randint(0, count - 1) raises ValueError when the
        # queryset is empty -- confirm whether callers guard against that.
        with transaction.atomic():
            count = self.aggregate(count=Count('id'))['count']
            random_index = randint(0, count - 1)
            return self.all()[random_index]
    def create_superuser(self, email, username, password):
        """Create a user and promote them to active staff superuser."""
        user = self.create_user(email, username, password)
        user.is_active = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model.

    Email is the login identifier (USERNAME_FIELD); `username` is an
    internal identifier, e.g. a session key for temporary users.
    """
    objects = UserManager()
    USERNAME_FIELD = 'email'
    email = models.EmailField(
        verbose_name='Email',
        max_length=255,
        unique=True,
        db_index=True,
        blank=True, null=True,
        default=None,
    )
    username = models.CharField(
        max_length=200,
        db_index=True,
        # unique=True,
        default='',
        blank=True, null=True,
        help_text=_("This is an unique identifier, not actual username. Can be a session \
key for temporary users")
    )
    # is_superuser = models.BooleanField(default=False)
    is_staff = models.BooleanField(
        default=False,
        help_text=_("Designates whether this user can access the admin site.")
    )
    is_active = models.BooleanField(default=True)
    date_joined = models.DateTimeField(auto_now_add=True, db_index=True)
    first_name = models.CharField(
        max_length=200,
        blank=True, null=True,
    )
    last_name = models.CharField(
        max_length=200,
        blank=True, null=True,
    )
    # NOTE(review): presumably when the last password email was sent -- confirm.
    date_last_pass_sent = models.DateTimeField(null=True)
    skype = models.CharField(max_length=200, blank=True, null=True)
    discord = models.CharField(max_length=200, blank=True, null=True)
    phone = models.CharField(max_length=200, blank=True, null=True)
    city = models.CharField(max_length=200, blank=True, null=True)
    browser_on_creation = models.CharField(
        max_length=300,
        db_index=True,
        default=None,
        blank=True, null=True,
        help_text=_("Browser string used when this user was created")
    )
    created_from_ip = models.GenericIPAddressField(blank=True, null=True)
    timezone_str = models.CharField(
        max_length=50,
        db_index=True,
        default='UTC',
    )
    # avatar = models.ForeignKey(
    #     'images.Image',
    #     null=True,
    #     blank=True,
    #     # help_text=_("Avatar image")
    # )
    permissions = models.ManyToManyField(
        Permission,
        related_name="permissions",
        blank=True
    )
    groups = models.ManyToManyField(
        Group,
        related_name="groups",
        blank=True
    )
    telegram_chat_id = models.IntegerField(
        blank=True, null=True,
    )
    class Meta:
        verbose_name = _("User")
        verbose_name_plural = _("Users")
    def gravatar(self, size_in_px=25):
        """Return the gravatar URL for this user's email."""
        return get_gravatar_url(self.email, size=size_in_px)
    # @property
    # def social_accounts(self):
    #     """Return authorized social accounts"""
    #     return UserSocialAuth.objects.filter(user=self)
    @property
    def is_lazy(self):
        # Always True for now; the real check is commented out below.
        return True
        # return is_lazy_user(self)
    def get_full_name(self):
        "Used in Admin. Django wants this to be defined."
        return "{} {}".format(self.first_name, self.last_name)
    def get_short_name(self):
        "Used in Admin. Django wants this to be defined."
        return self.email
    def __str__(self):
        # if self.is_lazy:
        #     return "{}".format(_('Anonymous'))
        # Prefer first name, fall back to email, then to the primary key.
        if self.first_name:
            return self.first_name
        elif self.email:
            return self.email
        else:
            return "User {}".format(self.pk)
# pip install django-mptt
class Tree(MPTTModel):
    """Abstract MPTT tree node; concrete models inherit the parent link."""
    parent = TreeForeignKey(
        'self',
        default=None,
        null=True,
        blank=True,
        db_index=True,
        # related_name="%(app_label)s_%(class)s_parent",
        # related_name="%(app_label)s_%(class)s_children",
        related_name='children',
        verbose_name=_("Parent element"),
        # Children are detached (parent set NULL) when the parent is deleted.
        on_delete=models.SET_NULL,
    )
    class Meta:
        abstract = True
class Comment(Tree):
    """A threaded comment: an MPTT tree node with an author and source text."""
    author = models.ForeignKey(
        'core.User',
        default=None,
        null=True,
        blank=True,
        # Keep the comment (author becomes NULL) if the author is deleted.
        on_delete=models.SET_NULL,
    )
    # Comment body source text.
    src = models.TextField()
class LoginAttempt(models.Model):
    '''
    A login attempt record (both successful and not).
    If the user field is set, the login was successful;
    otherwise the raw login and password fields are recorded instead.
    '''
    # https://docs.python.org/3/library/ipaddress.html
    # inet = InetAddressField(primary_key=True)
    ip = InetAddressField()
    login = models.CharField(
        max_length=260,
        null=True, blank=True,
    )
    password = models.CharField(
        max_length=260,
        null=True, blank=True,
    )
    user = models.ForeignKey(
        'core.User',
        default=None,
        null=True,
        blank=True,
        # Keep the attempt record even if the user is later deleted.
        on_delete=models.SET_NULL,
    )
    time = models.DateTimeField(
        auto_now_add=True,
        db_index=True,
        null=True, blank=True,
    )
    # success = models.BooleanField(default=False)
    objects = NetManager()
# SQL UNNEST() expression -- expands a PostgreSQL array column into rows.
class Unnest(Func):
    function = 'UNNEST'
class IP(models.Model):
    """An IP address with the set of ports observed open on it."""
    # https://docs.python.org/3/library/ipaddress.html
    # inet = InetAddressField(primary_key=True)
    inet = InetAddressField()
    # Ports seen open on this address; NULL when never scanned.
    open_ports = ArrayField(
        models.IntegerField(),
        blank=True,
        null=True
    )
    objects = NetManager()
    class Meta:
        verbose_name = _('IP')
        verbose_name_plural = _('IP-addresses')
@classmethod
def stat(cls):
"""Return Port and how many IPs have it open"""
return cls.objects \
.annotate(port=Unnest('open_ports', distinct=True)) \
.values('port') \
.annotate(count=Count('port')) \
.order_by('-count', '-port')
    @classmethod
    def with_open_ports(cls, ports):
        """Return the IPs whose open_ports contain all of the given ports."""
        return cls.objects.filter(open_ports__contains=ports)
def __str__(self):
# from django.contrib.postgres.agg |
from .widget_svg_layout import SVGLayoutBox
from .widget | _fullscreen im | port FullscreenBox |
"""
WSGI config for Bilyric project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Dj | ango WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# Decide about Sentry AFTER the settings module is resolved. Previously the
# check ran before setdefault(), so an unset variable meant Sentry was never
# imported even though the post-setdefault check evaluated True -- a
# guaranteed NameError when wrapping the application below.
USE_SENTRY = os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production'
if USE_SENTRY:
    from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if USE_SENTRY:
    application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
= self.load_policy(
{
"name": "copy-related-tags",
"resource": "aws.ebs-snapshot",
"filters": [{"tag:Test": "Test"}],
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"tags": "*"
}
]
},
session_factory=factory
)
try:
resources = p.run()
except ClientError:
# it should filter missing volume and not throw an error
self.fail("This should have been handled in ErrorHandler.extract_bad_volume")
self.assertEqual(len(resources), 1)
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
# this should not filter missing volume and will throw an error
msg = e.response["Error"]["Message"]
err = e.response["Error"]["Code"]
self.assertEqual(err, "InvalidVolume.NotFound")
self.assertEqual(msg, f"The volume '{resources[0]['VolumeId']}' does not exist.")
class SnapshotAccessTest(BaseTest):

    def test_snapshot_access(self):
        # pre conditions, 2 snapshots one shared to a separate account, and one
        # shared publicly. 2 non matching volumes, one not shared, one shared
        # explicitly to its own account.
        self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
        session_factory = self.replay_flight_data("test_ebs_cross_account")
        policy = self.load_policy(
            {
                "name": "snap-copy",
                "resource": "ebs-snapshot",
                "filters": ["cross-account"],
            },
            session_factory=session_factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 2)
        violations = {
            r["SnapshotId"]: r["c7n:CrossAccountViolations"] for r in matched
        }
        self.assertEqual(
            violations,
            {"snap-7f9496cf": ["619193117841"], "snap-af0eb71b": ["all"]},
        )
class SnapshotDetachTest(BaseTest):

    def test_volume_detach(self):
        session_factory = self.replay_flight_data('test_ebs_detach')
        policy = self.load_policy(
            {
                'name': 'volume-detach',
                'resource': 'ebs',
                'filters': [{'VolumeId': 'vol-0850cf7c8e949c318'}],
                'actions': [{'type': 'detach'}]
            },
            session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        ec2 = session_factory(region="us-east-1").client('ec2')
        response = ec2.describe_volumes(VolumeIds=[matched[0]['VolumeId']])
        # Every attachment of the matched volume must be going away.
        for volume in response['Volumes']:
            for attachment in volume['Attachments']:
                self.assertIn(attachment['State'], ("detached", "detaching"))
class SnapshotCopyTest(BaseTest):

    def test_snapshot_copy(self):
        self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
        self.change_environment(AWS_DEFAULT_REGION="us-west-2")
        session_factory = self.replay_flight_data("test_ebs_snapshot_copy")
        policy = self.load_policy(
            {
                "name": "snap-copy",
                "resource": "ebs-snapshot",
                "filters": [{"tag:ASV": "RoadKill"}],
                "actions": [
                    {
                        "type": "copy",
                        "target_region": "us-east-1",
                        "target_key": "82645407-2faa-4d93-be71-7d6a8d59a5fc",
                    }
                ],
            },
            config=dict(region="us-west-2"),
            session_factory=session_factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        # The copy must carry the source snapshot's tags into us-east-1.
        ec2 = session_factory(region="us-east-1").client("ec2")
        raw_tags = ec2.describe_tags(
            Filters=[
                {"Name": "resource-id", "Values": [matched[0]["c7n:CopiedSnapshot"]]}
            ]
        )["Tags"]
        tag_map = {t["Key"]: t["Value"] for t in raw_tags}
        self.assertEqual(tag_map["ASV"], "RoadKill")
class SnapshotAmiSnapshotTest(BaseTest):

    def test_snapshot_ami_snapshot_filter(self):
        self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
        # DEFAULT_REGION needs to be set to west for recording
        session_factory = self.replay_flight_data("test_ebs_ami_snapshot_filter")

        def run_with_skip(name, value):
            # Same policy, varying only the skip-ami-snapshots value.
            return self.load_policy(
                {
                    "name": name,
                    "resource": "ebs-snapshot",
                    "filters": [{"type": "skip-ami-snapshots", "value": value}],
                },
                session_factory=session_factory,
            ).run()

        # first case should return only resources that are ami snapshots
        self.assertEqual(len(run_with_skip("ami-snap-filter", False)), 3)
        # second case should return resources that are NOT ami snapshots
        self.assertEqual(len(run_with_skip("non-ami-snap-filter", True)), 2)
class SnapshotUnusedTest(BaseTest):

    def test_snapshot_unused(self):
        session_factory = self.replay_flight_data("test_ebs_snapshot_unused")

        def run_unused(name, value):
            # Same policy, toggling only the 'unused' filter value.
            return self.load_policy(
                {
                    "name": name,
                    "resource": "ebs-snapshot",
                    "filters": [{"type": "unused", "value": value}],
                },
                session_factory=session_factory,
            ).run()

        self.assertEqual(len(run_unused("snap-unused", True)), 1)
        self.assertEqual(len(run_unused("snap-used", False)), 2)
class SnapshotTrimTest(BaseTest):

    def test_snapshot_trim(self):
        session_factory = self.replay_flight_data("test_ebs_snapshot_delete")
        policy = self.load_policy(
            {
                "name": "snapshot-trim",
                "resource": "ebs-snapshot",
                "filters": [{"tag:InstanceId": "not-null"}],
                "actions": ["delete"],
            },
            session_factory=session_factory,
        )
        self.assertEqual(len(policy.run()), 1)
class AttachedInstanceTest(BaseTest):

    def test_ebs_instance_filter(self):
        session_factory = self.replay_flight_data("test_ebs_instance_filter")
        instance_filter = {
            "type": "instance", "key": "tag:Name", "value": "CompiledLambda"
        }
        policy = self.load_policy(
            {
                "name": "attached-instance-test",
                "resource": "ebs",
                "filters": [instance_filter],
            },
            session_factory=session_factory,
        )
        self.assertEqual(len(policy.run()), 1)
class ResizeTest(BaseTest):
def test_resize_action(self):
factory = self.replay_flight_data("test_ebs_modifyable_action")
client = factory().client("ec2")
# Change a volume from 32 gb gp2 and 100 iops (sized based) to
# 64gb and 500 iops.
vol_id = "vol-0073dcd216489ea1b"
p = self.load_policy(
{
"name": "resizable",
"resource": "ebs",
"filters": ["modifyable", {"VolumeId": vol_id}],
"actions": [
{
"type": "modify",
"volume-type": "io1",
"size-percent": 200,
"iops-percent": 500,
|
# -*- coding: utf-8 -*-
__all__ = ['host_blueprint']
from datetime import datetime, timedelta
# dateutil
from dateutil.parser import parse as dtparse
# flask
from flask import (
Flask, request, session, g,
redirect, url_for, abort,
render_template, flash, jsonify,
Blueprint, abort,
send_from_directory,
current_app,
)
# flask login
from flask.ext.login import login_required, fresh_login_required, current_user
# flask-wtf
from flask.ext.wtf import Form
from wtforms import validators
from wtforms import TextField, PasswordField, SelectField, BooleanField
from wtforms_html5 import EmailField
# requests
import requests
from requests.auth import HTTPBasicAuth
# model
from model.db import db
from model.db import object_to_dict, objects_to_list, update_object_with_dict
from model.user import UserAccount, UserQuota
from model.host import Host
host_blueprint = Blueprint('host_blueprint', __name__)
@host_blueprint.route('/hosts', methods=['GET'])
@login_required
def host_hosts():
username = current_user.username
print 'host_hosts:', locals()
# get user account properties
user_account = UserAccount.query.filter_by(username=username).one()
dct = object_to_dict(user_account)
return render_template(
'host-hosts.html',
**dct
)
@host_blueprint.route('/hosts/all', methods=['POST'])
@login_required
def host_hosts_all():
username = current_user.username
usertype = current_user.usertype
print 'host_hosts_all:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
hosts = Host.query.all()
_hosts = objects_to_list(hosts)
data = {
'hosts': _hosts,
}
return jsonify(data)
@host_blueprint.route('/host/create', methods=['POST'])
@login_required
def host_create():
    """Create one host, or a batch when both name and host contain a
    '[lo-hi]' numeric range marker (e.g. 'node[1-4]').  Superuser only.

    Returns JSON: {'host': {...}} for a single host, {'hosts': [...]} for
    a batch, or {} when the caller is not a superuser.
    """
    username = current_user.username
    usertype = current_user.usertype
    _host = request.json['host']
    # NOTE(review): log label says 'host_add' although the view is host_create.
    print 'host_add:', locals()
    if usertype != 'super':
        data = {}
        return jsonify(data)
    name = _host['name']
    host = _host['host']
    port = _host['port']
    auth_username = _host['auth_username']
    auth_password = _host['auth_password']
    ram_capacity = _host['ram_capacity']
    ram_reserved = _host['ram_reserved']
    # Batch mode: both name and host carry a '[lo-hi]' range.
    if '[' in name and '-' in name and ']' in name and \
            '[' in host and '-' in host and ']' in host:
        _hosts = []
        hosts = []
        # name base/range
        s = name.find('[')
        e = name.find(']')
        name_base = name[:s]
        name_range = name[s + 1:e]
        name_range = name_range.strip(' ').strip()
        name_range = map(int, name_range.split('-'))
        # Make the upper bound inclusive for range().
        name_range[1] += 1
        # host base/range
        s = host.find('[')
        e = host.find(']')
        host_base = host[:s]
        host_range = host[s + 1:e]
        host_range = host_range.strip(' ').strip()
        host_range = map(int, host_range.split('-'))
        host_range[1] += 1
        # zip truncates to the shorter of the two ranges.
        for i, j in zip(range(*name_range), range(*host_range)):
            __host = {
                'name': '%s%i' % (name_base, i),
                'host': '%s%i' % (host_base, j),
                'port': port,
                'auth_username': auth_username,
                'auth_password': auth_password,
                'ram_capacity': ram_capacity,
                'ram_reserved': ram_reserved,
            }
            __host['created'] = __host['updated'] = datetime.utcnow()
            host = Host(**__host)
            db.session.add(host)
            hosts.append(host)
        db.session.commit()
        for host in hosts:
            __host = object_to_dict(host)
            _hosts.append(__host)
        data = {
            'hosts': _hosts,
        }
    else:
        # Single-host mode.
        _host['created'] = _host['updated'] = datetime.utcnow()
        host = Host(**_host)
        db.session.add(host)
        db.session.commit()
        _host = object_to_dict(host)
        data = {
            'host': _host,
        }
    return jsonify(data)
@host_blueprint.route('/host/update', methods=['POST'])
@login_required
def host_update():
username = current_user.username
usertype = current_user.usertype
_host = request.json['host']
print 'host_update:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
host = Host.query.get(_host['id'])
assert host is not None
_host['updated'] = datetime.utcnow()
update_object_with_dict(host, _host)
db.session.commit()
_host = object_to_dict(host)
data = {
'host': _host,
}
return jsonify(data)
@host_blueprint.route('/host/remove', methods=['POST'])
@login_required
def host_remove():
username = current_user.username
usertype = current_user.usertype
id = request.json['id']
print 'host_remove:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
host = Host.query.get(id)
assert host is not None
db.session.delete(host)
db.session.commit()
data = {}
return jsonify(data)
|
的交易数量是否大于order2
lt = order1 < order2 # order1唯一的交易数量是否小于order2
"""
    def __init__(self, orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
        """
        Requires a pd.DataFrame; the type is not yet validated.
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest
        :param same_rule: rule deciding whether two orders are "the same";
                          default EOrderSameRule.ORDER_SAME_BSPD, i.e. orders
                          match only when symbol, buy date, sell date and
                          prices are all equal
        """
        # Copy because column attributes will be added to orders_pd later.
        self.orders_pd = orders_pd.copy()
        self.same_rule = same_rule
        # Storage for the union / intersection / difference result.
        self.op_result = None
        self.last_op_metrics = {}
    @contextmanager
    def proxy_work(self, orders_pd):
        """
        Wrap the orders_pd to be compared in an AbuOrderPdProxy, yield both
        proxies to the caller, then uniformly analyse op_result on exit.
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest
        :return:
        """
        # Reset any previous set-operation result.
        self.op_result = None
        # Build the proxy for the orders being compared against.
        other = AbuOrderPdProxy(orders_pd)
        try:
            yield self, other
        finally:
            if isinstance(self.op_result, pd.DataFrame):
                # A union / intersection / difference result was stored:
                # compute summary metrics over it.
                from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
                metrics = AbuMetricsBase(self.op_result, None, None, None)
                metrics.fit_metrics_order()
                self.last_op_metrics['win_rate'] = metrics.win_rate
                self.last_op_metrics['gains_mean'] = metrics.gains_mean
                self.last_op_metrics['losses_mean'] = metrics.losses_mean
                self.last_op_metrics['sum_profit'] = self.op_result['profit'].sum()
                self.last_op_metrics['sum_profit_cg'] = self.op_result['profit_cg'].sum()
    def __and__(self, other):
        """Overload &: intersection of the two order sets."""
        # self.op = 'intersection(order1 & order2)'
        self.op_result = intersection_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
        return self.op_result
    def __or__(self, other):
        """Overload |: union of the two order sets."""
        # self.op = 'union(order1 | order2)'
        self.op_result = union_in_2orders(self.orders_pd, other.orders_pd)
        return self.op_result
    def __sub__(self, other):
        """Overload -: difference, orders in self but not in other."""
        self.op_result = difference_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
        return self.op_result
    def __eq__(self, other):
        """Overload ==: equal iff both set differences are empty."""
        return (self - other).empty and (other - self).empty
    def __gt__(self, other):
        """Overload >: compare by count of unique (buy_date, symbol) orders;
        the class is decorated with total_ordering, so lt etc. are derived."""
        unique_cnt = find_unique_group_symbol(self.orders_pd).shape[0]
        other_unique_cnt = find_unique_group_symbol(other.orders_pd).shape[0]
        return unique_cnt > other_unique_cnt
def union_in_2orders(orders_pd, other_orders_pd):
    """
    Union: used when debugging factor/parameter problems to collect all
    distinct trades from two orders_pd frames. Orders placed on the same day
    for the same symbol are NOT merged here; the frames are simply
    concatenated and exact duplicate rows dropped, i.e. the result is
    orders_pd | cmp_orders_pd / orders_pd.union(cmp_orders_pd).
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :return: orders_pd | cmp_orders_pd
    """
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    merged = pd.concat([orders_pd, other_orders_pd])
    return merged.drop_duplicates()
def _same_pd(order, other_orders_pd, same_rule):
    """
    Return the rows of other_orders_pd that match `order` under same_rule.
    :param order: one order row taken from an orders_pd
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule deciding whether two orders are the same
    :return: the matching rows of other_orders_pd
    """
    symbol = order.symbol
    buy_day = order['buy_date']
    buy_price = order['buy_price']
    sell_day = order['sell_date']
    sell_price = order['sell_price']
    if same_rule == EOrderSameRule.ORDER_SAME_BD:
        # Same symbol bought on the same day; all other fields are ignored.
        same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)]
    elif same_rule == EOrderSameRule.ORDER_SAME_BSD:
        # Same symbol, buy date and sell date.
        same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
                                  & (other_orders_pd['sell_date'] == sell_day)]
    elif same_rule == EOrderSameRule.ORDER_SAME_BDP:
        # Same symbol, buy date and buy price.
        same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
                                  & (other_orders_pd['buy_price'] == buy_price)]
    elif same_rule == EOrderSameRule.ORDER_SAME_BSPD:
        # Same symbol, buy/sell dates and buy/sell prices.
        same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
                                  & (other_orders_pd['sell_date'] == sell_day)
                                  & (other_orders_pd['buy_price'] == buy_price)
                                  & (other_orders_pd['sell_price'] == sell_price)]
    else:
        raise TypeError('same_rule type is {}!!'.format(same_rule))
    return same_pd
def intersection_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
    """
    Intersection: selects the trades present in both frames, i.e.
    orders_pd & cmp_orders_pd / orders_pd.intersection(cmp_orders_pd).
    NOTE(review): adds an 'intersection' column to the caller's orders_pd
    in place; the returned frame keeps that column.
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule deciding whether two orders are the same, default
                      EOrderSameRule.ORDER_SAME_BSPD (same symbol, buy date,
                      sell date and prices)
    :return: orders_pd & cmp_orders_pd
    """
    def _intersection(order):
        same_pd = _same_pd(order, other_orders_pd, same_rule)
        if same_pd.empty:
            # No match: this order is not in the intersection.
            return False
        # Matched: part of the intersection.
        return True
    orders_pd['intersection'] = orders_pd.apply(_intersection, axis=1)
    return orders_pd[orders_pd['intersection'] == 1]
def difference_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
    """
    Difference: selects the trades present in orders_pd but NOT in
    other_orders_pd, i.e. orders_pd - cmp_orders_pd /
    orders_pd.difference(cmp_orders_pd).
    NOTE(review): adds a 'difference' column to the caller's orders_pd
    in place; the returned frame keeps that column.
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule deciding whether two orders are the same, default
                      EOrderSameRule.ORDER_SAME_BSPD (same symbol, buy date,
                      sell date and prices)
    :return: orders_pd - cmp_orders_pd
    """
    def _difference(order):
        same_pd = _same_pd(order, other_orders_pd, same_rule)
        if same_pd.empty:
            # No match anywhere in the other frame: part of the difference.
            return True
        # Matched: not part of the difference.
        return False
    orders_pd['difference'] = orders_pd.apply(_difference, axis=1)
    return orders_pd[orders_pd['difference'] == 1]
def find_unique_group_symbol(order_pd):
    """
    Group by ('buy_date', 'symbol') and keep only the first row of each
    group, i.e. at most one order per symbol per buy date.
    :param order_pd:
    :return:
    """
    def _first_of_group(same_group):
        # Keep a single representative order for each (buy_date, symbol) pair.
        return same_group.iloc[0]

    return order_pd.groupby(['buy_date', 'symbol']).apply(_first_of_group)
def find_unique_symbol(order_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
order_pd中如果一个buy_date对应的一个symbol有多条交易记录,过滤掉,
注意如果在对应多条记录中保留一个,使用find_unique_group_symbol
:param order_pd: 回测结果生成的交易订单构成的pd.DataFrame对象
:param same_rule: order判断为是否相同使用的规则, 默认EOrderSameRule.ORDER_SAME_BSPD
即:order有相同的symbol和买入日期和相同的卖出日期和价格才认为是相同
"""
def _find_unique_symbol(order):
"""根据order的symbol和buy_date在原始order_pd中进行复合条件筛选,结果same_pd如果只有1个就唯一,否则就是重复的"""
same_pd = _same_pd(order, order_pd, same_rule)
if same_pd.empty or same_pd.shape[0] == 1:
return False
# 同一天一个symbol有多条记录的一个也没留,都过滤
return True
same_mark = order_pd.apply(_find_unique_symbol, axis=1)
return order_ |
from tabulate import tabulate
class Response():
    """Simple console response: optional message(s) plus tabular data."""
    message = None
    data = None

    def print(self):
        """Print the message(s), then the tabulated rows (or 'Empty!')."""
        if self.message:
            # BUG FIX: the original compared type(x) == "str"/"list" -- a type
            # object against a string literal, which is always False, so no
            # message was ever printed. isinstance is the correct check.
            if isinstance(self.message, str):
                print(self.message)
            elif isinstance(self.message, list):
                for message in self.message:
                    print("{}\n".format(message))
        if self.data:
            if len(self.data["rows"]) > 0:
                print(tabulate(self.data["rows"], headers=self.data["headers"]))
            else:
                print("Empty!")
|
import rply
from ..lexer import lexers
__all__ = ('parsers',)
class Parsers(object):
    """Lazily-built rply parser generators and parsers for the two grammars."""

    def __init__(self):
        # All four are created on first property access and then cached.
        self._fpg = None
        self._fp = None
        self._spg = None
        self._sp = None

    @property
    def fpg(self):
        """Filter-grammar ParserGenerator (built on first access)."""
        if self._fpg is not None:
            return self._fpg
        self._fpg = rply.ParserGenerator(
            [rule.name for rule in lexers.flg.rules],
            precedence=[]
        )
        return self._fpg

    @property
    def fp(self):
        """Filter parser, built from fpg on first access."""
        if self._fp is not None:
            return self._fp
        self._fp = self.fpg.build()
        return self._fp

    @property
    def spg(self):
        """Structure-grammar ParserGenerator (built on first access)."""
        if self._spg is not None:
            return self._spg
        self._spg = rply.ParserGenerator(
            [rule.name for rule in lexers.slg.rules],
            precedence=[]
        )
        return self._spg

    @property
    def sp(self):
        """Structure parser, built from spg on first access."""
        if self._sp is not None:
            return self._sp
        self._sp = self.spg.build()
        return self._sp
# Module-level singleton; the production modules imported below presumably
# register their grammar rules on these shared generators -- see those modules.
parsers = Parsers()
# Load productions
from .filter import fpg  # noqa
from .structure import spg  # noqa
|
# -*- coding: utf-8 -*-
# (C) 2017 Muthiah Annamalai
# This file is part of open-tamil examples
# This code is released under public domain
import joblib
# Ref API help from : https://scikit-learn.org
import numpy as np
import random
import string
import time
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
# project modules
from classifier_eng_vs_ta import jaffna_transliterate
from preprocess import Feature
def data1(filename):
    """Load a CSV feature matrix and label every row as class 1 (Tamil).

    :param filename: path to a comma-separated feature file
    :return: tuple (x, y): x is the feature matrix, y an (n_rows, 1)
             column of ones
    """
    # Pass the path straight to loadtxt; the previous open(filename, "r")
    # handle was never closed (resource leak).
    x = np.loadtxt(filename, delimiter=",")
    y = np.ones(shape=(x.shape[0], 1))
    return (x, y)
def data0(filename):
    """Load a CSV feature matrix and label every row as class 0 (English).

    :param filename: path to a comma-separated feature file
    :return: tuple (x, y): x is the feature matrix, y an (n_rows, 1)
             column of zeros
    """
    # Pass the path straight to loadtxt; the previous open(filename, "r")
    # handle was never closed (resource leak).
    x = np.loadtxt(filename, delimiter=",")
    y = np.zeros(shape=(x.shape[0], 1))
    return (x, y)
DEBUG = False
x1, y1 = data1("tamilvu_dictionary_words.txt.csv")
x0, y0 = data0("english_dictionary_words.jaffna.csv")
az_x0, az_y0 = data0("english_dictionary_words.azhagi.csv")
cm_x0, cm_y0 = data0("english_dictionary_words.combinational.csv")
x1 = x1.take(range(0, x0.shape[0]), axis=0)
y1 = np.ones((x0.shape[0], 1))
## Scale the data for the training
X = np.concatenate((x0, x1), axis=0)
Y = np.concatenate((y0, y1), axis=0)
# Y = Y.take(range(0,X.shape[0]),axis=0).ravel()
Y = Y.ravel()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
scaler = StandardScaler()
scaler.fit(X_train)
joblib.dump(scaler, "test_scaler.pkl") # scaler Dump for webapps
print("Size of Training set => %d" % X_train.shape[0])
print("Size of Test set => %d" % X_test.shape[0])
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
###########
## Build training set for the model
## solver='sgd',activation='logistic',
## We have a 4-layer model
nn = MLPClassifier(hidden_layer_sizes=(15, 15, 10, 5),
activation='logistic',
max_iter=100000, alpha=0.01, solver='lbfgs')
# Try 1-layer simple model with logistic activation
# nn = MLPClassifier(
# hidden_layer_sizes=(8, 8, 7), solver="lbfgs"
# ) # activation='logistic',max_iter=1000,early_stopping=True,solver='lbfgs')
# max_iter=500,solver='sgd',activation='logistic')
print(nn)
nn.fit(X_train, Y_train)
joblib.dump(
nn, "nn-%s.pkl" % time.ctime()
) # change dump name to test_nn.pkl for webapps
Y_pred = nn.predict(X_test)
print(" accuracy => ", accuracy_score(Y_pred.ravel(), Y_tes | t))
score = nn.score(X_test, Y_test)
print("Score => ")
print(score)
print(confusion_matrix(Y_test, Y_pred.ravel()))
print(classification_report(Y_test, Y_pred.ravel()))
def process_word(s):
    """Transliterate s when it contains ASCII letters, then classify it
    with the trained NN and print the Tamil-vs-English verdict.

    Feature-extraction errors are logged and swallowed.
    """
    if any([l in string.ascii_lowercase for l in s]):
        s = jaffna_transliterate(s)
        print(u"Transliterated to %s" % s)
    print(u"Checking in NN '%s'" % s)
    try:
        f = Feature.get(s)
        scaled_feature = scaler.transform(np.array(f.data()).reshape(1, -1))
        y = nn.predict(scaled_feature)
        print(scaled_feature)
        print(y)
        if y.ravel() > 0:
            print(u"%s -> TAMIL world (most likely)" % s)
        else:
            print(u"%s -> ENG word (most likely)" % s)
    except Exception as ioe:
        # BUG FIX: exceptions have no .message attribute on Python 3;
        # printing the exception object works on both 2 and 3.
        print("SKIPPING => ", ioe)
        return
for w in [
    u"hello",
    u"ஆரொன்",
    u"உகந்த",
    u"கம்புயுடர்",
    u"கம்ப்யூட்டர்",
    u"பியூடிபுல்",
    "pupil",
    "beautiful",
    "summer",
    "sinful",
    "google",
    "facebook",
    "microsoft",
    "swift",
]:
    process_word(w)
while True:
    # BUG FIX: input(...).decode("utf-8") crashed on Python 3 (str has no
    # .decode) and on Python 2 input() *evaluates* what the user types.
    # Read a raw line on either version and decode only when it is bytes.
    try:
        s = raw_input(u">> ")  # Python 2
    except NameError:
        s = input(u">> ")  # Python 3
    if isinstance(s, bytes):
        s = s.decode("utf-8")
    s = s.strip().lower()
    if s == "end":
        break
    if len(s) < 1:
        continue
    process_word(s)
|
les serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_trial" not in self._stubs:
self._stubs["create_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial",
request_serializer=vizier_service.CreateTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["create_trial"]
@property
def get_trial(
self,
) -> Callable[[vizier_service.GetTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the get trial method over gRPC.
Gets a Trial.
Returns:
Callable[[~.GetTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_trial" not in self._stubs:
self._stubs["get_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/GetTrial",
request_serializer=vizier_service.GetTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["get_trial"]
@property
def list_trials(
self,
) -> Callable[
[vizier_service.ListTrialsRequest], Awaitable[vizier_service.ListTrialsResponse]
]:
r"""Return a callable for the list trials method over gRPC.
Lists the Trials associated with a Study.
Returns:
Callable[[~.ListTrialsRequest],
Awaitable[~.ListTrialsResponse]]:
A function that, when called, will call the underlying RPC
| on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
| # gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_trials" not in self._stubs:
self._stubs["list_trials"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/ListTrials",
request_serializer=vizier_service.ListTrialsRequest.serialize,
response_deserializer=vizier_service.ListTrialsResponse.deserialize,
)
return self._stubs["list_trials"]
@property
def add_trial_measurement(
self,
) -> Callable[[vizier_service.AddTrialMeasurementRequest], Awaitable[study.Trial]]:
r"""Return a callable for the add trial measurement method over gRPC.
Adds a measurement of the objective metrics to a
Trial. This measurement is assumed to have been taken
before the Trial is complete.
Returns:
Callable[[~.AddTrialMeasurementRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_trial_measurement" not in self._stubs:
self._stubs["add_trial_measurement"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement",
request_serializer=vizier_service.AddTrialMeasurementRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["add_trial_measurement"]
@property
def complete_trial(
self,
) -> Callable[[vizier_service.CompleteTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the complete trial method over gRPC.
Marks a Trial as complete.
Returns:
Callable[[~.CompleteTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "complete_trial" not in self._stubs:
self._stubs["complete_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial",
request_serializer=vizier_service.CompleteTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["complete_trial"]
@property
def delete_trial(
self,
) -> Callable[[vizier_service.DeleteTrialRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete trial method over gRPC.
Deletes a Trial.
Returns:
Callable[[~.DeleteTrialRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_trial" not in self._stubs:
self._stubs["delete_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial",
request_serializer=vizier_service.DeleteTrialRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_trial"]
@property
def check_trial_early_stopping_state(
self,
) -> Callable[
[vizier_service.CheckTrialEarlyStoppingStateRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the check trial early stopping
state method over gRPC.
Checks whether a Trial should stop or not. Returns a
long-running operation. When the operation is successful, it
will contain a
[CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
Returns:
Callable[[~.CheckTrialEarlyStoppingStateRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "check_trial_early_stopping_state" not in self._stubs:
self._stubs[
"check_trial_early_stopping_state"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState",
request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["check_trial_early_stopping_state"]
@property
def stop_trial(
self,
) -> Callable[[vizier_service.StopTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the stop trial method over gRPC.
Stops a Trial.
Returns:
Callable[[~.StopTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the reques |
"""Response classes used by urllib.
The base class, addbase, defines a minimal file-like interface,
including read() and readline(). The typical response object is an
addinfourl instance, which defines an info() method that returns
headers and a geturl() method that returns the url.
"""
class addbase(object):
    """Minimal file-like wrapper that delegates to an underlying fp.

    Exposes read()/readline() (and readlines()/fileno() when the wrapped
    object has them), supports iteration and use as a context manager.
    """

    def __init__(self, fp):
        # Delegate by binding the wrapped object's bound methods directly
        # onto the instance.
        self.fp = fp
        self.read = self.fp.read
        self.readline = self.fp.readline
        if hasattr(self.fp, "readlines"):
            self.readlines = self.fp.readlines
        # Fall back to a callable returning None when the wrapped object
        # has no file descriptor.
        if hasattr(self.fp, "fileno"):
            self.fileno = self.fp.fileno
        else:
            self.fileno = lambda: None

    def __iter__(self):
        # Delegate iteration to the wrapped object.  Returning just
        # `self.fp` would work for built-in files but not for arbitrary
        # file-like objects; instance-level `__iter__` assignment is also
        # bypassed by the iter() builtin, hence this method.
        return iter(self.fp)

    def __repr__(self):
        return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
                                             id(self), self.fp)

    def close(self):
        # Close the wrapped object (if any) and drop every delegated
        # attribute so later use fails loudly.
        if self.fp:
            self.fp.close()
        self.fp = None
        self.read = None
        self.readline = None
        self.readlines = None
        self.fileno = None
        self.__iter__ = None
        self.__next__ = None

    def __enter__(self):
        if self.fp is None:
            raise ValueError("I/O operation on closed file")
        return self

    def __exit__(self, type, value, traceback):
        self.close()
class addclosehook(addbase):
    """File wrapper that invokes a callback exactly once when closed."""

    def __init__(self, fp, closehook, *hookargs):
        addbase.__init__(self, fp)
        self.closehook = closehook
        self.hookargs = hookargs

    def close(self):
        # Fire the hook first, then clear it so a second close() is a
        # plain addbase.close().
        hook = self.closehook
        if hook:
            hook(*self.hookargs)
            self.closehook = None
            self.hookargs = None
        addbase.close(self)
class addinfo(addbase):
    """File wrapper exposing response headers via an info() method."""

    def __init__(self, fp, headers):
        addbase.__init__(self, fp)
        self.headers = headers

    def info(self):
        """Return the stored headers object."""
        return self.headers
class addinfourl(addbase):
    """File wrapper adding info(), geturl() and getcode() accessors."""

    def __init__(self, fp, headers, url, code=None):
        addbase.__init__(self, fp)
        self.headers = headers
        self.url = url
        self.code = code

    def info(self):
        """Return the stored headers object."""
        return self.headers

    def getcode(self):
        """Return the HTTP status code (None when unknown)."""
        return self.code

    def geturl(self):
        """Return the URL of the resource retrieved."""
        return self.url
|
import json
from django.db.models import Q, Subquery
from django.core.management.base import BaseCommand
from readthedocs.oauth.models import RemoteRepository
from readthedocs.oauth.services import registry
from readthedocs.oauth.services.base import SyncServiceError
from readthedocs.projects.models import Project
from readthedocs.organizations.models import Organization
class Command(BaseCommand):
    # Management command that links RemoteRepository rows to the matching
    # Project rows of one or more organizations.  Dry-run by default; pass
    # --no-dry-run to persist the proposed links.
    help = "Re-connect RemoteRepository to Project"
    def add_arguments(self, parser):
        """Register the CLI arguments for this command."""
        parser.add_argument('organization', nargs='+', type=str)
        parser.add_argument(
            '--no-dry-run',
            action='store_true',
            default=False,
            help='Update database with the changes proposed.',
        )
        # If owners do not have their RemoteRepository synced, it could
        # happen we don't find a matching Project (see --force-owners-social-resync)
        parser.add_argument(
            '--only-owners',
            action='store_true',
            default=False,
            help='Connect repositories only to organization owners.',
        )
        parser.add_argument(
            '--force-owners-social-resync',
            action='store_true',
            default=False,
            help='Force to re-sync RemoteRepository for organization owners.',
        )
    def _force_owners_social_resync(self, organization):
        """Re-sync RemoteRepository data for every owner of the organization."""
        for owner in organization.owners.all():
            for service_cls in registry:
                for service in service_cls.for_user(owner):
                    try:
                        service.sync()
                    except SyncServiceError:
                        # Best-effort: one failing provider must not abort
                        # the sync of the remaining services/owners.
                        print(f'Service {service} failed while syncing. Skipping...')
    def _connect_repositories(self, organization, no_dry_run, only_owners):
        """Link RemoteRepository rows to the organization's Projects by repo URL.

        Only saves changes when ``no_dry_run`` is True; otherwise it just
        prints the links that would be created.
        """
        connected_projects = []
        # TODO: consider using the same logic as the RemoteRepository.matches method
        # https://github.com/readthedocs/readthedocs.org/blob/49b03f298b6105d755554f7dc7e97a3398f7066f/readthedocs/oauth/models.py#L185-L194
        remote_query = (
            Q(ssh_url__in=Subquery(organization.projects.values('repo'))) |
            Q(clone_url__in=Subquery(organization.projects.values('repo')))
        )
        for remote in RemoteRepository.objects.filter(remote_query).order_by('created'):
            # Admin permission as reported by the provider's JSON payload.
            admin = json.loads(remote.json).get('permissions', {}).get('admin')
            if only_owners and remote.users.first() not in organization.owners.all():
                # Do not connect a RemoteRepository if the User is not owner of the organization
                continue
            if not admin:
                # Do not connect a RemoteRepository where the User is not admin of the repository
                continue
            if not organization.users.filter(username=remote.users.first().username).exists():
                # Do not connect a RemoteRepository if the user does not belong to the organization
                continue
            # Projects matching
            # - RemoteRepository URL
            # - are under the Organization
            # - not connected to a RemoteRepository already
            # - was not connected previously by this call to the script
            projects = Project.objects.filter(
                Q(repo=remote.ssh_url) | Q(repo=remote.clone_url),
                organizations__in=[organization.pk],
                remote_repository__isnull=True
            ).exclude(slug__in=connected_projects)
            for project in projects:
                connected_projects.append(project.slug)
                if no_dry_run:
                    remote.project = project
                    remote.save()
                print(f'{project.slug: <40} {remote.pk: <10} {remote.html_url: <60} {remote.users.first().username: <20} {admin: <5}') # noqa
        print('Total:', len(connected_projects))
        if not no_dry_run:
            print(
                'Changes WERE NOT applied to the database. '
                'Run it with --no-dry-run to save the changes.'
            )
    def handle(self, *args, **options):
        """Entry point: resolve each organization slug and connect its repos."""
        no_dry_run = options.get('no_dry_run')
        only_owners = options.get('only_owners')
        force_owners_social_resync = options.get('force_owners_social_resync')
        for organization in options.get('organization'):
            try:
                organization = Organization.objects.get(slug=organization)
                if force_owners_social_resync:
                    self._force_owners_social_resync(organization)
                self._connect_repositories(organization, no_dry_run, only_owners)
            except Organization.DoesNotExist:
                print(f'Organization does not exist. organization={organization}')
|
-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
# Delete stale NICs
orm.NetworkInterface.objects.filter(machine__deleted=True).delete()
for nic in orm.NetworkInterface.objects.select_related('machine',
'network').all():
userid = nic.machine.userid
nic.userid = userid
nic.save()
network = nic.network
for attr in ["ipv4", "ipv6"]:
address = getattr(nic, attr)
if address:
| ipversion = 4 if attr == "ipv4" else 6
subnet = nic.network.subnets.get(ipversion=ipversion)
orm.IPAddress.objects.create(network=network,
subnet=subnet,
nic=nic,
userid=userid,
address=address)
def | backwards(self, orm):
"Write your backwards methods here."
for ip in orm.IPAddress.objects.filter(deleted=False):
nic = ip.nic
attr = "ipv4" if nic.subnet.ipversion == 4 else "ipv6"
setattr(nic, attr, ip.address)
nic.save()
models = {
'db.backend': {
'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.floatingip': {
'Meta': {'object_name': 'FloatingIP'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipv4': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15', 'db_index': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'null': 'True', 'to': "orm['db.VirtualMachine']"}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'to': "orm['db.Network']"}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ipaddress': {
'Meta': {'unique_together': "(('network', 'address'),)", 'object_name': 'IPAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips' |
r Render tsr samples and push direction vectors during planning
"""
HerbGrasp(robot, obj, manip=manip, preshape=preshape,
tsrlist=tsrlist, render=render)
@ActionMethod
def PushGrasp(robot, obj, push_distance=0.1, manip=None,
              preshape=None, push_required=True,
              tsrlist=None, render=True, **kw_args):
    """
    @param robot The robot performing the push grasp
    @param obj The object to push grasp
    @param push_distance The distance to push before grasping
    @param manip The manipulator to perform the grasp with
       (if None active manipulator is used)
    @param push_required If true, throw exception if a plan for the pushing
     movement cannot be found. If false, continue with grasp even if push
     cannot be executed.
    @param preshape The grasp preshape for the hand (default open: 4 zeros)
    @param tsrlist A list of TSRChain objects to use for planning to grasp pose
     (if None, the 'push_grasp' tsr from tsrlibrary is used)
    @param render Render tsr samples and push direction vectors during planning
    """
    # Avoid a shared mutable default argument; build the preshape per call.
    if preshape is None:
        preshape = [0., 0., 0., 0.]
    if tsrlist is None:
        tsrlist = robot.tsrlibrary(obj, 'push_grasp', push_distance=push_distance)
    # BUG FIX: push_required and the extra planner kwargs were accepted but
    # never forwarded, so HerbGrasp always fell back to push_required=False
    # and a failed push could never raise as documented.
    HerbGrasp(robot, obj, manip=manip, preshape=preshape,
              push_distance=push_distance,
              push_required=push_required,
              tsrlist=tsrlist, render=render, **kw_args)
def HerbGrasp(robot, obj, push_distance=None, manip=None,
              preshape=[0., 0., 0., 0.],
              push_required=False,
              tsrlist=None,
              render=True,
              **kw_args):
    """
    Plan to a grasp pose, optionally push the object first, then close the
    hand and grab the object.

    @param robot The robot performing the push grasp
    @param obj The object to push grasp
    @param push_distance The distance to push before grasping (if None, no pushing)
    @param manip The manipulator to perform the grasp with
       (if None active manipulator is used)
    @param preshape The grasp preshape for the hand
    @param push_required If true, throw exception if a plan for the pushing
     movement cannot be found. If false, continue with grasp even if push
     cannot be executed. (only used if distance is not None)
    @param render Render tsr samples and push direction vectors during planning
    """
    if manip is None:
        with robot.GetEnv():
            manip = robot.GetActiveManipulator()
    # Move the hand to the grasp preshape
    manip.hand.MoveHand(*preshape)
    # Get the grasp tsr
    if tsrlist is None:
        tsrlist = robot.tsrlibrary(obj, 'grasp')
    # Plan to the grasp
    with prpy.viz.RenderTSRList(tsrlist, robot.GetEnv(), render=render):
        manip.PlanToTSR(tsrlist)
    if push_distance is not None:
        # Push direction is taken from the end-effector transform's third
        # column — presumably the approach axis of the hand; TODO confirm.
        ee_in_world = manip.GetEndEffectorTransform()
        push_direction = ee_in_world[:3,2]
        # Move the object into the hand
        env = robot.GetEnv()
        with env:
            obj_in_world = obj.GetTransform()
            # First move back until collision
            stepsize = 0.01
            total_distance = 0.0
            while not env.CheckCollision(robot, obj) and total_distance <= push_distance:
                obj_in_world[:3,3] -= stepsize*push_direction
                total_distance += stepsize
                obj.SetTransform(obj_in_world)
            # Then move forward until just out of collision
            stepsize = 0.001
            while env.CheckCollision(robot, obj):
                obj_in_world[:3,3] += stepsize*push_direction
                obj.SetTransform(obj_in_world)
        # Manipulator must be active for grab to work properly
        p = openravepy.KinBody.SaveParameters
        with robot.CreateRobotStateSaver(p.ActiveManipulator):
            robot.SetActiveManipulator(manip)
            # Grab so the object moves together with the hand during the push.
            robot.Grab(obj)
        # Now execute the straight line movement
        with prpy.viz.RenderVector(ee_in_world[:3,3], push_direction,
                                   push_distance, robot.GetEnv(), render=render):
            try:
                with prpy.rave.Disabled(obj):
                    manip.PlanToEndEffectorOffset(direction = push_direction,
                                                  distance = push_distance,
                                                  **kw_args)
            except PlanningError, e:
                # Push failure is only fatal when the caller demanded it.
                if push_required:
                    raise
                else:
                    logger.warn('Could not find a plan for straight line push. Ignoring.')
        # Release before the final grasp; the object is re-grabbed below.
        robot.Release(obj)
    # Now close the hand to grasp
    manip.hand.CloseHand()
    # Manipulator must be active for grab to work properly
    p = openravepy.KinBody.SaveParameters
    with robot.CreateRobotStateSaver(p.ActiveManipulator):
        robot.SetActiveManipulator(manip)
        robot.Grab(obj)
@ActionMethod
def Lift(robot, obj, distance=0.05, manip=None, render=True, **kw_args):
    """
    Lift an object by planning a straight-line end-effector motion upward.

    @param robot The robot performing the lift
    @param obj The object to lift
    @param distance The distance to lift the cup
    @param manip The manipulator to perform the grasp with
       (if None active manipulator is used)
    @param render Render tsr samples and push direction vectors during planning
    """
    if manip is None:
        with robot.GetEnv():
            manip = robot.GetActiveManipulator()
    # Check for collision and disable anything in collision
    creport = openravepy.CollisionReport()
    disabled_objects = []
    # Resolve inconsistencies in grabbed objects by re-grabbing everything.
    # BUG FIX: the loops below used 'obj' as loop variable, clobbering the
    # 'obj' parameter; distinct names keep the parameter intact.
    if robot.CheckSelfCollision():
        grabbed_objs = robot.GetGrabbed()
        for grabbed in grabbed_objs:
            robot.Release(grabbed)
        for grabbed in grabbed_objs:
            robot.Grab(grabbed)
    # Record everything currently colliding with the robot so it can be
    # disabled during planning (re-enabled immediately; AllDisabled below
    # disables the recorded set for the duration of the plan).
    while robot.GetEnv().CheckCollision(robot, creport):
        collision_obj = creport.plink2.GetParent()
        disabled_objects.append(collision_obj)
        collision_obj.Enable(False)
    for blocking_obj in disabled_objects:
        blocking_obj.Enable(True)
    # Perform the lift
    with prpy.rave.AllDisabled(robot.GetEnv(), disabled_objects):
        lift_direction = [0., 0., 1.]
        lift_distance = distance
        ee_in_world = manip.GetEndEffectorTransform()
        with prpy.viz.RenderVector(ee_in_world[:3,3], lift_direction,
                                   distance, robot.GetEnv(), render=render):
            manip.PlanToEndEffectorOffset(direction=lift_direction,
                                          distance=lift_distance,
                                          **kw_args)
@ActionMethod
def Place(robot, obj, on_obj, given_point_on=None, manip=None, render=True, **kw_args):
"""
Place an object onto another object
This assumes the 'point_on' tsr is defined for the on_obj and
the 'place' tsr is defined for obj
@param robot The robot performing the push grasp
@param obj The object to place
@param on_obj The object to place obj on
    @param given_point_on 4x4 numpy array (pose matrix) "X"-marked location on on_obj, in on_obj's coordinates.
@param manip The manipulator to perform the grasp with
(if None active manipulator is used)
@param render Render tsr samples and push direction vectors during planning
"""
if manip is None:
with robot.GetEnv():
manip = robot.GetActiveManipulator()
# Get a tsr to sample places to put the glass
obj_extents = obj.ComputeAABB().extents()
obj_radius = max(obj_extents[0], obj_extents[1])
if (given_point_on == None):
dest_tsr = robot.tsrlibrary(on_obj, 'point_on', padding=obj_radius)
else:
# Given a point on the on_obj to place obj
dest_tsr = robot.tsrlibrary(on_obj, 'given_point_on', given_point_on, manip=manip);
# Now use this to get a tsr for sampling ee_poses
place_tsr = robot.tsrlibrary(obj, 'place', pose_tsr_chain = dest_tsr[0], manip=manip)
# Plan to the grasp
with prpy.viz.RenderTSRList(place_tsr, robot.GetEnv(), render=render):
manip.PlanToTSR(pl |
import pygame
import src.graphics as graphics
import src.colours as colours
import src.config as config
import s | rc.scenes.scenebase as scene_base
from src.minigames.hunt.input_handler import InputHandler
from src.gui.clickable import Clickable
from src.resolution_asset_sizer import ResolutionAssetSizer
from src.tiled_map import TiledMap
from src.game_object.deadly_area import DeadlyArea
from src.minigames.hunt.player import Player
from src.minigames.hunt.collectible import Collectible
class Hunt(scene_base.SceneBase):
    """The Hunt minigame...pretty much snake"""

    def __init__(self, previous, current_stage=1):
        """Load the tilemap for the given stage and wire up the scene."""
        self.current_stage = current_stage
        self.file = './assets/game-data/levels/minigames/hunt/minigame-hunt-' + str(self.current_stage) + '.tmx'
        self.surface = graphics.get_window_surface()
        self.tiled_map = TiledMap(self.file, self.surface)
        self.sprites = self.tiled_map.sprites
        self.player = self.get_player()
        # Partition the level sprites into pickups and hazards.
        self.collectibles = pygame.sprite.Group(
            [s for s in self.sprites if isinstance(s, Collectible)])
        self.collideables = pygame.sprite.Group(
            [s for s in self.sprites if isinstance(s, DeadlyArea)])
        scene_base.SceneBase.__init__(
            self,
            InputHandler(self),
            graphics.get_controller()
        )
        self.previous = previous
        self.width, self.height = pygame.display.get_window_size()

    def update(self, delta_time):
        """Advance sprites, resolve collisions and handle stage changes."""
        self.sprites.update(delta_time, self.tiled_map)
        self.player.handle_collision(self.collectibles, self.collideables)
        if not self.player.alive():
            self.reset()
        if self.has_completed_minigame():
            self.previous.open_secured_door()
            self.switch_to_scene(self.previous)
        elif self.has_won():
            self.next_stage()

    def has_won(self):
        """A stage is won once no Collectible sprites remain."""
        return not any(isinstance(s, Collectible) for s in self.sprites)

    def has_completed_minigame(self):
        """The minigame is done after winning the third stage."""
        return self.has_won() and self.current_stage == 3

    def render(self):
        self.surface.fill(colours.RED)
        self.sprites.draw(self.surface)

    def get_player(self):
        """Return the Player sprite from the loaded map (None if absent)."""
        for s in self.sprites:
            if isinstance(s, Player):
                return s

    def reset(self):
        # Rebuild the current stage from scratch (player died).
        self.__init__(self.previous, self.current_stage)

    def next_stage(self):
        # Advance and rebuild the scene for the next stage.
        self.current_stage += 1
        self.__init__(self.previous, self.current_stage)
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import request
from indico.modules.auth.controllers import (RHAccounts, RHAdminImpersonate, RHLinkAccount, RHLogin, RHLoginForm,
RHLogout, RHRegister, RHRemoveAccount, RHResetPassword)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('auth', __name__, template_folder='templates', virtual_template_folder='auth')
# Login/registration endpoints.  The two 'login' rules deliberately share
# one endpoint name; the provider-less rule also accepts POST for the
# local login form.
_bp.add_url_rule('/login/', 'login', RHLogin, methods=('GET', 'POST'))
_bp.add_url_rule('/login/<provider>/', 'login', RHLogin)
_bp.add_url_rule('/login/<provider>/form', 'login_form', RHLoginForm)
_bp.add_url_rule('/login/<provider>/link-account', 'link_account', RHLinkAccount, methods=('GET', 'POST'))
_bp.add_url_rule('/logout/', 'logout', RHLogout)
_bp.add_url_rule('/register/', 'register', RHRegister, methods=('GET', 'POST'), defaults={'provider': None})
_bp.add_url_rule('/register/<provider>', 'register', RHRegister, methods=('GET', 'POST'))
_bp.add_url_rule('/reset-password/', 'resetpass', RHResetPassword, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/users/impersonate', 'admin_impersonate', RHAdminImpersonate, methods=('POST',))
# Account management is reachable both with and without an explicit user id.
with _bp.add_prefixed_rules('/user/<int:user_id>', '/user'):
    _bp.add_url_rule('/accounts/', 'accounts', RHAccounts, methods=('GET', 'POST'))
    _bp.add_url_rule('/accounts/<identity>/remove/', 'remove_account', RHRemoveAccount, methods=('POST',))
@_bp.url_defaults
def _add_user_id(endpoint, values):
    # When building an account URL without an explicit user_id, reuse the
    # one from the current request (if any).
    needs_user_id = endpoint in ('auth.accounts', 'auth.remove_account')
    if needs_user_id and 'user_id' not in values:
        values['user_id'] = request.view_args.get('user_id')
# Legacy URLs — redirect the old /user/* paths to the new auth endpoints.
auth_compat_blueprint = _compat_bp = IndicoBlueprint('compat_auth', __name__)
_compat_bp.add_url_rule('/user/login', 'login', make_compat_redirect_func(_bp, 'login'))
_compat_bp.add_url_rule('/user/register', 'register', make_compat_redirect_func(_bp, 'register'))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.models import Group, User
|
def add_moderator_group(apps, schema_editor):
    """Create the "moderators" group and seed it with existing superusers.

    Uses the historical model registry (``apps``) as the Django docs
    recommend for RunPython data migrations, instead of importing the
    live models (which may not match this migration's schema).
    """
    Group = apps.get_model("auth", "Group")
    User = apps.get_model("auth", "User")
    # objects.create() already saves; the old extra g.save() was redundant.
    g = Group.objects.create(name="moderators")
    # Add any existing admin users to the moderators group when we create it;
    # filtering in the database avoids iterating every user.
    for user in User.objects.filter(is_superuser=True):
        g.user_set.add(user)
class Migration(migrations.Migration):
    """Create the "moderators" group and populate it with superusers."""

    dependencies = [("auth", "0008_alter_user_username_max_length")]

    # A no-op reverse makes the migration reversible (unapplying leaves the
    # group in place, which is harmless); without it Django refuses to
    # migrate backwards past this point.
    operations = [
        migrations.RunPython(add_moderator_group, migrations.RunPython.noop),
    ]
|
import nuk | e
import | pyblish.api
class ExtractSceneSave(pyblish.api.Extractor):
    """Save the current Nuke script before other extractors run.

    Ordered at Extractor.order - 0.45 so the save happens early in the
    extraction phase for instances of the 'scene' family.
    """
    hosts = ['nuke']
    order = pyblish.api.Extractor.order - 0.45
    families = ['scene']
    label = 'Scene Save'
    def process(self, instance):
        # Persist the open script via Nuke's own save; the instance itself
        # is not modified here.
        self.log.info('saving scene')
        nuke.scriptSave()
|
"""Responses."""
from io import StringIO
from csv import DictWriter
from flask import Response, jsonify, make_response
from .__version__ import __version__
class ApiResult:
    """A representation of a generic JSON API result."""

    def __init__(self, data, metadata=None, **kwargs):
        """Store input arguments.

        Args:
            data (dict): A dictionary built up for the API to return.
            metadata (dict): Keys and values merged into the metadata
                field of the returned object.
        """
        self.data = data
        self.extra_metadata = metadata
        self.kwargs = kwargs

    def to_response(self):
        """Build a JSON response from the stored data."""
        payload = dict(self.data)
        payload.update(self.kwargs)
        payload['metadata'] = self.metadata(self.extra_metadata)
        return jsonify(payload)

    @staticmethod
    def metadata(extra_metadata=None):
        """Return the standard metadata dict, merged with any extras."""
        # Imported here to avoid a circular import at module load time.
        from .models import SourceData
        meta = {
            'version': __version__,
            'datasetMetadata': [item.to_json()
                                for item in SourceData.query.all()],
        }
        if extra_metadata:
            meta.update(extra_metadata)
        return meta
class QuerySetApiResult(ApiResult):
    """A representation of a list of records (Python dictionaries)."""

    def __init__(self, record_list, return_format, metadata=None, **kwargs):
        """Store the list of records and the format.

        Args:
            record_list (list): Records (dicts) the API should return.
            return_format (str): 'csv' or anything else for JSON.
            metadata (dict): Extra metadata merged into the response.
        """
        super().__init__(record_list, metadata, **kwargs)
        self.record_list = record_list
        self.return_format = return_format

    def to_response(self):
        """Convert the list of records into a response."""
        if self.return_format == 'csv':
            if self.record_list:
                return self.csv_response(self.record_list)
            # No records: empty 204 No Content body.
            return make_response('', 204)
        # Default is JSON
        return self.json_response(self.record_list, self.extra_metadata,
                                  **self.kwargs)

    @staticmethod
    def csv_response(record_list):
        """Render the records as a text/csv Response.

        The header row comes from the keys of the first record; all
        records are assumed to share the same keys.
        """
        string_io = StringIO()
        header = record_list[0].keys()
        writer = DictWriter(f=string_io, fieldnames=header)
        writer.writeheader()
        # Pass the list directly; the old generator wrapper added nothing.
        writer.writerows(record_list)
        return Response(string_io.getvalue(), mimetype='text/csv')

    @staticmethod
    def json_response(record_list, extra_metadata, **kwargs):
        """Convert a list of records into a JSON response."""
        obj = {
            **kwargs,
            'results': record_list,
            'resultSize': len(record_list),
            'metadata': ApiResult.metadata(extra_metadata)
        }
        return jsonify(obj)
# TODO: (jef/jkp 2017-08-29) Add methods for:
# * return warnings, errors
# * return version number
# * documentation
# Needs: Decision on how these should be returned.
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
def related_activities(project):
    """
    Check if related project has an IATI identifier and relation.

    :param project: Project object
    :return: All checks passed boolean, [Check results]
    """
    checks = []
    all_checks_passed = True
    related_count = project.related_projects.count()
    for related in project.related_projects.prefetch_related('related_project').all():
        linked_project = related.related_project
        if not (linked_project or related.related_iati_id):
            all_checks_passed = False
            checks.append(('error', 'related project or IATI identifier not specified'))
        elif linked_project and not linked_project.iati_activity_id:
            all_checks_passed = False
            checks.append(('error', 'related project (id: %s) has no IATI identifier specified' %
                           str(linked_project.pk)))
        if not related.relation:
            all_checks_passed = False
            checks.append(('error', 'relation missing for related project'))
    if related_count and all_checks_passed:
        checks.append(('success', 'has valid related project(s)'))
    return all_checks_passed, checks
|
from formatting import print_call
import credentials
import os.path
import re
import xmlrpclib
def _get_ch_params():
    """Read clearinghouse connection parameters from auth.conf.

    @return (username, host, port, endpoint) tuple
    """
    # Initialise variables when required (import kept local on purpose).
    from core.config import FullConfParser
    # Look up the auth.conf section once instead of re-fetching it for
    # every value.
    auth_conf = FullConfParser().get("auth.conf")
    username = auth_conf.get("certificates").get("username")
    clearinghouse = auth_conf.get("clearinghouse")
    return (username, clearinghouse.get("host"),
            clearinghouse.get("port"), clearinghouse.get("endpoint"))
def api_call(method_name, endpoint=None, params=None, username=None,
             verbose=False):
    """Invoke an XML-RPC method using per-user key/cert files.

    :param method_name: remote method to call
    :param endpoint: URL path; defaults to the configured CH endpoint
    :param params: positional parameters for the call (defaults to [])
    :param username: user whose key/cert pair is used; defaults to the
        configured username
    :param verbose: when True, pretty-print the call and its result
    :return: tuple (code, value, output) from the XML-RPC result dict
    """
    user, _, _, ch_end = _get_ch_params()
    username = username or user
    endpoint = endpoint or ch_end
    # Avoid the shared mutable-default-argument pitfall (params=[])
    if params is None:
        params = []
    key_path = "%s-key.pem" % username
    cert_path = "%s-cert.pem" % username
    res = ssl_call(method_name, params, endpoint,
                   key_path=key_path, cert_path=cert_path)
    if verbose:
        print_call(method_name, params, res)
    return res.get("code", None), \
        res.get("value", None), res.get("output", None)
def ch_call(method_name, endpoint=None, params=None, username=None,
            verbose=False):
    """Invoke an XML-RPC method directly on the clearinghouse host/port.

    :param method_name: remote method to call
    :param endpoint: URL path; defaults to the configured CH endpoint
    :param params: positional parameters for the call (defaults to [])
    :param username: user whose key/cert pair is used; defaults to the
        configured username
    :param verbose: accepted for signature parity with api_call; unused
    :return: raw XML-RPC result
    """
    user, ch_host, ch_port, ch_end = _get_ch_params()
    username = username or user
    endpoint = endpoint or ch_end
    # Avoid the shared mutable-default-argument pitfall (params=[])
    if params is None:
        params = []
    key_path = "%s-key.pem" % username
    cert_path = "%s-cert.pem" % username
    res = ssl_call(method_name, params, endpoint, key_path=key_path,
                   cert_path=cert_path, host=ch_host, port=ch_port)
    return res
def handler_call(method_name, params=None, username=None, arg=None):
    """Call a GENI v3 handler method.

    :param method_name: remote method to call
    :param params: positional parameters (defaults to [])
    :param username: user whose credentials are used; when None,
        api_call falls back to the configured username itself
    :param arg: optional verbosity flag; "-v" or "--verbose" enables
        verbose output
    :return: tuple (code, value, output) from api_call
    """
    # Mutable defaults (params=[], arg=[]) replaced by None sentinels.
    # The original also fetched _get_ch_params() when username was None
    # but discarded the result; api_call resolves the default user, so
    # that dead lookup is dropped here.
    if params is None:
        params = []
    if arg is None:
        arg = []
    verbose = arg in ["-v", "--verbose"]
    return api_call(method_name, "/xmlrpc/geni/3/", params=params,
                    username=username, verbose=verbose)
class SafeTransportWithCert(xmlrpclib.SafeTransport):
    """
    Transport that forces a specific client key/certificate pair.
    """
    def __init__(self, key_path, cert_path):
        # SafeTransport is an old-style class in Python 2, so super()
        # is unavailable; delegate explicitly instead.
        xmlrpclib.SafeTransport.__init__(self)
        self._key_path = key_path
        self._cert_path = cert_path
    def make_connection(self, host):
        """
        Called automatically by ServerProxy whenever a transport channel
        is needed; attaches the configured key and cert to the host.
        """
        extra = {"key_file": self._key_path, "cert_file": self._cert_path}
        # Old-style class: explicit delegation instead of super()
        return xmlrpclib.SafeTransport.make_connection(self, (host, extra))
def ssl_call(method_name, params, endpoint, key_path=None, cert_path=None,
             host=None, port=None):
    """Perform an XML-RPC call over HTTPS with client-certificate auth.

    :param method_name: name of the remote method to invoke
    :param params: positional parameters for the remote method
    :param endpoint: URL path of the endpoint (leading '/' optional)
    :param key_path: client key file; defaults to <username>-key.pem
    :param cert_path: client cert file; defaults to <username>-cert.pem
    :param host: target host; defaults to the configured CH host
    :param port: target port; defaults to the configured CH port
    :return: result of the remote method call
    :raises RuntimeError: when the key or certificate file is missing
    """
    username, ch_host, ch_port, ch_end = _get_ch_params()
    # BUG FIX: "%-key.pem" / "%-cert.pem" were missing the 's' conversion
    # character, so falling back to these defaults raised ValueError.
    key_path = key_path or ("%s-key.pem" % username)
    cert_path = cert_path or ("%s-cert.pem" % username)
    host = host or ch_host
    port = port or ch_port
    endpoint = endpoint or ch_end
    # Resolve relative key/cert paths against the repository "cert" folder
    creds_path = os.path.normpath(os.path.join(os.path.dirname(__file__),
                                               "../../..", "cert"))
    if not os.path.isabs(key_path):
        key_path = os.path.join(creds_path, key_path)
    if not os.path.isabs(cert_path):
        cert_path = os.path.join(creds_path, cert_path)
    key_path = os.path.abspath(os.path.expanduser(key_path))
    cert_path = os.path.abspath(os.path.expanduser(cert_path))
    if not os.path.isfile(key_path) or not os.path.isfile(cert_path):
        raise RuntimeError("Key or cert file not found (%s, %s)"
                           % (key_path, cert_path))
    transport = SafeTransportWithCert(key_path, cert_path)
    # The endpoint is appended to the URL below; strip a leading slash
    if endpoint and len(endpoint):
        if endpoint[0] == "/":
            endpoint = endpoint[1:]
    proxy = xmlrpclib.ServerProxy("https://%s:%s/%s" % (host, str(port),
                                  endpoint), transport=transport)
    method = getattr(proxy, method_name)
    return method(*params)
def getusercred(geni_api=3):
    """Retrieve your user credential. Useful for debugging.
    If you specify the -o option, the credential is saved to a file.
    If you specify --usercredfile:
    First, it tries to read the user cred from that file.
    Second, it saves the user cred to a file by that name
    (but with the appropriate extension)
    Otherwise, the filename is <username>-<framework nickname from
    config file>-usercred.[xml or json, depending on AM API version].
    If you specify the --prefix option then that string starts the filename.
    If instead of the -o option, you supply the --tostdout option,
    then the usercred is printed to STDOUT.
    Otherwise the usercred is logged.
    The usercred is returned for use by calling scripts.
    e.g.:
    Get user credential, save to a file:
    omni.py -o getusercred
    Get user credential, save to a file with filename prefix mystuff:
    omni.py -o -p mystuff getusercred
    """
    from core.config import FullConfParser
    fcp = FullConfParser()
    username = fcp.get("auth.conf").get("certificates").get("username")
    creds_path = os.path.normpath(
        os.path.join(os.path.dirname(__file__), "../../..", "cert"))
    cert_path = os.path.join(creds_path, "%s-cert.pem" % username)
    # Retrieve new credential by contacting the GCF CH.
    # FIX: files are now closed via context managers and the bare
    # "except:" is narrowed to Exception (it previously swallowed
    # SystemExit/KeyboardInterrupt as well).
    try:
        with open(cert_path, "r") as cert_file:
            user_cert = cert_file.read()
        cred = ch_call("CreateUserCredential", params=[user_cert])
    # Exception? -> Retrieve already existing credential from disk (CBAS)
    except Exception:
        cred_path = os.path.join(creds_path, "%s-cred.xml" % username)
        with open(cred_path) as cred_file:
            cred = cred_file.read()
    if geni_api >= 3:
        if cred:
            cred = credentials.wrap_cred(cred)
    credxml = credentials.get_cred_xml(cred)
    # pull the username out of the cred
    # <owner_urn>urn:publicid:IDN+geni:gpo:gcf+user+alice</owner_urn>
    user = ""
    usermatch = re.search(
        r"\<owner_urn>urn:publicid:IDN\+.+\+user\+(\w+)\<\/owner_urn\>",
        credxml)
    if usermatch:
        user = usermatch.group(1)
    return ("Retrieved %s user credential" % user, cred)
|
from twi | sted.internet import reactor,protocol
class EchoClient(protocol.Protocol):
    """Twisted protocol that sends one greeting and closes after the
    first server reply."""
    def connectionMade(self):
        # Fires once the TCP connection is established; send the payload.
        # NOTE(review): Twisted on Python 3 requires bytes here — confirm
        # this module targets Python 2.
        self.transport.write("hello a ")
    def dataReceived(self, data):
        # The first (and only) reply ends the exchange.
        print('Server said:',data)
        self.transport.loseConnection()
    def connectionLost(self, reason):
        print('connection lost')
class EchoFatoty(protocol.ClientFactory):
    """Client factory producing EchoClient connections.

    NOTE(review): the name looks like a typo for "EchoFactory"; renaming
    would also require updating main(), so it is left unchanged here.
    """
    protocol = EchoClient
    def clientConnectionFailed(self, connector, reason):
        # Fires when the connection attempt fails; stop the event loop.
        # NOTE(review): message says "lost" but this is the *failed* path —
        # looks copied from connectionLost.
        print('Connection lost - goodbye!')
        reactor.stop()
def main():
    """Connect the echo client to localhost:9090 and run the reactor."""
    factory = EchoFatoty()
    reactor.connectTCP('localhost', 9090, factory)
    reactor.run()
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""The function module of dolfin"""
from dolfin.functions import multimeshfunction
from dolfin.functions import functionspace
from dolfin.functions import function
from dolfin.functions import constant
from dolfin.functions import expression
from dolfin.functions import specialfunctions
from .multimeshfunction import *
from .functionspace import *
from .function import *
from .constant import *
from .expression import *
from .specialfunctions | import | *
# NOTE: The automatic documentation system in DOLFIN requires to _not_ define
# NOTE: classes or functions within this file. Use separate modules for that
# NOTE: purpose.
# Aggregate the package's public API: star-imports from dolfin.functions
# expose the union of every submodule's __all__.
__all__ = functionspace.__all__ + function.__all__ + constant.__all__ + \
          expression.__all__ + specialfunctions.__all__ + \
          multimeshfunction.__all__
|
from pyramid.view import view_config
import logging
@view_config(route_name='hell | o_json', renderer='json')
def hello_json(request):
logger = logging.getLogger(__name__)
logger.info("Got JSON from name: {n}".format(n = __name__))
request.session['co | unter'] = request.session.get('counter', 0) + 1
return {
'a': [1,2,request.session['counter']],
'b': ['x', 'y'],
}
|
rn dict
    Characteristic of a tweet
"""
ret = {}
text = tweet['text']
retweets = tweet['retweet_count']
favorites = tweet['favorite_count']
followers = tweet['author_followers']
friends = tweet['author_friends']
publishes = tweet['author_num_of_status']
blob = TextBlob(text)
polarity = blob.sentiment.polarity
ret['scaled_polarity'] = calculate_scaled_polarity(
polarity,
int(retweets),
int(favorites),
int(followers),
int(friends),
int(publishes)
)
ret['retweets'] = retweets
ret['favorites'] = favorites
ret['followers'] = followers
ret['friends'] = friends
ret['publishes'] = publishes
ret['polarity'] = polarity
# print 'p=%.2f re=%d fav=%d, fol=%d, fd=%d, pub=%d' % (
# polarity, retweets, favorites, followers, friends, publishes
# )
return ret
def calculate_scaled_polarity(
        polarity, retweets, favorites, followers, friends, publishes):
    """
    Return a scaled polarity for a tweet
    @param polarity float
    @param retweets int
    @param favorites int
    @param followers int
    @param friends int
    @param publishes int
    @return float
    """
    # Clamp each count to at least 1 so log2 is defined and non-negative,
    # then scale the polarity by the summed log2 "entropy" of the counts.
    counts = (retweets, favorites, followers, friends, publishes)
    entropy = sum(log(count if count > 0 else 1, 2) for count in counts)
    return round(polarity * entropy, 2)
def tweets2film(tweet_characteristics):
    """
    Aggregate tweet characteristics to form a film's characteristics
    @param tweet_characteristics list of dict
    @return dict
        characteristics of a film
    """
    ret = {}
    retweets_data = []
    favorites_data = []
    polarities_data = []
    friends_data = []
    followers_data = []
    for t in tweet_characteristics:
        retweets_data.append(t['retweets'])
        favorites_data.append(t['favorites'])
        polarities_data.append(t['polarity'])
        friends_data.append(t['friends'])
        followers_data.append(t['followers'])
    retweets = numpy.array(retweets_data)
    favorites = numpy.array(favorites_data)
    polarities = numpy.array(polarities_data)
    friends = numpy.array(friends_data)
    followers = numpy.array(followers_data)
    for data_name, data_list in [
        ('retweets', retweets),
        ('favorites', favorites),
        ('polarities', polarities),
        ('friends', friends),
        ('followers', followers)
    ]:
        # BUG FIX: the Python-2-only "print ..." statement is replaced by
        # the call form, which prints the same line on Python 2 and 3.
        print('|%s| sd: %f mean: %f min: %d max: %d' % (
            data_name,
            round(data_list.std(), 2),
            round(numpy.average(data_list), 2),
            data_list.min(),
            data_list.max(),
        ))
    ret['avg_polarity'] = round(numpy.average(polarities_data), 2)
    ret['std_retweet'] = round(retweets.std(), 2)
    # Log2-scaled totals compress the heavy-tailed count distributions
    ret['log_retweets'] = round(log(sum(retweets_data)) / log(2), 2)
    ret['log_favorites'] = round(log(sum(favorites_data)) / log(2), 2)
    return ret
def construct_film_characteristic(film_name, tweet_characteristics):
    """
    Construct featuresets for given parameters
    @param film_name string
    @param tweet_characteristics list of dict
    @return featuresets
    """
    ret = {}
    # Analyze film's attributes
    ret['length_of_film'] = len(film_name)
    ret['number_of_words'] = len(film_name.split(' '))
    # Analyze tweet's characteristics
    aggregated_characteristic = tweets2film(tweet_characteristics)
    # BUG FIX: dict(a.items() + b.items()) only works on Python 2 (dict
    # views cannot be added on Python 3); update() behaves the same on both.
    ret.update(aggregated_characteristic)
    return ret
def predictCandidates():
    """Classify every candidate film found in CANDIDATE_DIR.

    For each candidate file: load its tweets, build per-tweet
    characteristics, aggregate them into a film characteristic, and print
    the label predicted by the module-level ``classifier``.
    """
    list_of_files = os.listdir(CANDIDATE_DIR)
    for fn in list_of_files:
        path = os.path.join(CANDIDATE_DIR, fn)
        # Film name is the file name without its extension
        film_name = os.path.splitext(fn)[0]
        with open(path, 'r') as f:
            tweets = json.load(f)
        # Files hold a JSON-encoded string of JSON, hence the second decode
        tweets = json.loads(tweets)
        tweet_characteristics = []
        for tweet in tweets:
            # Per tweet analyze
            characteristic = attribute_to_characteristic(tweet)
            tweet_characteristics.append(characteristic)
        film_characteristic = construct_film_characteristic(
            film_name,
            tweet_characteristics
        )
        # NOTE(review): ``classifier`` is a module-level global assigned
        # after this function's definition; calling this before the
        # training code has run raises NameError.
        result = classifier.classify(film_characteristic)
        print 'film: |%s| PREDICT: |%s|\n' % (film_name, result)
features = []
for my_dir in [OSCAR_DIR, RAZZIES_DIR]:
label = os.path.basename(my_dir)
print "=========== Training {0} ============".format(label)
for fn in os.listdir(my_dir):
path = os.path.join(my_dir, fn)
film_name = os.path.splitext(fn)[0]
# print 'dir=%s, film_name=%s, path=%s' % (my_dir, film_name, path)
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
# Per tweet analyze
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
try:
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
except Exception as e:
print '{0}: {1}'.format(film_name, e)
else:
# print 'film: |%s|' % film_name
# print film_characteristic
feature = (film_characteristic, label)
features.append(feature)
# Train the classifier
classifier = NaiveBayesClassifier.train(features)
classifier.show_most_informative_features(10)
# Predict the film
report = {}
predict_labels = ['oscar', 'razzies']
for predict_label in predict_labels:
my_dir = os.path.join(PREDICT_DIR, predict_label)
list_of_files = os.listdir(my_dir)
report[predict_label] = {
'number_of_match': 0,
'number_of_films': len(list_of_files)
}
for fn in list_of_files:
path = os.path.join(my_dir, fn)
film_name = os.path.splitext(fn)[0]
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
# Per tweet analyze
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
result = classifier.classify(film_characteristic)
if result == predict_label:
report[predict_label]['number_of_match'] += 1
print film_characteristic
print 'film: |%s| PREDICT: |%s|\n' % (film_name, result)
report['features'] = film_characteristic.keys()
# classifier.show_most_informative_features()
print "# Features in film's characteristic\n"
for f in report['features']:
print '* %s' % f
print '\n# Prediction\n'
for predict_label in predict_labels:
r = report[predict_label]
print '## %s\n' % predict_label
print 'match %d out of %d, accuracy=%d%%\n' % (
r['number_of_match'],
r['number_of_films'],
round(r['number_of_match'] / r['number_of_films'] * 100)
)
print '## overall\n'
print 'match %d out of %d, accuracy=%d%%\n' % (
sum(
[report[p]['number_of_matc |
"""Mark the current profile as saved by colouring the text black.
"""
index = self.profile_combo.currentIndex()
item = self.profile_combo.model().item(index)
item.setForeground(QtGui.QColor('black'))
def add_new_resource(self):
"""Handle add new resource requests.
"""
parameters_widget = [
self.parameters_scrollarea.layout().itemAt(i) for i in
range(self.parameters_scrollarea.layout().count())][0].widget()
parameter_widgets = [
parameters_widget.vertical_layout.itemAt(i).widget() for i in
range(parameters_widget.vertical_layout.count())]
parameter_widgets[0].set_text('')
parameter_widgets[1].set_text('')
parameter_widgets[2].set_text('')
parameter_widgets[3].set_text('')
parameter_widgets[4].set_text('')
parameter_widgets[5].set_value(10)
parameter_widgets[6].set_value(0)
parameter_widgets[7].set_value(100)
parameter_widgets[8].set_text(tr('weekly'))
parameter_widgets[9].set_text(tr(
"A displaced person should be provided with "
"{{ Default }} {{ Unit }}/{{ Units }}/{{ Unit abbreviation }} of "
"{{ Resource name }}. Though no less than {{ Minimum allowed }} "
"and no more than {{ Maximum allowed }}. This should be provided "
"{{ Frequency }}."))
self.stacked_widget.setCurrentWidget(self.resource_edit_page)
# hide the close button
self.button_box.button(QDialogButtonBox.Close).setHidden(True)
def edit_resource(self):
"""Handle edit resource requests.
"""
self.mark_current_profile_as_pending()
resource = None
for item in self.resources_list.selectedItems()[:1]:
resource = item.resource_full
self.edit_item = item
if not resource:
return
parameters_widget = [
self.parameters_scrollarea.layout().itemAt(i) for i in
range(self.parameters_scrollarea.layout().count())][0].widget()
parameter_widgets = [
parameters_widget.vertical_layout.itemAt(i).widget() for i in
range(parameters_widget.vertical_layout.count())]
parameter_widgets[0].set_text(resource['Resource name'])
parameter_widgets[1].set_text(resource['Resource description'])
parameter_widgets[2].set_text(resource['Unit'])
parameter_widgets[3].set_text(resource['Units'])
parameter_widgets[4].set_text(resource['Unit abbreviation'])
parameter_widgets[5].set_value(float(resource['Default']))
parameter_widgets[6].set_value(float(resource['Minimum allowed']))
parameter_widgets[7].set_value(float(resource['Maximum allowed']))
parameter_widgets[8].set_text(resource['Frequency'])
parameter_widgets[9].set_text(resource['Readable sentence'])
self.switch_context(self.resource_edit_page)
def set_up_resource_parameters(self):
"""Set up the resource parameter for the add/edit view.
"""
name_parameter = StringParameter('UUID-1')
name_parameter.name = tr('Resource name')
name_parameter.help_text = tr(
'Name of the resource that will be provided '
'as part of minimum needs. '
'e.g. Rice, Water etc.')
name_parameter.description = tr(
'A <b>resource</b> is something that you provide to displaced '
'persons in the event of a disaster. The resource will be made '
'available at IDP camps and may need to be stockpiled by '
'contingency planners in their preparations for a disaster.')
name_parameter.is_required = True
name_parameter.value = ''
description_parameter = StringParameter('UUID-2')
description_parameter.name = tr('Resource description')
description_parameter.help_text = tr(
'Description of the resource that will be provided as part of '
'minimum needs.')
description_parameter.description = tr(
'This gives a detailed description of what the resource is and ')
description_parameter.is_required = True
description_parameter.value = ''
unit_parameter = StringParameter('UUID-3')
unit_parameter.name = tr('Unit')
unit_parameter.help_text = tr(
'Single unit for the resources spelled out. e.g. litre, '
'kilogram etc.')
unit_parameter.description = tr(
'A <b>unit</b> is the basic measurement unit used for computing '
'the allowance per individual. For example when planning water '
'rations the unit would be single litre.')
unit_parameter.is_required = True
unit_parameter.value = ''
units_parameter = StringParameter('UUID-4')
units_parameter.name = tr('Units')
units_parameter.help_text = tr(
'Multiple units for the resources spelled out. e.g. litres, '
'kilogram etc.')
units_parameter.description = tr(
'<b>Units</b> are the basic measurement used for computing the '
'allowance per individual. For example when planning water '
'rations the units would be litres.')
units_parameter.is_required = True
units_parameter.value = ''
unit_abbreviation_parameter = StringParameter('UUID-5')
unit_abbreviation_parameter.name = tr('Unit abbreviation')
unit_abbreviation_parameter.help_text = tr(
'Abbreviations of unit for the resources. e.g. l, kg etc.')
unit_abbreviation_parameter.description = tr(
"A <b>unit abbreviation</b> is the basic measurement unit's "
"shortened. For example when planning water rations "
"the units would be l.")
unit_abbreviation_parameter.is_required = True
unit_abbreviation_parameter.value = ''
minimum_parameter = FloatParameter('UUID-6')
minimum_parameter.name = tr('Minimum allowed')
minimum_parameter.is_required = True
minimum_parameter.precision = 2
minimum_parameter.minimum_allowed_value = -99999.0
minimum_parameter.maximum_allowed_value = 99999.0
minimum_parameter.help_text = tr(
'The minimum allowable quantity per person. ')
minimum_parameter.description = tr(
'The <b>minimum</b> is the minimum allowed quantity of the '
'resource per person. For example you may dictate that the water '
'ration per person per day should never be allowed to be less '
'than 0.5l. This is enforced when tweaking a minimum needs set '
'before an impact evaluation')
| minimum_parameter.value = 0.00
maximum_parameter = FloatParameter('UUID-7')
maximum_parameter.name = tr('Maximum allowed')
maximum_parameter.is_required = True
maximum_parameter.precision = 2
maximum_parameter.minimum_allowed_value = -99999.0
maximum_parameter.maximum_allowed_value = 99999.0
maximum_parameter.help_text = tr(
'The maximum allowable quantity per pers | on. ')
maximum_parameter.description = tr(
'The <b>maximum</b> is the maximum allowed quantity of the '
'resource per person. For example you may dictate that the water '
'ration per person per day should never be allowed to be more '
'than 67l. This is enforced when tweaking a maximum needs set '
'before an impact evaluation.')
maximum_parameter.value = 100.0
default_parameter = FloatParameter('UUID-8')
default_parameter.name = tr('Default')
default_parameter.is_required = True
default_parameter.precision = 2
default_parameter.minimum_allowed_value = -99999.0
default_parameter.maximum_allowed_value = 99999.0
default_parameter.help_text = tr(
'The default allowable quantity per person. ')
default_parameter.description = tr(
"The <b>default</b> is the def |
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import AsyncIterable
import grpc
from grpc.aio._metadata import Metadata
from grpc.aio._typing import MetadataKey
from grpc.aio._typing import MetadataValue
from grpc.aio._typing import MetadatumType
from grpc.experimental import aio
from tests.unit.framework.common import test_constants
ADHOC_METHOD = '/test/AdHoc'
def seen_metadata(expected: Metadata, actual: Metadata):
    """True iff every expected metadatum appears in ``actual``."""
    return set(tuple(expected)).issubset(set(tuple(actual)))
def seen_metadatum(expected_key: MetadataKey, expected_value: MetadataValue,
                   actual: Metadata) -> bool:
    """True iff ``actual`` maps ``expected_key`` to ``expected_value``."""
    return actual[expected_key] == expected_value
async def block_until_certain_state(channel: aio.Channel,
                                    expected_state: grpc.ChannelConnectivity):
    """Wait until ``channel`` reaches ``expected_state``."""
    current = channel.get_state()
    while current != expected_state:
        await channel.wait_for_state_change(current)
        current = channel.get_state()
def inject_callbacks(call: aio.Call):
    """Attach two done-callbacks to ``call`` and return a coroutine that
    waits until both have fired (within SHORT_TIMEOUT)."""

    def _done_callback_pair():
        # Build an (event, callback) pair; the callback asserts the call
        # has reached an end state (all responses received) before
        # signalling its event.
        ran = asyncio.Event()

        def _callback(done_call):
            assert done_call.done()
            ran.set()

        return ran, _callback

    first_ran, first_callback = _done_callback_pair()
    second_ran, second_callback = _done_callback_pair()
    call.add_done_callback(first_callback)
    call.add_done_callback(second_callback)

    async def validation():
        await asyncio.wait_for(
            asyncio.gather(first_ran.wait(), second_ran.wait()),
            test_constants.SHORT_TIMEOUT)

    return validation()
class CountingRequestIterator:
    """Wraps a request iterator, counting the requests passed through."""

    def __init__(self, request_iterator):
        # Number of requests yielded so far
        self.request_cnt = 0
        self._request_iterator = request_iterator

    def __aiter__(self):
        return self._forward_requests()

    async def _forward_requests(self):
        async for item in self._request_iterator:
            self.request_cnt += 1
            yield item
class CountingResponseIterator:
    """Wraps a response iterator, counting the responses passed through."""

    def __init__(self, response_iterator):
        # Number of responses yielded so far
        self.response_cnt = 0
        self._response_iterator = response_iterator

    def __aiter__(self):
        return self._forward_responses()

    async def _forward_responses(self):
        async for item in self._response_iterator:
            self.response_cnt += 1
            yield item
class AdhocGenericHandler(grpc.GenericRpcHandler):
    """A generic handler to plugin testing server methods on the fly."""
    _handler: grpc.RpcMethodHandler

    def __init__(self):
        self._handler = None

    def set_adhoc_handler(self, handler: grpc.RpcMethodHandler):
        self._handler = handler

    def service(self, handler_call_details):
        # Only the ad-hoc test method is served; anything else is rejected.
        if handler_call_details.method != ADHOC_METHOD:
            return None
        return self._handler
|
def _save_device_metadata(self, context, instance, block_device_info):
"""Builds a metadata object for instance devices, that maps the user
provided tag to the hypervisor assigned device address.
"""
metadata = []
metadata.extend(self._get_vif_metadata(context, instance.uuid))
if block_device_info:
metadata.extend(self._block_dev_man.get_bdm_metadata(
context, instance, block_device_info))
if metadata:
instance.device_metadata = objects.InstanceDeviceMetadata(
devices=metadata)
def set_boot_order(self, instance_name, vm_gen, block_device_info):
boot_order = self._block_dev_man.get_boot_order(
vm_gen, block_device_info)
LOG.debug("Setting boot order for instance: %(instance_name)s: "
"%(boot_order)s", {'instance_name': instance_name,
'boot_order': boot_order})
self._vmutils.set_boot_order(instance_name, boot_order)
    @check_admin_permissions
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it.

        Creates root/ephemeral disks, builds the VM while waiting for the
        Neutron VIF-plug events, persists device metadata, optionally
        attaches a config drive, sets the boot order and powers on.  On
        any failure the partially created instance is destroyed and the
        exception re-raised.
        """
        LOG.info(_LI("Spawning new instance"), instance=instance)
        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)
        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)
        # May adjust block_device_info to match the chosen VM generation.
        self._block_dev_man.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])
        try:
            with self.wait_vif_plug_events(instance, network_info):
                # waiting will occur after the instance is created.
                self.create_instance(instance, network_info, root_device,
                                     block_device_info, vm_gen, image_meta)
            self._save_device_metadata(context, instance, block_device_info)
            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(context,
                                                             instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)
                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(instance.name, vm_gen, block_device_info)
            self.power_on(instance, network_info=network_info)
        except Exception:
            # Roll back whatever was created, then re-raise the original
            # exception for the compute manager to handle.
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
    @contextlib.contextmanager
    def wait_vif_plug_events(self, instance, network_info):
        """Context manager that waits for Neutron VIF-plug events.

        Collects the 'network-vif-plugged' events expected for *instance*
        and blocks (up to CONF.vif_plugging_timeout) until they arrive.
        A timeout raises VirtualInterfaceCreateException only when
        CONF.vif_plugging_is_fatal is set; otherwise it is only logged.
        """
        timeout = CONF.vif_plugging_timeout
        events = self._get_neutron_events(network_info)
        try:
            with self._virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=self._neutron_failed_callback):
                yield
        except etimeout.Timeout:
            # We never heard from Neutron
            LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                            'instance.'), instance=instance)
            if CONF.vif_plugging_is_fatal:
                raise exception.VirtualInterfaceCreateException()
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event %s'),
event_name, instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
if utils.is_neutron() and CONF.vif_plugging_timeout:
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active') is False]
else:
return []
    def create_instance(self, instance, network_info, root_device,
                        block_device_info, vm_gen, image_meta):
        """Build the Hyper-V VM: vNUMA/memory config, disks, volumes,
        serial ports, NICs and (optionally) metrics collection.
        """
        instance_name = instance.name
        instance_path = os.path.join(CONF.instances_path, instance_name)
        # NOTE(review): secure_boot_enabled is computed but not used in
        # this method — confirm whether it is dead code or consumed by a
        # later revision.
        secure_boot_enabled = self._requires_secure_boot(instance, image_meta,
                                                         vm_gen)
        memory_per_numa_node, cpus_per_numa_node = (
            self._get_instance_vnuma_config(instance, image_meta))
        if memory_per_numa_node:
            LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning "
                      "has to be disabled in order for the instance to "
                      "benefit from it.", instance=instance)
            # vNUMA is incompatible with dynamic memory: force ratio 1.0.
            if CONF.hyperv.dynamic_memory_ratio > 1.0:
                LOG.warning(_LW(
                    "Instance vNUMA topology requested, but dynamic memory "
                    "ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
                    "memory ratio option."), instance=instance)
                dynamic_memory_ratio = 1.0
            vnuma_enabled = True
        else:
            dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
            vnuma_enabled = False
        if instance.pci_requests.requests:
            # NOTE(claudiub): if the instance requires PCI devices, its
            # host shutdown action MUST be shutdown.
            host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN
        else:
            host_shutdown_action = None
        self._vmutils.create_vm(instance_name,
                                vnuma_enabled,
                                vm_gen,
                                instance_path,
                                [instance.uuid])
        self._vmutils.update_vm(instance_name,
                                instance.flavor.memory_mb,
                                memory_per_numa_node,
                                instance.flavor.vcpus,
                                cpus_per_numa_node,
                                CONF.hyperv.limit_cpu_features,
                                dynamic_memory_ratio,
                                host_shutdown_action=host_shutdown_action)
        self._configure_remotefx(instance, vm_gen)
        # Attach storage: SCSI controller, root disk, ephemerals, volumes.
        self._vmutils.create_scsi_controller(instance_name)
        self._attach_root_device(instance_name, root_device)
        self._attach_ephemerals(instance_name, block_device_info['ephemerals'])
        self._volumeops.attach_volumes(
            block_device_info['block_device_mapping'], instance_name)
        # For the moment, we use COM port 1 when getting the serial console
        # log as well as interactive sessions. In the future, the way in which
        # we consume instance serial ports may become configurable.
        #
        # Note that Hyper-V instances will always have 2 COM ports
        serial_ports = {
            constants.DEFAULT_SERIAL_CONSOLE_PORT:
                constants.SERIAL_PORT_TYPE_RW}
        self._create_vm_com_port_pipes(instance, serial_ports)
        for vif in network_info:
            LOG.debug('Creating nic for instance', instance=instance)
            self._vmutils.create_nic(instance_name,
                                     vif['id'],
                                     vif['address'])
        if CONF.hyperv.enable_instance_metrics_collection:
            self._metricsutils.enable_vm_metrics_collection(instance_name)
|
r, False otherwise.
"""
try:
int(output)
error = False
except ValueError as err:
error = True
LOGGER.error(err)
return error
def count_jobs():
    """
    Count the number of jobs in the queue on the cluster

    :return: number of jobs in the queue; 0 when running locally or when
             the scheduler reports a negative count
    """
    if not command_found(cmd=DAX_SETTINGS.get_cmd_submit()):
        LOGGER.info(' Running locally. No queue with jobs.')
        return 0
    cmd = DAX_SETTINGS.get_cmd_count_nb_jobs()
    # BUG FIX: check_output returns bytes on Python 3 and int(bytes)
    # raises TypeError (uncaught by c_output); decode before parsing,
    # matching the decode() usage elsewhere in this module.
    output = sb.check_output(cmd, shell=True).decode().strip()
    while c_output(output):
        LOGGER.info(' try again to access number of jobs in 2 seconds.')
        time.sleep(2)
        output = sb.check_output(cmd, shell=True).decode().strip()
    # Negative counts are treated as an empty queue
    return max(int(output), 0)
def job_status(jobid):
    """
    Get the status for a job on the cluster

    :param jobid: job id to check
    :return: 'R' (running), 'Q' (queued), 'C' (complete) or None
    """
    cmd = DAX_SETTINGS.get_cmd_get_job_status()\
        .safe_substitute({'jobid': jobid})
    LOGGER.debug(str(cmd).strip())
    try:
        output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
        LOGGER.debug('output='+str(output))
        status = output.decode().strip()
    except sb.CalledProcessError as e:
        LOGGER.debug(str(e))
        return None
    if status == DAX_SETTINGS.get_running_status():
        return 'R'
    if status == DAX_SETTINGS.get_queue_status():
        return 'Q'
    # An empty response is treated the same as the complete status
    if status == DAX_SETTINGS.get_complete_status() or not status:
        return 'C'
    return None
def is_traceable_date(jobdate):
    """
    Check if the job is traceable on the cluster

    :param jobdate: launching date of the job (YYYY-MM-DD)
    :return: True if traceable, False otherwise.
    """
    try:
        launched = datetime.strptime(jobdate, "%Y-%m-%d")
    except ValueError:
        # Malformed date strings are simply not traceable
        return False
    return (datetime.today() - launched).days <= MAX_TRACE_DAYS
def tracejob_info(jobid, jobdate):
    """
    Trace the job information from the cluster

    :param jobid: job id to check
    :param jobdate: launching date of the job (YYYY-MM-DD)
    :return: dictionary object with 'mem_used', 'walltime_used', 'jobnode'
    """
    started = datetime.strptime(jobdate, "%Y-%m-%d")
    # +1 so the launch day itself is included in the lookup window
    days_back = (datetime.today() - started).days + 1
    return {
        'mem_used': get_job_mem_used(jobid, days_back),
        'walltime_used': get_job_walltime_used(jobid, days_back),
        'jobnode': get_job_node(jobid, days_back),
    }
def get_job_mem_used(jobid, diff_days):
    """
    Read the memory used by a job from the cluster accounting tool.

    :param jobid: job id to check
    :param diff_days: number of days between the launch date and today
    :return: memory usage string, or '' when unknown or on error
    """
    # Nothing to query without a job id.
    if not jobid:
        return ''
    cmd = DAX_SETTINGS.get_cmd_get_job_memory()\
                      .safe_substitute({'numberofdays': diff_days,
                                        'jobid': jobid})
    try:
        raw = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
        # sacct can report failures on stdout with a zero exit code.
        if raw.startswith(b'sacct: error'):
            raise ClusterError(raw)
        if raw:
            return raw.strip().decode()
    except (sb.CalledProcessError, ClusterError):
        # Best effort only: fall through to the empty answer.
        pass
    return ''
def get_job_walltime_used(jobid, diff_days):
    """
    Read the walltime used by a job from the cluster accounting tool.

    :param jobid: job id to check
    :param diff_days: number of days between the launch date and today
    :return: walltime string, '' when unknown, 'NotFound' for old jobs
    """
    # Nothing to query without a job id.
    if not jobid:
        return ''
    cmd = DAX_SETTINGS.get_cmd_get_job_walltime()\
                      .safe_substitute({'numberofdays': diff_days,
                                        'jobid': jobid})
    walltime = ''
    try:
        raw = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
        if raw:
            walltime = raw.strip().decode()
    except sb.CalledProcessError:
        pass
    # After a few days an empty answer means the record is gone for good.
    if not walltime and diff_days > 3:
        return 'NotFound'
    return walltime
def get_job_node(jobid, diff_days):
    """
    Read the node a job ran on from the cluster accounting tool.

    :param jobid: job id to check ('no_qsub' means the job ran locally)
    :param diff_days: number of days between the launch date and today
    :return: node name string, or '' when unknown or on error
    """
    jobnode = ''
    # Check for blank jobid
    if not jobid:
        return jobnode
    if jobid == 'no_qsub':
        # Job ran locally: report this machine's hostname, i.e. the
        # second field of `uname -a`.
        cmd = 'uname -a'
        output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
        # BUGFIX: check_output returns bytes on Python 3 -- decode before
        # splitting with a str separator (bytes.split(' ') raises TypeError).
        output = output.decode().strip()
        if output and len(output.split(' ')) > 1:
            jobnode = output.split(' ')[1]
        return jobnode
    cmd = DAX_SETTINGS.get_cmd_get_job_node()\
                      .safe_substitute({'numberofdays': diff_days,
                                        'jobid': jobid})
    try:
        output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
        if output:
            jobnode = output.strip()
            jobnode = jobnode.decode()
    except sb.CalledProcessError:
        pass
    return jobnode
def get_specific_str(big_str, prefix, suffix):
    """
    Extract the part of a string between a prefix and a suffix marker.

    :param big_str: string to reduce
    :param prefix: leading marker to cut on (ignored if falsy or absent)
    :param suffix: trailing marker to cut on (ignored if falsy or absent)
    :return: the reduced string, or '' when neither marker matched
    """
    reduced = big_str
    if prefix and prefix in reduced:
        reduced = reduced.split(prefix)[1]
    if suffix and suffix in reduced:
        reduced = reduced.split(suffix)[0]
    # If nothing was cut off, signal failure with an empty string.
    return reduced if reduced != big_str else ''
def command_found(cmd='qsub'):
    """
    Return True if the executable `cmd` was found on the PATH.

    :param cmd: command name to look for (default: 'qsub')
    :return: True when an executable regular file with that name exists
             in one of the PATH directories, False otherwise
    """
    # Early-return loop instead of `True in [listcomp]`: stops at the
    # first hit and computes os.path.join only once per directory.
    for path in os.environ["PATH"].split(os.pathsep):
        exe = os.path.join(path, cmd)
        if os.path.isfile(exe) and os.access(exe, os.X_OK):
            return True
    return False
class PBS(object): # The script file generator class
""" PBS class to generate/submit the cluster file to run a task """
def __init__(self, filename, outfile, cmds, walltime_str, mem_mb=2048,
ppn=1, env=None, email=None,
email_options=DAX_SETTINGS.get_email_opts(), rungroup=None,
xnat_host=None, job_template=None):
"""
Entry point for the PBS class
:param filename: filename for the script
:param outfile: filepath for the outlogs
:param cmds: commands to run in the script
:param walltime_str: walltime to set for the script
:param mem_mb: memory in mb to set for the script
:param ppn: number of processor to set for the script
:param env: Environment file to source for the script
:param email: email address to set for the script
:param email_options: email options to set for the script
:param rungroup: group to run job under on the cluster
:param xnat_host: set the XNAT_HOST for the job (export)
:return: None
"""
self.filename = filename
self.outfile = outfile
self.cmds = cmds
self.walltime_str = walltime_str
self.mem_mb = mem_mb
self.email = email
self.email_options = email_options
self.rungroup = rungroup
self.ppn = ppn
self.job_template = job_template
if env:
self.env = env
else:
self.env = os.path.join(os.environ['HOME'], '.bashrc')
if xnat_host:
self.xnat_host = xnat_host
else:
self.xnat_host = os.environ['XNAT_HOST']
def write(self):
"""
Write the file
:return: |
import argparse
from cheat_ext.info import info, ls
from cheat_ext.installer import (
install, upgrade, remove
)
from cheat_ext.linker import link, unlink
def _install(args):
    """Install the given repository, then link its sheets."""
    repo = args.repository
    install(repo)
    link(repo)
def _upgrade(args):
    """Upgrade the given repository, then re-link its sheets."""
    repo = args.repository
    upgrade(repo)
    link(repo)
def _remove(args):
    """Unlink the repository's sheets first, then remove the repository."""
    repo = args.repository
    unlink(repo)
    remove(repo)
def _info(args):
    """Show information about the given repository."""
    repo = args.repository
    info(repo)
def _ls(args):
    """List available repositories; `args` carries no options here."""
    ls()
parser = argparse.ArgumentParser(description="cheat extension")
subparsers = parser.add_subparsers()
install_parser = subparsers.add_parser("install")
install_parser.add_argument("repository", type=str)
install_parser.set_defaults(func=_install)
upgrade_parser = subparsers.add_parser("upgrade")
upgrade_parser.add_argument("reposi | tory", type=str)
upgrade_parser.set_defaults(func=_upgrade)
remove_parser = subparsers.add_parser("remove")
remove_parser.add_argument("repository", type=str)
remove_parser.set_defaults(func=_remove)
info_parser = subparsers.add_parser("info")
inf | o_parser.add_argument("repository", type=str)
info_parser.set_defaults(func=_info)
ls_parser = subparsers.add_parser("ls")
ls_parser.set_defaults(func=_ls)
def main():
    """Parse the command line and dispatch to the chosen subcommand."""
    options = parser.parse_args()
    # On Python 3, subcommands registered with add_subparsers() are
    # optional by default: invoking the program with no subcommand leaves
    # `options` without a `func` attribute. Show the usage instead of
    # crashing with an AttributeError.
    if not hasattr(options, "func"):
        parser.print_help()
        return
    options.func(options)
|
# Shared Sphinx configuration for the Invoke documentation builds.
from datetime import datetime
from os.path import abspath, join, dirname
import alabaster
# Alabaster theme + mini-extension
html_theme_path = [alabaster.get_path()]
extensions = ['alabaster', 'sphinx.ext.intersphinx', 'sphinx.ext.doctest']
# Paths relative to invoking conf.py - not this shared file
html_theme = 'alabaster'
html_theme_options = {
    'description': "Pythonic task execution",
    'github_user': 'pyinvoke',
    'github_repo': 'invoke',
    'analytics_id': 'UA-18486793-3',
    'travis_button': True,
    'codecov_button': True,
}
# Sidebar layout used on every page ('**' glob).
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'searchbox.html',
        'donate.html',
    ]
}
# Everything intersphinx's to Python
intersphinx_mapping = {
    'python': ('https://docs.python.org/2.7/', None),
}
# Doctest settings
# Make the test helpers importable from doctest snippets.
doctest_path = [abspath(join(dirname(__file__), '..', 'tests'))]
doctest_global_setup = r"""
from _util import MockSubprocess
"""
# Regular settings
project = 'Invoke'
# Keep the copyright year current at build time.
year = datetime.now().year
copyright = '{} Jeff Forcier'.format(year)
master_doc = 'index'
templates_path = ['_templates']
# NOTE(review): exclude_trees is the pre-Sphinx-1.0 spelling of
# exclude_patterns -- presumably kept for an old Sphinx; confirm.
exclude_trees = ['_build']
source_suffix = '.rst'
default_role = 'obj'
|
# -*- coding: utf8 -*-
'''
Copyright 2009 Denis Derman <denis.spir@gmail.com> (former developer)
Copyright 2011-2012 Peter Potrowl <peter017@gmail.com> (current developer)
This file is part of Pijnu.
Pijnu is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pijnu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with Pijnu. If not, see <http://www.gnu.org/licenses/>.
'''
""" wikiLine
lineChar : [\x20..\x7e]
rawChar : [\x20..\x7e !!/!_]
DISTINCT : "//" : drop
IMPORTANT : "!!" : drop
MONOSPACE : "__" : drop
rawText : rawChar+ : join
distinctText : DISTINCT inline DISTINCT : liftValue
importantText : IMPORTANT inline IMPORTANT : liftValue
monospaceText : MONOSPACE inline MONOSPACE : liftValue
styledText : distinctText / importantText / monospaceText
text : styledText / rawText
inline : @ text+
"""
from pijnu import *
# title: wikiLine
# Hand-written pijnu parser matching the grammar in the docstring above:
# inline text with //distinct//, !!important!! and __monospace__ spans.
# `inline` is recursive, so it starts as a Recursion() placeholder and is
# bound for real with `**=` once all alternatives are defined.
inline = Recursion()
# Character classes for any printable char / printable minus the markers.
lineChar = Klass(' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
rawChar = Klass(' "#$%&\'()*+,-.0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^`abcdefghijklmnopqrstuvwxyz{|}~ ')
# Style delimiters; (drop) removes them from the parse result.
DISTINCT = Word('//')(drop)
IMPORTANT = Word('!!')(drop)
MONOSPACE = Word('__')(drop)
rawText = OneOrMore(rawChar)(join)
# Each styled span is delimiter + nested inline + delimiter.
distinctText = Sequence(DISTINCT, inline, DISTINCT)(liftValue)
importantText = Sequence(IMPORTANT, inline, IMPORTANT)(liftValue)
monospaceText = Sequence(MONOSPACE, inline, MONOSPACE)(liftValue)
styledText = Choice(distinctText, importantText, monospaceText)
text = Choice(styledText, rawText)
inline **= OneOrMore(text)
# Top-level parser: entry point is the `inline` pattern.
parser = Parser('wikiLine', locals(), 'inline')
|
e_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
class BackupPoliciesOperations(object):
"""BackupPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.netapp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, serializers and configuration."""
        # Pipeline client used to send the HTTP requests.
        self._client = client
        # Serializer/deserializer for request and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, policies, ...).
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> Iterable["_models.BackupPoliciesList"]:
        """List backup policies.
        List backup policies for Netapp Account.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BackupPoliciesList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.netapp.models.BackupPoliciesList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPoliciesList"]
        # Map HTTP error codes onto the azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the GET request for a page: the first page uses the
        # operation URL, subsequent pages follow the server's next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (continuation token, item iterator).
        # The token is always None here, so paging stops after one page.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("BackupPoliciesList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        # Fetch one page through the pipeline and fail on non-200 answers.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies'} # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        account_name: str,
        backup_policy_name: str,
        **kwargs: Any
    ) -> "_models.BackupPolicy":
        """Get a backup Policy.
        Get a particular backup Policy.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param backup_policy_name: Backup policy Name which uniquely identify backup policy.
        :type backup_policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BackupPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.netapp.models.BackupPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
        # Map HTTP error codes onto the azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            account_name=account_name,
            backup_policy_name=backup_policy_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Send the request through the pipeline; only 200 is a success.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BackupPolicy', pipeline_response)
        # `cls` lets the caller post-process/replace the deserialized model.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicy",
**kwargs: Any
) -> Optional["_models.BackupPolicy"]:
cls = kwargs.pop('cls', None) # type: ClsType[O | ptional["_models.BackupPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'BackupPolicy')
request = build_create_request_initi |
# -*- coding: utf-8 -*-
import logging, logging.handlers
from django.conf import settings
def get_logger(name, level=logging.INFO, format='[%(asctime)s] %(message)s', handler=None, filename=None):
    """
    Create and configure a named logger.

    When no handler is supplied, a FileHandler writing to
    ``<HOME_DIR>/logs/<name>.log`` (or to ``filename``) is created.

    :param name: logger name
    :param level: logging level (default: logging.INFO)
    :param format: log record format string
    :param handler: pre-built handler to attach instead of a file handler
    :param filename: log file path used when no handler is given
    :return: the configured ``logging.Logger``
    """
    configured = logging.getLogger(name)
    configured.setLevel(level)
    if not handler:
        # Default to a per-name log file under the project home directory.
        path = filename if filename else '%s/logs/%s.log' % (settings.HOME_DIR, name)
        handler = logging.FileHandler(path)
    handler.setFormatter(logging.Formatter(format))
    configured.addHandler(handler)
    return configured
# Module-level default logger, configured once from the Django settings.
# BUGFIX: the original guard `hasattr(...) and not logger` read `logger`
# before any assignment, raising NameError at import time exactly when
# LOG_FILENAME was set. Initialize it first.
logger = None
if hasattr(settings, 'LOG_FILENAME'):
    handler = logging.handlers.TimedRotatingFileHandler(settings.LOG_FILENAME, when = 'midnight')
    logger = get_logger('default', handler=handler)
|
class Solution:
    def containVirus(self, grid: List[List[int]]) -> int:
        """Contain-virus simulation (LeetCode 749 style).

        Each round, wall off the infected region that threatens the most
        uninfected cells, then let every other region spread one step.
        Returns the total number of walls built.

        Internal state: grid_set holds a region id per cell (0 = clean,
        -1 = quarantined); set_grid maps region id -> list of its cells;
        threaten maps region id -> count of distinct clean cells it touches.
        """
        current_set_number = 1
        grid_set = [[0 for i in range(len(grid[0]))] for j in range(len(grid))]
        set_grid = {}
        threaten = {}
        # Region ids of the (up to 4) neighbouring cells, excluding clean
        # cells (0), quarantined regions (-1) and the cell's own region.
        def getAdjacentCellsSet(row, col) -> List[int]:
            answer = []
            if row != 0 and grid_set[row-1][col] != 0 and grid_set[row-1][col] not in answer:
                answer.append(grid_set[row-1][col])
            if col != 0 and grid_set[row][col-1] != 0 and grid_set[row][col-1] not in answer:
                answer.append(grid_set[row][col-1])
            if row != len(grid)-1 and grid_set[row+1][col] != 0 and grid_set[row+1][col] not in answer:
                answer.append(grid_set[row+1][col])
            if col != len(grid[0])-1 and grid_set[row][col+1] != 0 and grid_set[row][col+1] not in answer:
                answer.append(grid_set[row][col+1])
            if -1 in answer:
                answer.remove(-1)
            if grid_set[row][col] in answer:
                answer.remove(grid_set[row][col])
            return answer
        # Merge all regions to the first one.
        def merge(regions: List[int]):
            merge_to = regions[0]
            for i in range(1, len(regions)):
                # Relabel every cell of the absorbed region, move its cell
                # list, and drop its bookkeeping entries.
                for x, y in set_grid[regions[i]]:
                    grid_set[x][y] = merge_to
                set_grid[merge_to] += set_grid[regions[i]]
                del set_grid[regions[i]]
                if regions[i] in threaten:
                    del threaten[regions[i]]
        # First pass: label every infected cell, merging regions that turn
        # out to be connected as the scan proceeds.
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                if grid[i][j] == 1:
                    adjacent_sets = getAdjacentCellsSet(i, j)
                    set_number = 0
                    if len(adjacent_sets) == 0:
                        set_number = current_set_number
                        current_set_number += 1
                    elif len(adjacent_sets) == 1:
                        set_number = adjacent_sets[0]
                    else: # Merge
                        merge(adjacent_sets)
                        set_number = adjacent_sets[0]
                    grid_set[i][j] = set_number
                    if set_number not in set_grid:
                        set_grid[set_number] = []
                    set_grid[set_number].append((i, j))
        # Coordinates of the clean cells bordering (x, y).
        def adjacentThreatened(x, y):
            answer = []
            if x != 0 and grid_set[x-1][y] == 0:
                answer.append((x-1, y))
            if y != 0 and grid_set[x][y-1] == 0:
                answer.append((x, y-1))
            if x != len(grid_set)-1 and grid_set[x+1][y] == 0:
                answer.append((x+1, y))
            if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0:
                answer.append((x, y+1))
            return answer
        # Recompute the threat count (distinct clean neighbours) of every
        # live region; skips clean (0) and quarantined (-1) labels.
        def threatenCells():
            for i in set_grid:
                if i == 0 or i == -1:
                    continue
                threatened = set()
                for x, y in set_grid[i]:
                    threatened = threatened.union(adjacentThreatened(x, y))
                threaten[i] = len(threatened)
        # Quarantine a region: mark its cells -1 and count the walls needed
        # (one per region-cell/clean-cell border, shared sides counted once
        # per infected cell).
        def contain(set_number):
            wall = 0
            for x, y in set_grid[set_number]:
                grid_set[x][y] = -1
                if x != 0 and grid_set[x-1][y] == 0:
                    wall += 1
                if y != 0 and grid_set[x][y-1] == 0:
                    wall += 1
                if x != len(grid_set)-1 and grid_set[x+1][y] == 0:
                    wall += 1
                if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0:
                    wall += 1
            del set_grid[set_number]
            del threaten[set_number]
            return wall
        # Grow every remaining region into its clean neighbours, merging
        # regions that touch after the growth step.
        def spread():
            to_spread = deque()
            for _, v in set_grid.items():
                to_spread.extend(v)
            while len(to_spread) > 0:
                x, y = to_spread.popleft()
                current_set = grid_set[x][y]
                if x != 0 and grid_set[x-1][y] == 0:
                    grid_set[x-1][y] = current_set
                    set_grid[current_set].append((x-1, y))
                    adj = getAdjacentCellsSet(x-1, y)
                    merge([current_set]+adj)
                if y != 0 and grid_set[x][y-1] == 0:
                    grid_set[x][y-1] = current_set
                    set_grid[current_set].append((x, y-1))
                    adj = getAdjacentCellsSet(x, y-1)
                    merge([current_set]+adj)
                if x != len(grid_set)-1 and grid_set[x+1][y] == 0:
                    grid_set[x+1][y] = current_set
                    set_grid[current_set].append((x+1, y))
                    adj = getAdjacentCellsSet(x+1, y)
                    merge([current_set]+adj)
                if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0:
                    grid_set[x][y+1] = current_set
                    set_grid[current_set].append((x, y+1))
                    adj = getAdjacentCellsSet(x, y+1)
                    merge([current_set]+adj)
        answer = 0
        threatenCells()
        # print(grid_set)
        # print(answer)
        # Main loop: contain the most threatening region, spread the rest,
        # refresh the threat counts, until no live region remains.
        while len(threaten) != 0:
            # print(threaten)
            largest_infected = sorted(
                threaten.items(), key=lambda x: x[1], reverse=True)[0]
            answer += contain(largest_infected[0])
            spread()
            # print(grid_set)
            # print(answer)
            threatenCells()
        return answer
|
import copy
from .point import Point
from .misc import *
'''
Line is defined using two point(s).
'''
class Line(object):
    """A straight line between two existing Points of a geo-file.

    NOTE(review): this module is Python 2 code (`long` at the asserts,
    integer `/` in masterDBKeys) -- keep it py2-compatible.
    """
    # Counter / database names used by the misc helpers for this entity.
    _ID_NAME = '_LINE_ID'
    _DB_NAME = '_EXISTING_LINES'
    def __init__(self, geom, p0, p1):
        # Resolve a Point instance or raw id to a point id known to `geom`;
        # with geom=None (fromkey path) ids are taken at face value.
        def check(p):
            if geom is None: return p
            if isinstance(p, Point):
                found,pid = exist(geom,p)
                if found: return pid
            else:
                if geom.get(Point,p) is not None: return p
            return None
        assert isinstance(p0, (Point, int, long))
        assert isinstance(p1, (Point, int, long))
        self.pid = [check(p0), check(p1)]
        if self.pid[0] is None: raise RuntimeError("Line: Point p0 does not exist in geo-file")
        if self.pid[1] is None: raise RuntimeError("Line: Point p1 does not exist in geo-file")
        if self.pid[0] == self.pid[1]: raise RuntimeError("Line: Cannot construct lines of zero length")
        return
    # for printing to terminal
    def __repr__(self):
        return "l("+remove_bracket(str(self.dataFromKey(self.key())))+")"
    def code(self, geom):
        '''
        Return the code for use in the geo-file
        '''
        # we do not allow the same line to be added twice
        # self.exist(...) should return a (new) idx if not found
        found,idx = exist(geom,self)
        if found: return ''
        return '\n'.join([('Line(%d) = {%d,%d};') % (idx,self.pid[0],self.pid[1])])
    # NOTE: for uniqueness the sorted idx is used as "key" in the database
    def key(self, master=False):
        # Key is "sorted absolute ids + signed ids"; the master key keeps
        # only the sorted absolute half (orientation-independent).
        keystr=remove_bracket(str(sorted(map(abs,self.pid)) + self.pid))
        if master:
            return remove_bracket(str(sorted(map(abs,self.pid))))
        return keystr
    # this is an alternative constructor which can be called directly as "Line.fromkey(keystr)"
    @classmethod
    def fromkey(cls, keystr):
        pid=cls.dataFromKey(keystr)
        return Line(None, pid[0], pid[1])
    @classmethod
    def masterDBKeys(cls, geom):
        # Strip each stored key down to its first (master) half.
        subkeys=copy.deepcopy(getDB(geom,cls).keys())
        for i in range(0,len(subkeys)):
            tmp=subkeys[i].split(',')
            subkeys[i]=",".join(tmp[:len(tmp)/2])
        return subkeys
    @staticmethod
    def dataFromKey(keystr):
        # Drop the sorted-absolute half; return the signed point ids.
        return [int(i) for i in keystr.split(',')][2:]
|
#!/usr/bin/env python2
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name='ez | bake-support-django',
version='2.1',
description='Supporting library for integrating Django applications with EzBake.',
license='Apache License 2.0',
author='EzBake Developers',
author_email='developers@ezbake.io',
namespace_packages=['ezbake', 'ez | bake.support'],
packages=find_packages('lib', exclude=['test*']),
package_dir={'': 'lib'},
install_requires=[
'ezbake-security-client==2.1',
'Django>=1.4',
'psycopg2>=2.5',
],
)
|
#!/usr/bin/env python
# encoding: utf-8
"""An example for a function returning a function"""
def surround(tag1, tag2):
    """Return a function that wraps its argument between tag1 and tag2."""
    def wraps(content):
        # Closure over tag1/tag2 captured from the enclosing call.
        return '%s%s%s' % (tag1, content, tag2)
    return wraps
def printer(content, transform):
    """Apply `transform` to `content` and return the result."""
    result = transform(content)
    return result
# Demo: wrap the same text in anchor and paragraph tags (Python 2 print).
print printer("foo bar", surround("<a>", "</a>"))
print printer("foo bar", surround('<p>', '</p>'))
|
"""Module containing the logic for our debugging logic."""
from __future__ import print_function
import json
import platform
| import setuptools
def print_information(option, option_string, value, parser,
                      option_manager=None):
    """Print debugging information used in bug reports.

    :param option:
        The optparse Option instance.
    :type option:
        optparse.Option
    :param str option_string:
        The option name
    :param value:
        The value passed to the callback parsed from the command-line
    :param parser:
        The optparse OptionParser instance
    :type parser:
        optparse.OptionParser
    :param option_manager:
        The Flake8 OptionManager instance.
    :type option_manager:
        flake8.options.manager.OptionManager
    """
    # Flake8 parses options twice; plugins are only registered on the
    # second pass, so quietly skip the first one.
    if not option_manager.registered_plugins:
        return
    report = information(option_manager)
    print(json.dumps(report, indent=2, sort_keys=True))
    raise SystemExit(False)
def information(option_manager):
    """Generate the information to be printed for the bug report."""
    platform_info = {
        'python_implementation': platform.python_implementation(),
        'python_version': platform.python_version(),
        'system': platform.system(),
    }
    return {
        'version': option_manager.version,
        'plugins': plugins_from(option_manager),
        'dependencies': dependencies(),
        'platform': platform_info,
    }
def plugins_from(option_manager):
    """Generate the list of plugins installed."""
    ordered = sorted(option_manager.registered_plugins)
    return [{'plugin': name, 'version': version} for name, version in ordered]
def dependencies():
    """Generate the list of dependencies we care about."""
    return [{
        'dependency': 'setuptools',
        'version': setuptools.__version__,
    }]
|
text="Access to the member's username. This implicitly enables access to anything the user is publicly sharing on Open Humans. Note that this is potentially sensitive and/or identifying.", verbose_name='Are you requesting Open Humans usernames?')),
('approved', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('api_access_secret', models.CharField(max_length=64)),
],
options={
'verbose_name_plural': 'Data request activities',
},
),
migrations.CreateModel(
name='DataRequestProjectMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id_code', models.CharField(max_length=16)),
('message_permission', models.BooleanField()),
('username_shared', models.BooleanField()),
('sources_shared', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None)),
('member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member')),
],
),
migrations.CreateModel(
name='OAuth2DataRequestProject',
fields=[
('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
('enrollment_url', models.URLField(help_text="The URL we direct members to if they're interested in sharing data with your project.", verbose_name='Enrollment URL')),
('redirect_url', models.CharField(help_text='The return URL for our "authorization code" OAuth2 grant\n process. You can <a target="_blank" href="">read more about OAuth2\n "authorization code" transactions here</a>.', max_length=256, verbose_name='Redirect URL')),
('application', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
],
options={
'verbose_name': 'OAuth2 data request project',
},
bases=('private_sharing.datarequestproject',),
),
migrations.CreateModel(
name='OnSiteDataRequestProject',
fields=[
('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
('consent_text', models.TextField(help_text='The "informed consent" text that describes your project to Open Humans members.')),
('post_sharing_url', models.URLField(blank=True, help_text='If provided, after authorizing sharing the\nmember will be taken to this URL. If this URL includes "PROJECT_MEMBER_ID"\nwithin it, we will replace that with the member\'s project-specific\nproject_member_id. This allows you to direct them to an external survey you\noperate (e.g. using Google Forms) where a pre-filled project_member_id field\nallows you to connect those responses to corresponding data in Open Humans.', verbose_name='Post-sharing URL')),
],
options={
'verbose_name': 'On-site data request project',
},
bases=('private_sharing.datarequestproject',),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_members', to='private | _sharing.DataRequestProject'),
),
migrations.AddField(
model_name='datarequestproject',
name='coordinator',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='open_humans.Member'),
),
migrations.AlterField(
model_name='datarequestproject',
| name='long_description',
field=models.TextField(max_length=1000, verbose_name='A long description (1000 characters max)'),
),
migrations.AlterField(
model_name='datarequestproject',
name='short_description',
field=models.CharField(max_length=140, verbose_name='A short description (140 characters max)'),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='member',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member'),
),
migrations.RenameField(
model_name='datarequestprojectmember',
old_name='user_id_code',
new_name='project_member_id',
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='project_member_id',
field=models.CharField(max_length=16, unique=True),
),
migrations.AlterField(
model_name='datarequestproject',
name='request_sources_access',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
),
migrations.AlterField(
model_name='datarequestproject',
name='active',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.'),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 3, 4, 5, 14, 50, 931889, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='message_permission',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='sources_shared',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, size=None),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='username_shared',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='datarequestproject',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='revoked',
field=models.BooleanField(default=False),
),
migrations.AlterModelOptions(
name='datarequestproject',
options={},
),
migrations.AddField(
model_name='datarequestprojectmember',
name='authorized',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestproject',
name='slug',
field=autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', unique=True),
),
migrations.AddField(
model_name='datarequestproject',
name='is_academic_or_nonprofit',
field=models. |
from setuptools import setup, find_packages

# Package metadata collected in one mapping so it is easy to scan and extend.
_METADATA = dict(
    name="simple-crawler",
    version="0.1",
    url="https://github.com/shonenada/crawler",
    author="shonenada",
    author_email="shonenada@gmail.com",
    description="Simple crawler",
    zip_safe=True,
    platforms="any",
    install_requires=["requests==2.2.1"],
)

setup(packages=find_packages(), **_METADATA)
|
# -*- mode: python -*-
# -*- coding: iso8859-15 -*-
##############################################################################
#
# Gestion scolarite IUT
#
# Copyright (c) 2001 - 2006 Emmanuel Viennet. Al | l rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, | or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Emmanuel Viennet emmanuel.viennet@viennet.net
#
##############################################################################
"""Basic User management
"""
???????????????
|
1 < len(screw_axes):
next_axis = np.dot(np.transpose(M_rot), screw_axes[i + 1])
ideal_axis = geometry.rotation_matrix_to_axis_and_angle(
theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN))[0]
theta, tau = theta_tau_for_nexus(next_axis, ideal_axis)
if not check_theta_tau(theta, tau):
raise Exception("The value of theta or tau beyond the limits.")
M_local = theta_tau_to_rotation_matrix(theta, tau)
M_rot = np.dot(M_rot, M_local)
thetas.append(theta)
taus.append(tau)
return thetas, taus, M_init
def generate_super_coil(axis, omega, pitch_angle, length):
    '''Generate a alpha helix super coil.
    Return a list of Ca coordinates.
    '''
    unit_axis = geometry.normalize(axis)
    # Rotation that advances one residue around the super-coil axis.
    step_rot = geometry.rotation_matrix_from_axis_and_angle(unit_axis, omega)
    # Pick a direction perpendicular to the axis; the branch avoids a
    # degenerate construction when the x component is (nearly) zero.
    if np.abs(unit_axis[0]) > 0.01:
        perp = geometry.normalize(np.array([unit_axis[1], -unit_axis[0], 0]))
    else:
        perp = geometry.normalize(np.array([0, unit_axis[2], -unit_axis[1]]))
    # Tilt the coil axis by the pitch angle to get the first screw axis,
    # then rotate it once per residue to obtain the rest.
    seed = np.dot(
        geometry.rotation_matrix_from_axis_and_angle(perp, pitch_angle),
        unit_axis)
    screw_axes = [seed]
    while len(screw_axes) < length:
        screw_axes.append(np.dot(step_rot, screw_axes[-1]))
    # Generate the helix
    return generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=True)
### Functions to perturb an existing helix
def randomize_a_helix(ca_list, ratio):
    '''Randomize internal coordinates of a helix. Only int(ratio * len(ca_list))
    residues are perturbed.
    '''
    ds, thetas, taus = basic.get_internal_coordinates_from_ca_list(ca_list)
    # Pick which internal coordinates to perturb.
    n_perturb = int(ratio * len(ca_list))
    chosen = np.random.permutation(len(ca_list) - 3)[:n_perturb]
    for idx in chosen:
        new_theta = np.random.normal(THETA_MEAN, THETA_STD)
        new_tau = np.random.normal(TAU_MEAN, TAU_STD)
        # Keep the previous values when the draw falls outside the
        # allowed (theta, tau) region.
        if check_theta_tau(new_theta, new_tau):
            thetas[idx], taus[idx] = new_theta, new_tau
    rebuilt = basic.generate_segment_from_internal_coordinates(ds, thetas, taus)
    # Superimpose the perturbed ca list to the old list
    M, t = geometry.get_superimpose_transformation(rebuilt, ca_list)
    return [np.dot(M, p) + t for p in rebuilt]
def shift_helix_phase(ca_list, phase_shift):
    '''Shift the phase of a helix without changing
    it's direction.
    '''
    # Screw axis between each pair of consecutive local frames.
    screw_axes = []
    for i in range(1, len(ca_list) - 2):
        frame_prev = geometry.create_frame_from_three_points(
            ca_list[i - 1], ca_list[i], ca_list[i + 1])
        frame_next = geometry.create_frame_from_three_points(
            ca_list[i], ca_list[i + 1], ca_list[i + 2])
        rel_rot = np.dot(np.transpose(frame_next), frame_prev)
        screw_axes.append(
            geometry.rotation_matrix_to_axis_and_angle(rel_rot)[0])
    # Initial rotation for helix generation: the first local frame,
    # pre-rotated about the first screw axis by the requested phase shift.
    first_frame = geometry.create_frame_from_three_points(
        ca_list[0], ca_list[1], ca_list[2])
    M_init = np.dot(
        geometry.rotation_matrix_from_axis_and_angle(screw_axes[0], phase_shift),
        np.transpose(first_frame))
    # Rebuild the Ca trace, then translate so the centroids coincide.
    shifted = generate_alpha_helix_from_screw_axes(
        screw_axes, relieve_strain=True, M_init=M_init)
    offset = np.mean(ca_list, axis=0) - np.mean(shifted, axis=0)
    return [p + offset for p in shifted]
def twist_helix(ca_list, axis, pitch_angle, omega, ratio):
    '''Twist a helix, making it closer to a super coil who is defined
    by axis, pitch_angle and omega. int(ratio * len(ca_list)) minimum
    twist units (each with 6 residues) are perturbed.
    '''
    ds, thetas, taus = basic.get_internal_coordinates_from_ca_list(ca_list)
    # Local frame at the N-terminus; advanced one residue per loop
    # iteration below so each twist unit is evaluated in its own frame.
    M_init = geometry.create_frame_from_three_points(
        ca_list[0], ca_list[1], ca_list[2])
    # Get residues to perturb
    num_to_perturb = int(ratio * len(ca_list))
    res_to_perturb = sorted(np.random.permutation(len(taus) - 2)[:num_to_perturb])
    # Get the perturbed internal coordinates
    for i in range(len(taus)):
        if i in res_to_perturb:
            # Attempt to twist the 3-angle/3-torsion unit starting at i;
            # twist_minimum_unit returns (None, None) on failure, in which
            # case the original internal coordinates are kept.
            new_thetas, new_taus = twist_minimum_unit(thetas[i + 1: i + 4],
                taus[i: i + 3], M_init, axis, pitch_angle, omega)
            if new_thetas is not None:
                for j in range(3):
                    thetas[i + 1 + j] = new_thetas[j]
                    taus[i + j] = new_taus[j]
        # Advance the frame by one residue using the (possibly updated)
        # internal coordinates — this must run on EVERY iteration.
        M_init = np.dot(M_init, theta_tau_to_rotation_matrix(thetas[i + 1], taus[i]))
    # Get new ca coordinates
    new_ca_list = basic.generate_segment_from_internal_coordinates(ds, thetas, taus)
    # Superimpose the rebuilt trace back onto the original Ca positions.
    M, t = geometry.get_superimpose_transformation(new_ca_list, ca_list)
    new_ca_list = [np.dot(M, p) + t for p in new_ca_list]
    return new_ca_list
def twist_minimum_unit(thetas, taus, M_init, axis, pitch_angle, omega):
    '''Twist a minimum twist unit (3 bond angles and 3 torsions).
    Return new values for thetas and taus. Return (None, None) if the
    twist failed.

    :raises Exception: if thetas or taus does not have exactly 3 values.
    '''
    if len(thetas) != 3 or len(taus) != 3:
        raise Exception("A minimum twist unit must have 3 angles and 3 torsions!")
    screw_axes = []
    # Current first screw axis, expressed in the global frame.
    axis1_local = geometry.rotation_matrix_to_axis_and_angle(
        theta_tau_to_rotation_matrix(thetas[0], taus[0]))[0]
    axis1_global = np.dot(M_init, axis1_local)
    # Rotate the first axis toward the target pitch angle, halving the
    # rotation up to 4 times until the resulting (theta, tau) is allowed.
    angle_to_rotate = pitch_angle - geometry.angle(axis, axis1_global)
    rotate_axis = geometry.normalize(np.cross(axis, axis1_global))
    for _ in range(4):
        M_rot = geometry.rotation_matrix_from_axis_and_angle(
            rotate_axis, angle_to_rotate)
        new_axis_global = np.dot(M_rot, axis1_global)
        new_axis_local = np.dot(np.transpose(M_init), new_axis_global)
        theta, tau = axis_to_theta_tau(new_axis_local)
        if check_theta_tau(theta, tau):
            screw_axes.append(new_axis_global)
            break
        angle_to_rotate /= 2
    if len(screw_axes) == 0:
        return None, None
    # The remaining two screw axes follow by rotating about the coil axis.
    M_rot = geometry.rotation_matrix_from_axis_and_angle(axis, omega)
    for i in range(1, 3):
        screw_axes.append(np.dot(M_rot, screw_axes[i - 1]))
    # Convert back to internal coordinates; the conversion can fail for
    # unreachable geometry.  Catch Exception only (the original bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit).
    try:
        thetas, taus, M = get_theta_tau_and_rotation_matrix_from_screw_axes(
            screw_axes, M_init=M_init)
    except Exception:
        return None, None
    return thetas[1:], taus
def thread_backbone_for_helix(ca_list):
'''Thread backbone atoms for a ca list of a helix using
the method and parameters from the G. Grigoryan, W. F. DeGrado paper.
Return a list of residue dictionaries.
'''
# Make the N termial residue
residue_list = [{'ca': ca_list[0],
'n': geometry.cartesian_coord_from_internal_coord(ca_list[2],
ca_list[1], ca_list[0], 1.45, np.radians(95.0), np.radians(65.0)),
'c': geometry.cartesian_coord_from_internal_coord(ca_list[2],
ca_list[1], ca_list[0], 1.52, np.radians(21.0), np.radians(-79.0))}]
# Make middle residues
for i in range(1, len(ca_list) - 1):
residue_list.append({'ca': ca_list[i],
'n': geometry.cartesian_coord_from_internal_coord(ca_list[i - 1],
ca_list[i + 1], ca_list[i], 1.45, np.radians(95.0), np.radians(14.0)),
'c': geometry.cartesian_coord_from_internal_coord(ca_list[i + 1],
ca_list[i - 1], ca_list[i], 1.52, np.radians(104.0), np.radians(16.0))})
# Make the N terminal residue
residue_list.append({'ca': ca_list[-1],
'n': geometry.cartesian_coord_from_internal_coord(ca_list[-3],
ca_list[-2], ca_list[-1], 1.45, np.radians(15.0), np.radians(-56.0)),
'c': geometry.cartesian_coord_from_internal_coord(ca_list[-3],
ca_list[-2], ca_list[-1], 1.52, np.radians(104.0), np.radians(67.0))})
# Buil O ato |
from common.models import *
from common.localization import txt, verbose_names
@verbose_names
class Patient(models.Model):
    """A patient record: personal data, addresses, contact details and
    injury/operation information."""
    # private
    first_name = models.CharField(max_length=80)
    last_name = models.CharField(max_length=80)
    GENDER = (
        (txt('M'), txt('male')),
        (txt('F'), txt('female'))
    )
    gender = models.CharField(max_length=1, choices=GENDER)
    # Stored values are limited to 4 characters (blood_type max_length=4).
    # NOTE(review): 'ABR-'/'ABR+' stored values differ from their
    # 'ABRh-'/'ABRh+' labels — presumably truncated to fit max_length=4;
    # confirm this is intentional.
    BLOOD_TYPE = (
        (txt('0Rh-'), txt('0Rh-')),
        (txt('0Rh+'), txt('0Rh+')),
        (txt('ARh-'), txt('ARh-')),
        (txt('ARh+'), txt('ARh+')),
        (txt('BRh-'), txt('BRh-')),
        (txt('BRh+'), txt('BRh+')),
        (txt('ABR-'), txt('ABRh-')),
        (txt('ABR+'), txt('ABRh+')),
    )
    blood_type = models.CharField(max_length=4, choices=BLOOD_TYPE, blank=True, null=True)
    birth_date = models.DateField()
    pesel = PESELField()
    # address
    country = models.CharField(max_length=80, default="Polska")
    city = models.CharField(max_length=80)
    address = models.CharField(max_length=80, blank=True, null=True)
    # mailing_address
    mailing_country = models.CharField(max_length=80, blank=True, null=True)
    mailing_city = models.CharField(max_length=80, blank=True, null=True)
    mailing_address = models.CharField(max_length=80, blank=True, null=True)
    # work
    job = models.CharField(max_length=80, blank=True, null=True)
    workplace = models.CharField(max_length=80, blank=True, null=True)
    # contact
    cell_phone = models.CharField(max_length=80, blank=True, null=True)
    landline_phone = models.CharField(max_length=80, blank=True, null=True)
    email = models.EmailField(blank=True, null=True)
    # injury info
    date_of_injury = models.DateField()
    time_of_injury = models.TimeField(blank=True, null=True)
    date_of_operation = models.DateField(blank=True, null=True)
    time_of_operation = models.TimeField(blank=True, null=True)
    additional_notes = AdditionalNotesField(blank=True, null=True)
    def __str__(self):
        # Shown as "First Last" wherever the object is rendered as text.
        return "{0} {1}".format(self.first_name, self.last_name)
    class Meta:
        ordering = ('last_name', 'first_name')
|
# Generated by Django 2.2.15 on 2020-11-24 06:44
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add general-"Yes" support to assignment polls.

    Adds ``db_amount_global_yes`` and ``global_yes`` fields, and updates the
    ``pollmethod`` choices (first keeping the old ``votes`` value, then
    replacing it with ``Y``) plus the ``onehundred_percent_base`` choices.
    """
    dependencies = [
        ("assignments", "0015_assignmentvote_delegated_user"),
    ]
    operations = [
        migrations.AddField(
            model_name="assignmentpoll",
            name="db_amount_global_yes",
            field=models.DecimalField(
                blank=True,
                decimal_places=6,
                default=Decimal("0"),
                max_digits=15,
                null=True,
                validators=[django.core.validators.MinValueValidator(Decimal("-2"))],
            ),
        ),
        migrations.AddField(
            model_name="assignmentpoll",
            name="global_yes",
            field=models.BooleanField(default=True),
        ),
        # Transitional choice set: old "votes" value still present.
        migrations.AlterField(
            model_name="assignmentpoll",
            name="pollmethod",
            field=models.CharField(
                choices=[
                    ("votes", "Yes per candidate"),
                    ("N", "No per candidate"),
                    ("YN", "Yes/No per candidate"),
                    ("YNA", "Yes/No/Abstain per candidate"),
                ],
                max_length=5,
            ),
        ),
        migrations.AlterField(
            model_name="assignmentpoll",
            name="onehundred_percent_base",
            field=models.CharField(
                choices=[
                    ("YN", "Yes/No per candidate"),
                    ("YNA", "Yes/No/Abstain per candidate"),
                    ("Y", "Sum of votes including general No/Abstain"),
                    ("valid", "All valid ballots"),
                    ("cast", "All casted ballots"),
                    ("disabled", "Disabled (no percents)"),
                ],
                max_length=8,
            ),
        ),
        # Final choice set: "votes" replaced by "Y".
        migrations.AlterField(
            model_name="assignmentpoll",
            name="pollmethod",
            field=models.CharField(
                choices=[
                    ("Y", "Yes per candidate"),
                    ("N", "No per candidate"),
                    ("YN", "Yes/No per candidate"),
                    ("YNA", "Yes/No/Abstain per candidate"),
                ],
                max_length=5,
            ),
        ),
    ]
|
g
import time
import unittest
from unittest.case import skip
import weakref
logging.basicConfig(format="%(asctime)s %(levelname)-7s %(module)-15s: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
# Simulated SECOM microscope configuration, resolved relative to the
# installed odemis package.
CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/"
SECOM_LENS_CONFIG = CONFIG_PATH + "sim/secom-sim-lens-align.odm.yaml" # 4x4
class TestAlignment(unittest.TestCase):
    """
    Test Spot Alignment functions
    """
    # True when an external backend was already running, in which case the
    # tests are skipped rather than run against unknown hardware.
    backend_was_running = False
    @classmethod
    def setUpClass(cls):
        # Start the simulated SECOM backend once for the whole test case.
        try:
            test.start_backend(SECOM_LENS_CONFIG)
        except LookupError:
            logging.info("A running backend is already found, skipping tests")
            cls.backend_was_running = True
            return
        except IOError as exp:
            logging.error(str(exp))
            raise
        # find components by their role
        cls.ebeam = model.getComponent(role="e-beam")
        cls.sed = model.getComponent(role="se-detector")
        cls.ccd = model.getComponent(role="ccd")
        cls.focus = model.getComponent(role="focus")
        cls.align = model.getComponent(role="align")
        cls.light = model.getComponent(role="light")
        cls.light_filter = model.getComponent(role="filter")
        cls.stage = model.getComponent(role="stage")
        # Used for OBJECTIVE_MOVE type
        cls.aligner_xy = ConvertStage("converter-ab", "stage",
                                      children={"orig": cls.align},
                                      axes=["b", "a"],
                                      rotation=math.radians(45))
    @classmethod
    def tearDownClass(cls):
        # Only stop the backend if this test case started it.
        if cls.backend_was_running:
            return
        test.stop_backend()
    def setUp(self):
        if self.backend_was_running:
            self.skipTest("Running backend found")
        # image for FakeCCD
        self.data = hdf5.read_data("../align/test/one_spot.h5")
        C, T, Z, Y, X = self.data[0].shape
        self.data[0].shape = Y, X
        self.fake_img = self.data[0]
    # @skip("skip")
    def test_spot_alignment(self):
        """
        Test AlignSpot
        """
        escan = self.ebeam
        ccd = self.ccd
        focus = self.focus
        f = align.AlignSpot(ccd, self.aligner_xy, escan, focus)
        dist, vector = f.result()
        self.assertAlmostEqual(dist, 2.41e-05)
    # @skip("faster")
    def test_spot_alignment_cancelled(self):
        """
        Test AlignSpot cancellation
        """
        escan = self.ebeam
        ccd = self.ccd
        focus = self.focus
        f = align.AlignSpot(ccd, self.aligner_xy, escan, focus)
        time.sleep(0.01) # Cancel almost after the half grid is scanned
        f.cancel()
        self.assertTrue(f.cancelled())
        self.assertTrue(f.done())
        with self.assertRaises(futures.CancelledError):
            f.result()
    # Callback helpers used to track future completion and progress.
    def on_done(self, future):
        self.done += 1
    def on_progress_update(self, future, past, left):
        self.past = past
        self.left = left
        self.updates += 1
    def test_aligned_stream(self):
        """
        Test the AlignedSEMStream
        """
        # Use fake ccd in order to have just one spot
        ccd = FakeCCD(self, self.align)
        # first try using the metadata correction
        st = stream.AlignedSEMStream("sem-md", self.sed, self.sed.data, self.ebeam,
                                     ccd, self.stage, self.focus, shiftebeam=stream.MTD_MD_UPD)
        # we don't really care about the SEM image, so the faster the better
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]
        # start one image acquisition (so it should do the calibration)
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 30 s")
        # Check the correction metadata is there
        md = self.sed.getMetadata()
        self.assertIn(model.MD_POS_COR, md)
        # Check the position of the image is correct
        pos_cor = md[model.MD_POS_COR]
        pos_dict = self.stage.position.value
        pos = (pos_dict["x"], pos_dict["y"])
        exp_pos = tuple(p - c for p, c in zip(pos, pos_cor))
        imd = received[0].metadata
        self.assertEqual(exp_pos, imd[model.MD_POS])
        # Check the calibration doesn't happen again on a second acquisition
        bad_cor = (-1, -1) # stupid impossible value
        self.sed.updateMetadata({model.MD_POS_COR: bad_cor})
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 10 s")
        # if calibration has happened (=bad), it has changed the metadata
        md = self.sed.getMetadata()
        self.assertEqual(bad_cor, md[model.MD_POS_COR],
                         "metadata has been updated while it shouldn't have")
        # Check calibration happens again after a stage move
        f = self.stage.moveRel({"x": 100e-6})
        f.result() # make sure the move is over
        time.sleep(0.1) # make sure the stream had time to detect position has changed
        received = st.image.value
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 30 s")
        # if calibration has happened (=good), it has changed the metadata
        md = self.sed.getMetadata()
        self.assertNotEqual(bad_cor, md[model.MD_POS_COR],
                            "metadata hasn't been updated while it should have")
class FakeCCD(model.HwComponent):
"""
Fake CCD component that returns a spot image
"""
    def __init__(self, testCase, align):
        # Minimal stand-in for a CCD camera: exposes the VAs the streams
        # read and a DataFlow that serves the test case's static spot image.
        super(FakeCCD, self).__init__("testccd", "ccd")
        self.testCase = testCase
        self.align = align
        self.exposureTime = model.FloatContinuous(1, (1e-6, 1000), unit="s")
        # NOTE(review): `long` exists only on Python 2 — confirm this module
        # is not expected to run under Python 3.
        self.binning = model.TupleContinuous((1, 1), [(1, 1), (8, 8)],
                                  cls=(int, long, float), unit="")
        self.resolution = model.ResolutionVA((2160, 2560), [(1, 1), (2160, 2560)])
        self.data = CCDDataFlow(self)
        # Acquisition thread management: one worker thread, guarded by a
        # lock, with an Event used to request it to stop.
        self._acquisition_thread = None
        self._acquisition_lock = threading.Lock()
        self._acquisition_init_lock = threading.Lock()
        self._acquisition_must_stop = threading.Event()
        self.fake_img = self.testCase.fake_img
def start_acquire(self, callback):
with self._acquisition_lock:
self._wait_acquisition_stopped()
target = self._acquire_thread
self._acquisition_thread = threading.Thread(target=target,
name="FakeCCD acquire flow thread",
args=(callback,))
self._acquisition_thread.start()
def stop_acquire(self):
with self._acquisition_lock:
with self._acquisition_init_lock:
self._acquisition_must_stop.set()
def _wait_acquisition_stopped(self):
"""
Waits until the acquisition thread is fully finished _iff_ it was requested
to stop.
"""
# "if" is to not wait if it's already finished
if self._acquisition_must_stop.is_set():
logging.debug("Waiting for thread to stop.")
self._acquisition_thread.join(10) # 10s timeout for safety
if self._acquisition_thread.isAlive():
logging.exception("Failed to stop the acquisition thread")
# Now let's hope everything is back to normal...
# ensure it's not set, even if the thread died prematurely
self._acquisition_must_stop.clear()
def _simulate_image(self):
"""
Generates the fake output.
"""
with self._acquisition_lock:
self.fake_img.metadata[model.MD_ACQ_DATE] = time.time()
output = model.DataArray(self.fake_img, self.fake_img.metadata)
return self.fake_img
def _acquire_thread(self, callback):
"""
Thread that simulates the CCD acquisition.
"""
try:
while not self._acquisition_must_stop.is_set():
# dummy
|
#!/usr/bin/env python3
"""
Created on 15 Aug 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Note: this script uses the Pt1000 temp sensor for temperature compensation.
"""
import time
from scs_core.data.json import JSONify
from scs_core.gas.afe_baseline import AFEBaseline
from scs_core.gas.afe_calib import AFECalib
from scs_core.gas.afe.pt1000_calib import Pt1000Calib
from scs_dfe.gas.afe.afe import AFE
from scs_dfe.gas.afe.pt1000 import Pt1000
from scs_dfe.interface.interface_conf import InterfaceConf
from scs_host.bus.i2c import I2C
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
try:
    # Open the sensors I2C bus; closed unconditionally in the finally block.
    I2C.Sensors.open()
    # Load interface, Pt1000 and AFE configuration/calibration from the host.
    interface_conf = InterfaceConf.load(Host)
    print(interface_conf)
    print("-")
    interface = interface_conf.interface()
    print(interface)
    print("-")
    pt1000_calib = Pt1000Calib.load(Host)
    print(pt1000_calib)
    print("-")
    pt1000 = Pt1000(pt1000_calib)
    print(pt1000)
    print("-")
    afe_calib = AFECalib.load(Host)
    print(afe_calib)
    print("-")
    afe_baseline = AFEBaseline.load(Host)
    print(afe_baseline)
    print("-")
    sensors = afe_calib.sensors(afe_baseline)
    print('\n\n'.join(str(sensor) for sensor in sensors))
    print("-")
    # ----------------------------------------------------------------------------------------------------------------
    # Sample the AFE: Pt1000 temperature, individual stations, then all
    # sensors together, timing each call.
    afe = AFE(interface, pt1000, sensors)
    print(afe)
    print("-")
    start_time = time.time()
    temp = afe.sample_pt1000()
    elapsed = time.time() - start_time
    print(temp)
    print("elapsed:%0.3f" % elapsed)
    print("-")
    start_time = time.time()
    sample = afe.sample_station(1)
    elapsed = time.time() - start_time
    print("SN1: %s" % sample)
    print("elapsed:%0.3f" % elapsed)
    print("-")
    start_time = time.time()
    sample = afe.sample_station(4)
    elapsed = time.time() - start_time
    print("SN4: %s" % sample)
    print("elapsed:%0.3f" % elapsed)
    print("=")
    start_time = time.time()
    samples = afe.sample()
    elapsed = time.time() - start_time
    print(samples)
    print("elapsed:%0.3f" % elapsed)
    print("-")
    # Report the full sample set as JSON.
    jstr = JSONify.dumps(samples)
    print(jstr)
    print("-")
finally:
    I2C.Sensors.close()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:22
from __future__ import | unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``articles`` field from the ``tag`` model."""
    dependencies = [
        ('app', '0002_auto_20170819_2342'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='tag',
            name='articles',
        ),
    ]
|
from django.db import models
from django.core.validators import validate_email, validate_slug, validate_ipv46_address
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from ava.core.models import TimeStampedModel
from ava.core_group.models import Group
from ava.core_identity.validators import validate_skype, validate_twitter
class Identity(TimeStampedModel):
    # An identity is an online persona that can map to a single person, a group
    # of people, or an automated service.
    GROUP = 'GROUP'
    PERSON = 'PERSON'
    IDENTITY_TYPE_CHOICES = (
        (GROUP, 'Group'),
        (PERSON, 'Person'),
    )
    name = models.CharField(max_length=100, verbose_name='Name', null=True, blank=True)
    description = models.TextField(max_length=500, verbose_name='Description', null=True, blank=True)
    identity_type = models.CharField(max_length=10,
                                     choices=IDENTITY_TYPE_CHOICES,
                                     default=PERSON,
                                     verbose_name='Identity Type')
    groups = models.ManyToManyField(Group,
                                    blank=True,
                                    related_name='identities')
    def __str__(self):
        # name is nullable; fall back to an empty string for display.
        return self.name or ''
    def get_absolute_url(self):
        return reverse('identity-detail', kwargs={'pk': self.id})
    class Meta:
        verbose_name = 'identity'
        verbose_name_plural = 'identities'
        ordering = ['name']
class Person(TimeStampedModel):
    """A real person, optionally linked to one or more identities."""
    first_name = models.CharField(max_length=75, validators=[validate_slug])
    surname = models.CharField(max_length=75, validators=[validate_slug])
    identity = models.ManyToManyField('Identity', blank=True)
    def __str__(self):
        # Either name part may be blank; strip avoids a stray space.
        return (self.first_name + " " + self.surname).strip() or ''
    def get_absolute_url(self):
        return reverse('person-detail', kwargs={'pk': self.id})
    class Meta:
        verbose_name = 'person'
        verbose_name_plural = 'people'
        ordering = ['surname', 'first_name']
class Identifier(TimeStampedModel):
    """A single piece of identifying information (email address, IP address,
    username, Skype/Twitter handle or other name) attached to an
    :class:`Identity`.
    """
    EMAIL = 'EMAIL'
    SKYPE = 'SKYPE'
    IP = 'IPADD'
    UNAME = 'UNAME'
    TWITTER = 'TWITTER'
    NAME = 'NAME'
    IDENTIFIER_TYPE_CHOICES = (
        (EMAIL, 'Email Address'),
        (SKYPE, 'Skype ID'),
        (IP, 'IP Address'),
        (UNAME, 'Username'),
        (TWITTER, 'Twitter ID'),
        (NAME, 'Other name'),
    )
    identifier = models.CharField(max_length=100)
    identifier_type = models.CharField(max_length=10,
                                       choices=IDENTIFIER_TYPE_CHOICES,
                                       default=EMAIL,
                                       verbose_name='Identifier Type')
    identity = models.ForeignKey('Identity', related_name='identifiers')
    def __str__(self):
        return self.identifier or ''
    def get_absolute_url(self):
        return reverse('identifier-detail', kwargs={'pk': self.id})
    def clean(self):
        """Validate ``identifier`` against the format implied by its type.

        :raises ValidationError: if the value does not match the type.
        """
        # Bug fix: the original compared strings with ``is``, which tests
        # object identity and only works by accident of CPython string
        # interning. Use ``==`` via the class constants instead.
        if self.identifier_type == self.EMAIL:
            try:
                validate_email(self.identifier)
            except ValidationError:
                raise ValidationError('Identifier is not a valid email address')
        if self.identifier_type == self.IP:
            try:
                validate_ipv46_address(self.identifier)
            except ValidationError:
                raise ValidationError('Identifier is not a valid IPv4/IPv6 address')
        if self.identifier_type in (self.UNAME, self.NAME):
            try:
                validate_slug(self.identifier)
            except ValidationError:
                raise ValidationError('Identifier is not a valid username or name')
        if self.identifier_type == self.SKYPE:
            try:
                validate_skype(self.identifier)
            except ValidationError:
                raise ValidationError('Identifier is not a valid Skype user name')
        if self.identifier_type == self.TWITTER:
            try:
                validate_twitter(self.identifier)
            except ValidationError:
                raise ValidationError('Identifier is not a valid Twitter user name')
    class Meta:
        unique_together = ("identifier", "identifier_type", "identity")
        ordering = ['identifier', 'identifier_type']
|
he License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
from testtools import matchers
import nova.conf
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
CONF = nova.conf.CONF
# Image metadata attached to stub volumes in the tests below.
_image_metadata = {
    'kernel_id': 'fake',
    'ramdisk_id': 'fake'
}
# Shared fixture IDs for the attachment test data.
_volume_id = "6edbc2f4-1507-44f8-ac0d-eed1d2608d38"
_instance_uuid = "f4fda93b-06e0-4743-8117-bc8bcecd651b"
_instance_uuid_2 = "f4fda93b-06e0-4743-8117-bc8bcecd651c"
_attachment_id = "3b4db356-253d-4fab-bfa0-e3626c0b8405"
_attachment_id_2 = "3b4db356-253d-4fab-bfa0-e3626c0b8406"
_device = "/dev/vdb"
_device_2 = "/dev/vdc"
_volume_attachment = \
    [{"server_id": _instance_uuid,
      "attachment_id": _attachment_id,
      "host_name": "",
      "volume_id": _volume_id,
      "device": _device,
      "id": _volume_id
      }]
# NOTE(review): no copy is taken here — _volume_attachment_2 aliases
# _volume_attachment, so the append below extends BOTH names (and
# exp_volume_attachment_2 below likewise aliases exp_volume_attachment).
# Confirm this aliasing is intentional before reusing these fixtures.
_volume_attachment_2 = _volume_attachment
_volume_attachment_2.append({"server_id": _instance_uuid_2,
                             "attachment_id": _attachment_id_2,
                             "host_name": "",
                             "volume_id": _volume_id,
                             "device": _device_2,
                             "id": _volume_id})
exp_volume_attachment = collections.OrderedDict()
exp_volume_attachment[_instance_uuid] = {'attachment_id': _attachment_id,
                                         'mountpoint': _device}
exp_volume_attachment_2 = exp_volume_attachment
exp_volume_attachment_2[_instance_uuid_2] = {'attachment_id': _attachment_id_2,
                                             'mountpoint': _device_2}
class BaseCinderTestCase(object):
    """Shared setup and client-configuration tests for the cinder v1/v2
    test cases. Subclasses must define ``URL`` and ``CATALOG``.
    """
    def setUp(self):
        super(BaseCinderTestCase, self).setUp()
        cinder.reset_globals()
        # Mock out all HTTP traffic made via python-requests.
        self.requests = self.useFixture(fixture.Fixture())
        self.api = cinder.API()
        self.context = context.RequestContext('username',
                                              'project_id',
                                              auth_token='token',
                                              service_catalog=self.CATALOG)
    def flags(self, *args, **kwargs):
        # Changing config invalidates the cached cinder client globals.
        super(BaseCinderTestCase, self).flags(*args, **kwargs)
        cinder.reset_globals()
    def create_client(self):
        return cinder.cinderclient(self.context)
    def test_context_with_catalog(self):
        self.assertEqual(self.URL, self.create_client().client.get_endpoint())
    def test_cinder_http_retries(self):
        retries = 42
        self.flags(http_retries=retries, group='cinder')
        self.assertEqual(retries, self.create_client().client.connect_retries)
    def test_cinder_api_insecure(self):
        # The True/False negation is awkward, but better for the client
        # to pass us insecure=True and we check verify_cert == False
        self.flags(insecure=True, group='cinder')
        self.assertFalse(self.create_client().client.session.verify)
    def test_cinder_http_timeout(self):
        timeout = 123
        self.flags(timeout=timeout, group='cinder')
        self.assertEqual(timeout, self.create_client().client.session.timeout)
    def test_cinder_api_cacert_file(self):
        cacert = "/etc/ssl/certs/ca-certificates.crt"
        self.flags(cafile=cacert, group='cinder')
        self.assertEqual(cacert, self.create_client().client.session.verify)
class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
    """Test case for cinder volume v1 api."""
    URL = "http://localhost:8776/v1/project_id"
    CATALOG = [{
        "type": "volumev2",
        "name": "cinderv2",
        "endpoints": [{"publicURL": URL}]
    }]
    def create_client(self):
        # Same as the base class, but also asserts a v1 client was built.
        c = super(CinderTestCase, self).create_client()
        self.assertIsInstance(c, cinder_client_v1.Client)
        return c
    def stub_volume(self, **kwargs):
        """Return a minimal v1 volume dict; kwargs override defaults."""
        volume = {
            'display_name': None,
            'display_description': None,
            "attachments": [],
            "availability_zone": "cinder",
            "created_at": "2012-09-10T00:00:00.000000",
            "id": _volume_id,
            "metadata": {},
            "size": 1,
            "snapshot_id": None,
            "status": "available",
            "volume_type": "None",
            "bootable": "true",
            "multiattach": "true"
        }
        volume.update(kwargs)
        return volume
    def test_cinder_endpoint_template(self):
        # An explicit endpoint_template wins over the service catalog URL.
        endpoint = 'http://other_host:8776/v1/%(project_id)s'
        self.flags(endpoint_template=endpoint, group='cinder')
        self.assertEqual('http://other_host:8776/v1/project_id',
                         self.create_client().client.endpoint_override)
    def test_get_non_existing_volume(self):
        # A 404 from cinder surfaces as VolumeNotFound.
        self.requests.get(self.URL + '/volumes/nonexisting',
                          status_code=404)
        self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
                          'nonexisting')
    def test_volume_with_image_metadata(self):
        v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
        m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
        volume = self.api.get(self.context, '5678')
        self.assertThat(m.last_request.path,
                        matchers.EndsWith('/volumes/5678'))
        self.assertIn('volume_image_metadata', volume)
        self.assertEqual(_image_metadata, volume['volume_image_metadata'])
class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v2 api."""
URL = "http://localhost:8776/v2/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinder",
"endpoints": [{"publicURL": URL}]
}]
    def setUp(self):
        super(CinderV2TestCase, self).setUp()
        # Point the catalog lookup at the v2 volume endpoint.
        CONF.set_override('catalog_info',
                          'volumev2:cinder:publicURL', group='cinder')
        self.addCleanup(CONF.reset)
    def create_client(self):
        # Same as the base class, but also asserts a v2 client was built.
        c = super(CinderV2TestCase, self).create_client()
        self.assertIsInstance(c, cinder_client_v2.Client)
        return c
    def stub_volume(self, **kwargs):
        """Return a minimal v2 volume dict; kwargs override defaults."""
        volume = {
            'name': None,
            'description': None,
            "attachments": [],
            "availability_zone": "cinderv2",
            "created_at": "2013-08-10T00:00:00.000000",
            "id": _volume_id,
            "metadata": {},
            "size": 1,
            "snapshot_id": None,
            "status": "available",
            "volume_type": "None",
            "bootable": "true",
            "multiattach": "true"
        }
        volume.update(kwargs)
        return volume
    def test_cinder_endpoint_template(self):
        # An explicit endpoint_template wins over the service catalog URL.
        endpoint = 'http://other_host:8776/v2/%(project_id)s'
        self.flags(endpoint_template=endpoint, group='cinder')
        self.assertEqual('http://other_host:8776/v2/project_id',
                         self.create_client().client.endpoint_override)
    def test_get_non_existing_volume(self):
        # A 404 from cinder surfaces as VolumeNotFound.
        self.requests.get(self.URL + '/volumes/nonexisting',
                          status_code=404)
        self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
                          'nonexisting')
    def test_volume_with_image_metadata(self):
        # Image metadata on the cinder volume is passed through by api.get.
        v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
        self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
        volume = self.api.get(self.context, '5678')
        self.assertIn('volume_image_metadata', volume)
        self.assertEqual(_image_metadata, volume['volume_image_metadata'])
def test_volume_without_attachment(self):
v = self.stub_volume(id='1234')
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
se |
def delete_document(func):
    """Remove and expunge the returned document."""
    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        doc = func(self, *args, **kwargs) or self
        # Remove the file from version control, when configured to do so.
        if settings.ADDREMOVE_FILES and doc.tree:
            doc.tree.vcs.delete(doc.config)
        # Drop the document from the tree's cache.
        # pylint: disable=W0212
        if settings.CACHE_DOCUMENTS and doc.tree:
            doc.tree._document_cache[doc.prefix] = None
            log.trace("expunged document: {}".format(doc))  # type: ignore
        # Remove the directory, but only if nothing else lives in it.
        try:
            os.rmdir(doc.path)
        except OSError:
            pass  # directory wasn't empty
        return doc
    return wrapped
class BaseValidatable(metaclass=abc.ABCMeta):
    """Abstract Base Class for objects that can be validated."""

    def validate(self, skip=None, document_hook=None, item_hook=None):
        """Check the object for validity.

        :param skip: list of document prefixes to skip
        :param document_hook: function to call for custom document
            validation
        :param item_hook: function to call for custom item validation

        :return: indication that the object is valid
        """
        issues = self.get_issues(
            skip=skip, document_hook=document_hook, item_hook=item_hook
        )
        valid = True
        for issue in issues:
            # Infos and warnings are only logged; errors (and anything
            # promoted by the WARN_ALL/ERROR_ALL settings) invalidate.
            if isinstance(issue, DoorstopInfo) and not settings.WARN_ALL:
                log.info(issue)
            elif isinstance(issue, DoorstopWarning) and not settings.ERROR_ALL:
                log.warning(issue)
            else:
                assert isinstance(issue, DoorstopError)
                log.error(issue)
                valid = False
        return valid

    @abc.abstractmethod
    def get_issues(self, skip=None, document_hook=None, item_hook=None):
        """Yield all the objects's issues.

        :param skip: list of document prefixes to skip
        :param document_hook: function to call for custom document
            validation
        :param item_hook: function to call for custom item validation

        :return: generator of :class:`~doorstop.common.DoorstopError`,
            :class:`~doorstop.common.DoorstopWarning`,
            :class:`~doorstop.common.DoorstopInfo`
        """

    @property
    def issues(self):
        """Get a list of the item's issues."""
        return list(self.get_issues())
def auto_load(func):
    """Decorator that runs ``self.load()`` before the wrapped call."""
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        # Refresh state from disk, then delegate to the wrapped callable.
        self.load()
        return func(self, *args, **kwargs)
    return inner
def auto_save(func):
    """Decorator that calls ``self.save()`` after the wrapped call.

    The save only happens when ``self.auto`` is truthy.
    """
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        # Persist only when automatic saving is enabled on the instance.
        if self.auto:
            self.save()
        return result
    return inner
class BaseFileObject(metaclass=abc.ABCMeta):
    """Abstract Base Class for objects whose attributes save to a file.
    For properties that are saved to a file, decorate their getters
    with :func:`auto_load` and their setters with :func:`auto_save`.
    """
    # Class-level default; an explicit save() resets it to True (see save()).
    auto = True  # set to False to delay automatic save until explicit save
    def __init__(self):
        """Initialize in-memory state; the backing file is set by subclasses."""
        self.path = None  # path to the backing file
        self.root = None  # root directory that `path` is relative to
        self._data: Dict[str, str] = {}  # raw attribute data loaded from file
        self._exists = True  # set to False once the file is deleted
        self._loaded = False  # set to True after load() has run
    def __hash__(self):
        """Hash on the backing file path, consistent with __eq__."""
        return hash(self.path)
    def __eq__(self, other):
        """Equal only to instances of the same class with the same path."""
        return isinstance(other, self.__class__) and self.path == other.path
    def __ne__(self, other):
        """Negation of __eq__."""
        return not self == other
    @staticmethod
    def _create(path, name):
        """Create a new file for the object.
        :param path: path to new file
        :param name: humanized name for this file
        :raises: :class:`~doorstop.common.DoorstopError` if the file
            already exists
        """
        if os.path.exists(path):
            raise DoorstopError("{} already exists: {}".format(name, path))
        # Create any missing parent directories, then an empty file.
        common.create_dirname(path)
        common.touch(path)
    @abc.abstractmethod
    def load(self, reload=False):
        """Load the object's properties from its file.
        :param reload: force a re-read even if already loaded
        """
        # 1. Start implementations of this method with:
        if self._loaded and not reload:
            return
        # 2. Call self._read() and update properties here
        # 3. End implementations of this method with:
        self._loaded = True
    def _read(self, path):
        """Read text from the object's file.
        :param path: path to a text file
        :return: contents of the text file
        :raises: :class:`~doorstop.common.DoorstopError` if the file
            was already deleted
        """
        if not self._exists:
            msg = "cannot read from deleted: {}".format(self.path)
            raise DoorstopError(msg)
        return common.read_text(path)
    @staticmethod
    def _load(text, path, **kwargs):
        """Load YAML data from text.
        :param text: text read from a file
        :param path: path to the file (for displaying errors)
        :param kwargs: passed through to the YAML loader
        :return: dictionary of YAML data
        """
        return common.load_yaml(text, path, **kwargs)
    @abc.abstractmethod
    def save(self):
        """Format and save the object's properties to its file."""
        # 1. Call self._write() with the current properties here
        # 2. End implementations of this method with:
        self._loaded = False  # force a reload on the next access
        self.auto = True  # re-enable automatic saving after an explicit save
    def _write(self, text, path):
        """Write text to the object's file.
        :param text: text to write to a file
        :param path: path to the file
        :raises: :class:`~doorstop.common.DoorstopError` if the file
            was already deleted
        """
        if not self._exists:
            raise DoorstopError("cannot save to deleted: {}".format(self))
        common.write_text(text, path)
    @staticmethod
    def _dump(data):
        """Dump YAML data to text.
        :param data: dictionary of YAML data
        :return: text to write to a file
        """
        # Block style with unicode preserved, for human-editable files.
        return yaml.dump(data, default_flow_style=False, allow_unicode=True)
# properties #############################################################
    @property
    def relpath(self):
        """Get the item's relative path string ("@<sep><relative path>")."""
        assert self.path
        relpath = os.path.relpath(self.path, self.root)
        # Prefix with "@" + separator to mark the path as project-relative.
        return "@{}{}".format(os.sep, relpath)
# extended attributes ####################################################
    @property  # type: ignore
    @auto_load
    def extended(self):
        """Get a sorted list of all extended attribute names.
        Extended attributes are keys in the file's data that do not
        correspond to a built-in attribute of this class.
        """
        names = []
        for name in self._data:
            if not hasattr(self, name):
                names.append(name)
        return sorted(names)
    @auto_load
    def get(self, name, default=None):
        """Get an extended attribute.
        :param name: name of extended attribute
        :param default: value to return for missing attributes
        :return: value of extended attribute
        """
        if hasattr(self, name):
            # Prefer the class attribute/property over the raw file data.
            cname = self.__class__.__name__
            msg = "'{n}' can be accessed from {c}.{n}".format(n=name, c=cname)
            log.trace(msg)  # type: ignore
            return getattr(self, name)
        else:
            return self._data.get(name, default)
    @auto_load
    @auto_save
    def set(self, name, value):
        """Set an extended attribute.
        :param name: name of extended attribute
        :param value: value to set
        """
        if hasattr(self, name):
            # Route through the class attribute/property when one exists.
            cname = self.__class__.__name__
            msg = "'{n}' can be set from {c}.{n}".format(n=name, c=cname)
            log.trace(msg)  # type: ignore
            setattr(self, name, value)
        else:
            self._data[name] = value
# actions ################################################################
def delete(self, path):
"""Delete the object's file from the file system."""
if self._exists:
log.info("deleting {}...".format(path))
common.delete(path)
self._loaded = False # force the object to reload
self._exists = False # but, prevent futu |
try:
    from setuptools import setup
except ImportError:
    # setuptools is unavailable; fall back to the stdlib installer.
    from distutils.core import setup
# Packaging metadata, expanded into keyword arguments for setup() below.
config = {
    'description': 'ex48',
    'author': 'Zhao, Li',
    'url': 'URL to get it at.',
    'download_url': 'Where to download it.',
    'author_email': 'zhaoace@gmail.com',
    'version': '0.1',
    'install_requires': ['nose'],  # test-runner dependency
    'packages': ['ex48'],
    'scripts': [],
    'name': 'ex48'
}
setup(**config)
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 IBM Corp
#
# Author: Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging.handlers
import os
import tempfile
from ceilometer.dispatcher import file
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
from ceilometer.publisher import utils
class TestDispatcherFile(test.BaseTestCase):
    """Tests for the file dispatcher's logging-handler configuration."""
    def setUp(self):
        super(TestDispatcherFile, self).setUp()
        self.CONF = self.useFixture(config.Config()).conf
    @staticmethod
    def _reserved_filename():
        # Create and immediately close a named temporary file so we only
        # keep its (now unused) path for the dispatcher to write to.
        tf = tempfile.NamedTemporaryFile('r')
        name = tf.name
        tf.close()
        return name
    def _signed_sample(self):
        # Build a minimal metering message with a valid signature.
        msg = {'counter_name': 'test',
               'resource_id': self.id(),
               'counter_volume': 1,
               }
        msg['message_signature'] = utils.compute_signature(
            msg,
            self.CONF.publisher.metering_secret,
        )
        return msg
    def test_file_dispatcher_with_all_config(self):
        filename = self._reserved_filename()
        self.CONF.dispatcher_file.file_path = filename
        self.CONF.dispatcher_file.max_bytes = 50
        self.CONF.dispatcher_file.backup_count = 5
        dispatcher = file.FileDispatcher(self.CONF)
        # Exactly one handler, and the rotation settings select the
        # rotating handler type.
        self.assertEqual(1, len(dispatcher.log.handlers))
        handler = dispatcher.log.handlers[0]
        self.assertIsInstance(handler,
                              logging.handlers.RotatingFileHandler)
        # Recording a sample must not raise and must create the file.
        dispatcher.record_metering_data(self._signed_sample())
        self.assertTrue(os.path.exists(handler.baseFilename))
    def test_file_dispatcher_with_path_only(self):
        filename = self._reserved_filename()
        self.CONF.dispatcher_file.file_path = filename
        self.CONF.dispatcher_file.max_bytes = None
        self.CONF.dispatcher_file.backup_count = None
        dispatcher = file.FileDispatcher(self.CONF)
        # Without rotation settings a plain FileHandler is used.
        self.assertEqual(1, len(dispatcher.log.handlers))
        handler = dispatcher.log.handlers[0]
        self.assertIsInstance(handler,
                              logging.FileHandler)
        # Recording a sample must not raise and must create the file.
        dispatcher.record_metering_data(self._signed_sample())
        self.assertTrue(os.path.exists(handler.baseFilename))
    def test_file_dispatcher_with_no_path(self):
        # No file path configured means no logger is set up at all.
        self.CONF.dispatcher_file.file_path = None
        dispatcher = file.FileDispatcher(self.CONF)
        self.assertIsNone(dispatcher.log)
|
#!/usr/bin/env python
imp | ort os
import sys
from optparse import OptionParser
def makeoptions():
    """Build the command-line option parser for the test runner."""
    parser = OptionParser()
    parser.add_option(
        "-v",
        "--verbosity",
        type=int,
        action="store",
        dest="verbosity",
        default=1,
        help="Tests verbosity level, one of 0, 1, 2 or 3",
    )
    return parser
if __name__ == '__main__':
    # NOTE(review): djpcms appears unused beyond verifying it is importable
    # before the test suite starts -- confirm before removing.
    import djpcms  # noqa: F401
    # Bug fix: removed a duplicate ``import sys`` (already imported at the
    # top of the module).
    options, tags = makeoptions().parse_args()
    verbosity = options.verbosity
    # Make the sibling "tests" directory importable, then hand off to its
    # runner with the positional args as test tags.
    p = os.path
    path = p.join(p.split(p.abspath(__file__))[0], 'tests')
    sys.path.insert(0, path)
    from testrunner import run
    run(tags, verbosity=verbosity)
import tensorflow as tf
import numpy as np
import cv2
img_original = cv2.imread('jack.jpg') #data.camera()
# Resize to a fixed 320x320 (64*5) working resolution.
img = cv2.resize(img_original, (64*5,64*5))
# for positions
xs = []
# for corresponding colors
ys = []
# One training pair per pixel: input = (row, col), target = pixel color.
for row_i in range(img.shape[0]):
    for col_i in range(img.shape[1]):
        xs.append([row_i, col_i])
        ys.append(img[row_i, col_i])
# list->numpy array
xs,ys = np.array(xs),np.array(ys)
# Normalise the coordinate inputs to zero mean / unit variance.
xs = (xs-np.mean(xs))/np.std(xs)
# Placeholders: input (row, col) pairs and 3-channel color targets.
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')
def linear(X, n_input, n_output, activation=None, scope=None):
    """Fully connected layer: activation(X @ W + b).

    W is initialized from N(0, 0.1); b starts at zero. Variables live
    under the given scope (default "linear").
    """
    with tf.variable_scope(scope or "linear"):
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            initializer=tf.constant_initializer())
        h = tf.matmul(X, W) + b
        return h if activation is None else activation(h)
# Network layer sizes: 2 inputs (row, col) -> six hidden layers -> 3 outputs.
n_neurons = [2, 64, 64, 64, 64, 64, 64, 3]
# Per-element L1 distance used as the regression cost.
def distance(p1, p2):
    return tf.abs(p1 - p2)
# Build the network: ReLU on every layer except the final (linear) one.
current_input = X
for layer_i in range(1, len(n_neurons)):
    current_input = linear(
        X=current_input,
        n_input=n_neurons[layer_i - 1],
        n_output=n_neurons[layer_i],
        activation=tf.nn.relu if (layer_i + 1) < len(n_neurons) else None,
        scope='layer_' + str(layer_i))
Y_pred = current_input
cost = tf.reduce_mean(tf.reduce_sum(distance(Y_pred, Y), 1))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
# Train with mini-batches over random permutations of the pixels.
n_iterations = 500
batch_size = 50
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for it_i in range(n_iterations):
        idxs = np.random.permutation(range(len(xs)))
        n_batches = len(idxs) // batch_size
        for batch_i in range(n_batches):
            idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]
            sess.run(optimizer, feed_dict={X: xs[idxs_i], Y: ys[idxs_i]})
        training_cost = sess.run(cost, feed_dict={X: xs, Y: ys})
        print(it_i, training_cost)
        # Every 20 iterations, render the current prediction to an image.
        if (it_i + 1) % 20 == 0:
            ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
            # Bug fix: removed `fig, ax = plt.subplots(1, 1)` -- `plt` was
            # never imported (NameError) and the figure was never used.
            # Bug fix: converted Python 2 print statements to functions so
            # the file parses consistently with the print() call above.
            print(ys_pred.shape, img.shape)
            print(ys_pred)
            # Use a separate name so the original image is not clobbered.
            img_out = np.clip(ys_pred.reshape(img.shape), 0, 255).astype(np.uint8)
            cv2.imwrite("face____" + str(it_i) + ".jpg", img_out)
|
from django import forms
from django.core.urlresolvers import reverse
from django.forms.widgets import RadioFieldRenderer
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
class BootstrapChoiceFieldRenderer(RadioFieldRenderer):
    """
    An object used by RadioSelect to enable customization of radio widgets.
    """
    def render(self):
        """
        Outputs a <div> for this set of choice fields.
        If an id was given to the field, it is applied to the <div> (each
        item in the list will get an id of `$id_$i`).
        """
        id_ = self.attrs.get('id', None)
        start_tag = format_html('<div id="{0}">', id_) if id_ else '<div>'
        output = [start_tag]
        # Wrap each radio input in a Bootstrap "radio" div.
        for widget in self:
            output.append(format_html('<div class="radio">{0}</div>', force_text(widget)))
        output.append('</div>')
        return mark_safe('\n'.join(output))
class UseCustomRegWidget(forms.MultiWidget):
    """
    This widget is for three fields on event add/edit under Registration:
    * use_custom_reg_form
    * reg_form
    * bind_reg_form_to_conf_only
    """
    def __init__(self, attrs=None, reg_form_choices=None, event_id=None):
        """
        :param attrs: HTML attributes; defaults to {'id': 'use_custom_reg'}
        :param reg_form_choices: choices for the custom-form select widget
        :param event_id: current event id, used to build the manage link
        """
        self.attrs = attrs
        self.reg_form_choices = reg_form_choices
        self.event_id = event_id
        if not self.attrs:
            self.attrs = {'id': 'use_custom_reg'}
        self.widgets = (
            forms.CheckboxInput(),
            forms.Select(attrs={'class': 'form-control'}),
            forms.RadioSelect(renderer=BootstrapChoiceFieldRenderer)
        )
        super(UseCustomRegWidget, self).__init__(self.widgets, attrs)
    def render(self, name, value, attrs=None):
        """Render the three sub-widgets into one composite HTML block."""
        if not isinstance(value, list):
            value = self.decompress(value)
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        # Checkbox: "use custom registration form".
        use_custom_reg_form_widget = self.widgets[0]
        rendered_use_custom_reg_form = self.render_widget(
            use_custom_reg_form_widget,
            name, value, final_attrs,
            0, id_
        )
        # Select: which custom form to use.
        reg_form_widget = self.widgets[1]
        reg_form_widget.choices = self.reg_form_choices
        rendered_reg_form = self.render_widget(
            reg_form_widget,
            name, value, final_attrs,
            1, id_
        )
        # Radio: one form for all pricings; embeds the rendered select.
        bind_reg_form_to_conf_only_widget = self.widgets[2]
        choices = (
            ('1', mark_safe('Use one form for all pricings %s' % rendered_reg_form)),
        )
        bind_reg_form_to_conf_only_widget.choices = choices
        rendered_bind_reg_form_to_conf_only = self.render_widget(
            bind_reg_form_to_conf_only_widget,
            name, value, final_attrs,
            2, id_
        )
        # Move the embedded select outside the radio's <label> element.
        rendered_bind_reg_form_to_conf_only = rendered_bind_reg_form_to_conf_only.replace(
            '%s</label>' % rendered_reg_form, "</label>%s" % rendered_reg_form
        )
        if self.event_id:
            manage_custom_reg_link = """
            <div>
                <a href="%s" target="_blank">Manage Custom Registration Form</a>
            </div>
            """ % reverse('event.event_custom_reg_form_list', args=[self.event_id])
        else:
            manage_custom_reg_link = ''
        output_html = """
        <div id="t-events-use-customreg-box">
            <div id="t-events-use-customreg-checkbox" class="checkbox">
            <label for="id_%s_%s">%s Use Custom Registration Form</label>
            </div>
            <div id="t-events-one-or-separate-form">%s</div>
            %s
        </div>
        """ % (
            name, '0',
            rendered_use_custom_reg_form,
            rendered_bind_reg_form_to_conf_only,
            manage_custom_reg_link
        )
        return mark_safe(output_html)
    def render_widget(self, widget, name, value, attrs, index=0, id=None):
        """Render one sub-widget.

        :param index: position of this sub-widget's value in ``value``
        :param id: base HTML id; the sub-widget id becomes "<id>_<index>"
        """
        i = index
        id_ = id
        if value:
            try:
                widget_value = value[i]
            except IndexError:
                # Bug fix: the original assigned to a nonexistent
                # ``self.fields`` attribute and left ``widget_value``
                # unbound, raising at the render call below.
                widget_value = None
        else:
            widget_value = None
        # Bug fix: ensure ``final_attrs`` is bound even when no id is given
        # (it was previously only assigned inside the ``if id_`` branch).
        final_attrs = attrs
        if id_:
            final_attrs = dict(attrs, id='%s_%s' % (id_, i))
            if widget.__class__.__name__.lower() != 'select':
                classes = final_attrs.get('class', None)
                if classes:
                    # Strip the select-only bootstrap class from others.
                    classes = classes.split(' ')
                    # Bug fix: guard the removal -- list.remove raises
                    # ValueError when the class is absent.
                    if 'form-control' in classes:
                        classes.remove('form-control')
                    final_attrs['class'] = ' '.join(classes)
        return widget.render(name + '_%s' % i, widget_value, final_attrs)
    def decompress(self, value):
        """Split "a,b,c" into the three sub-widget values ('1' -> 'on')."""
        if value:
            data_list = value.split(',')
            if data_list[0] == '1':
                data_list[0] = 'on'
            return data_list
        return None
|
# Prints exactly what the script is about to do
print "How many keys are there for the swedish alphabet?"
# Prints the amount of the top row
print "The top row has 11 letter keys"
# Assigns a value to top (a float, so the total below prints as 29.0)
top = 11.0
# Prints the amount of the middle row
print "The middle row has 11 letter keys"
# Assigns a value to middle
middle = 11
# Prints the amount of the bottom row
print "The bottom row has 7 letter keys"
# Assigns a value to bottom
bottom = 7
# Prints text followed by the combined value of the three rows
print "The total number of letter keys are ", top + middle + bottom
|
import py
import re
from testing.test_interpreter import BaseTestInterpreter
from testing.test_main import TestMain
from hippy.main import entry_point
class TestOptionsMain(TestMain):
    def test_version_compare(self, capfd):
        """Two-argument version_compare() echoes -1/0/1 per adjacent pair."""
        output = self.run('''<?php
        $versions = array(
            '1',
            '1.0',
            '1.01',
            '1.1',
            '1.10',
            '1.10b',
            '1.10.0',
            '-3.2.1',
            '1rc.0.2',
            'bullshit.rc.9.2beta',
        );
        foreach ($versions as $version) {
            if (isset($last)) {
                $comp = version_compare($last, $version);
                echo $comp;
            }
            $last = $version;
        }
        ?>''', capfd)
        assert output == "-1-10-11-11-11"
    def test_version_compare_with_cmp(self, capfd):
        """Three-argument version_compare() with every comparison operator.

        For each adjacent version pair, echoes the boolean result (as 0/1)
        of all fourteen operator spellings in $co.
        """
        output = self.run('''<?php
        $versions = array(
            '1',
            '1.0',
            '1.01',
            '1.1',
            '1.10',
            '1.10b',
            '1.10.0',
            '-3.2.1',
            '1rc.0.2',
            'bullshit.rc.9.2beta',
        );
        $co = array(
            '=',
            '==',
            'eq',
            '!=',
            '<>',
            'ne',
            '>',
            'gt',
            '<',
            'lt',
            '>=',
            'ge',
            '<=',
            'le',
        );
        foreach ($versions as $version) {
            if (isset($last)) {
                foreach ($co as $c) {
                    $comp = version_compare($last, $version, $c);
                    echo (int)$comp;
                }
            }
            $last = $version;
        }
        ?>''', capfd)
        assert output == "000111001100110001110011001111100000001111000111001100110001111100110000011100110011000111110011000001110011001100011111001100"
class TestOptionsFunc(BaseTestInterpreter):
    def test_get_cfg_var(self):
        """get_cfg_var() returns values present in the interpreter config."""
        php_version = "6.0"
        test_value = "test_value"
        space = self.space
        def setup_conf(interp):
            # Seed the interpreter's ini table before the script runs.
            interp.config.ini.update({
                'php_version': space.wrap(php_version),
                'test_value': space.wrap(test_value),
            })
        output = self.run('''
            echo get_cfg_var('php_version');
            echo get_cfg_var('test_value');
        ''', extra_func=setup_conf)
        assert self.space.str_w(output[0]) == php_version
        assert self.space.str_w(output[1]) == test_value
    def test_get_cfg_var2(self):
        """Unknown or non-string-ish names all yield false."""
        output = self.run('''
            echo get_cfg_var('');
            echo get_cfg_var(' ');
            echo get_cfg_var('non_existent_var');
            echo get_cfg_var(null);
            echo get_cfg_var(1);
            echo get_cfg_var(1.0);
        ''')
        assert all([o == self.space.w_False for o in output])
    def test_get_cfg_var3(self):
        """Array/object arguments yield null plus a type warning."""
        with self.warnings() as w:
            output = self.run('''
            echo get_cfg_var(array(1));
            class Test {};
            echo get_cfg_var(new Test);
            ''')
        assert output[0] == self.space.w_Null
        assert output[1] == self.space.w_Null
        assert w[0] == 'Warning: get_cfg_var() ' +\
                       'expects parameter 1 to be string, array given'
        assert w[1] == 'Warning: get_cfg_var() ' +\
                       'expects parameter 1 to be string, object given'
|
"""
Unit tests for the base mechanism class.
"""
import pytest
from azmq.mechanisms.base import Mechanism
from azmq.errors import ProtocolError
@pytest.mark.asyncio
async def test_expect_command(reader):
    # 0x04 = short command frame, 0x09 = frame size,
    # 0x03 + b'FOO' = name, remainder is the command data.
    reader.write(b'\x04\x09\x03FOOhello')
    reader.seek(0)
    result = await Mechanism._expect_command(reader=reader, name=b'FOO')
    assert result == b'hello'
@pytest.mark.asyncio
async def test_expect_command_large(reader):
    # 0x06 = long command frame: the size is an 8-byte (big-endian) field,
    # here 9, followed by the same name/data layout as the short form.
    reader.write(b'\x06\x00\x00\x00\x00\x00\x00\x00\x09\x03FOOhello')
    reader.seek(0)
    result = await Mechanism._expect_command(reader=reader, name=b'FOO')
    assert result == b'hello'
@pytest.mark.asyncio
async def test_expect_command_invalid_size_type(reader):
    # 0x03 is not a recognized command-frame marker byte.
    reader.write(b'\x03')
    reader.seek(0)
    with pytest.raises(ProtocolError):
        await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_expect_command_invalid_name_size(reader):
    # Name length is 4 ('HELO') while b'FOO' (3 bytes) is expected.
    reader.write(b'\x04\x09\x04HELOhello')
    reader.seek(0)
    with pytest.raises(ProtocolError):
        await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_expect_command_invalid_name(reader):
    # Sizes are consistent but the name is b'BAR', not the expected b'FOO'.
    reader.write(b'\x04\x08\x03BARhello')
    reader.seek(0)
    with pytest.raises(ProtocolError):
        await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_read_frame(reader):
    # 0x00 = short final message frame of size 3; read() should return the
    # payload plus the "last frame" flag.
    reader.write(b'\x00\x03foo')
    reader.seek(0)
    async def on_command(name, data):
        # A plain message frame must not be dispatched as a command.
        assert False
    result = await Mechanism.read(reader=reader, on_command=on_command)
    assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_frame_large(reader):
    # 0x02 = long final message frame with an 8-byte size field (here 3).
    reader.write(b'\x02\x00\x00\x00\x00\x00\x00\x00\x03foo')
    reader.seek(0)
    async def on_command(name, data):
        # A plain message frame must not be dispatched as a command.
        assert False
    result = await Mechanism.read(reader=reader, on_command=on_command)
    assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_command(reader):
    # A command frame (BAR/hello) followed by a message frame: read() must
    # dispatch the command to on_command and still return the message.
    reader.write(b'\x04\x09\x03BARhello\x00\x03foo')
    reader.seek(0)
    async def on_command(name, data):
        assert name == b'BAR'
        assert data == b'hello'
    result = await Mechanism.read(reader=reader, on_command=on_command)
    assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_invalid_size_type(reader):
    # 0x09 is not a valid frame marker byte.
    reader.write(b'\x09')
    reader.seek(0)
    async def on_command(name, data):
        assert False
    with pytest.raises(ProtocolError):
        await Mechanism.read(reader=reader, on_command=on_command)
|
# -*- coding: utf8 -*-
from phystricks import *
def MBWHooeesXIrsz():
    """Draw a right triangle with marked vertices and coded equal edges."""
    pspict, fig = SinglePicture("MBWHooeesXIrsz")
    pspict.dilatation(0.3)
    side = 4
    A = Point(0, 0)
    B = Point(side, 0)
    C = Point(side, side)
    triangle = Polygon(A, B, C)
    triangle.put_mark(0.2, pspict=pspict)
    # Tick-mark the two legs to show they have equal length.
    for edge in triangle.edges[0:2]:
        edge.put_code(n=2, d=0.1, l=0.2, pspict=pspict)
    no_symbol(triangle.vertices)
    pspict.DrawGraphs(triangle)
    pspict.comment="Vérifier la longueur des codages."
    fig.no_figure()
    fig.conclude()
    fig.write_the_file()
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" Gmail example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used
make Cozmo respond when a Gmail account receives an email. Instructions below
will lead you through setting up an applet on the IFTTT website. When the applet
trigger is called (which sends a web request received by the web server started
in this example), Cozmo will play an animation, speak the email sender's name and
show a mailbox image on his face.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a a static ip, URL or similar that can be reached from the If This
Then That server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "Gmail" as your service. If prompted, click "Connect",
select your Gmail account, and click “Allow” to provide permissions
to IFTTT for your email account. Click "Done".
3. Under "Choose a Trigger", select “Any new email in inbox".
d) Set up your action.
1. Click “that".
2. Select “Maker" to set it as your action channel. Connect to the Maker channel if prompted.
3. Click “Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttGmail" as shown below:
URL: http://55e57164.ngrok.io/iftttGmail
Method: POST
Content Type: application/json
Body: {"FromAddress":"{{FromAddress}}"}
5. Click “Create Action" then “Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_gmail.py
b) On ifttt.com, on your applet page, click “Check now”. See that IFTTT confirms that the applet
was checked.
c) Send an email to the Gmail account in your recipe
d) On your IFTTT applet webpage, again click “Check now”. This should cause IFTTT to detect that
the email was received and send a web request to the ifttt_gmail.py script.
e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower
his lift, announce the email, and then show a mailbox image on his face.
'''
import asyncio
import re
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()  # module-level aiohttp app; route registered below
async def serve_gmail(request):
    '''Define an HTTP POST handler for receiving requests from If This Then That.
    You may modify this method to change how Cozmo reacts to the email
    being received.
    '''
    json_object = await request.json()
    # Extract the name of the email sender.
    from_email_address = json_object["FromAddress"]
    # Use a regular expression to break apart pieces of the email address
    match_object = re.search(r'([\w.]+)@([\w.]+)', from_email_address)
    # Bug fix: guard against addresses the pattern cannot parse -- the
    # original called .group() on None (AttributeError -> HTTP 500).
    if match_object:
        email_local_part = match_object.group(1)
    else:
        email_local_part = from_email_address
    robot = request.app['robot']
    async def read_name():
        try:
            async with robot.perform_off_charger():
                '''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face.'''
                await robot.get_in_position()
                # First, have Cozmo play animation "ID_pokedB", which tells
                # Cozmo to raise and lower his lift. To change the animation,
                # you may replace "ID_pokedB" with another animation. Run
                # remote_control_cozmo.py to see a list of animations.
                await robot.play_anim(name='ID_pokedB').wait_for_completed()
                # Next, have Cozmo speak the name of the email sender.
                await robot.say_text("Email from " + email_local_part).wait_for_completed()
                # Last, have Cozmo display an email image on his face.
                robot.display_image_file_on_face("../face_images/ifttt_gmail.png")
        except cozmo.RobotBusy:
            cozmo.logger.warning("Robot was busy so didn't read email address: "+ from_email_address)
    # Perform Cozmo's task in the background so the HTTP server responds immediately.
    asyncio.ensure_future(read_name())
    return web.Response(text="OK")
# Attach the function as an HTTP handler.
app.router.add_post('/iftttGmail | ', serve_gmail)
if __name__ == '__main__':
    cozmo.setup_basic_logging()
    # Keep Cozmo on the charger at startup; he rolls off when emails arrive.
    cozmo.robot.Robot.drive_off_charger_on_connect = False
    # Use our custom robot class with extra helper methods
    cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
    try:
        sdk_conn = cozmo.connect_on_loop(app.loop)
        # Wait for the robot to become available and add it to the app object.
        app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())
    except cozmo.ConnectionError as e:
        sys.exit("A connection error occurred: %s" % e)
    web.run_app(app)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.