prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@fid mi-instrument/mi/dataset/parser/test/test_ctdav_nbosi_auv.py
@author Rene Gelinas
@brief Test code for a ctdav_nbosi_auv data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.driver.ctdav_nbosi.auv.resource import RESOURCE_PATH
from mi.dataset.parser.ctdav_nbosi_auv import CtdavNbosiAuvParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class CtdavNbosiAuvTestCase(ParserUnitTestCase):
"""
ctdav_nbosi_auv Parser unit test suite
"""
def test_simple(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
Expect the first two input records to be skipped due to invalid timestamp.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'CP05MOAS-A6264_AUVsubset_reduced.csv'), 'rU')
parser = CtdavNbosiAuv | Parser(stream_handle,
self.exception_callback)
particles = parser.get_records(20)
self.assert_particles(particles, 'ctdav_nbosi_auv.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
def test_long_stream(self):
"""
Read test data and pull out data particles.
Assert the expected number of particles is captured and there are no exceptions
"""
stream_ | handle = open(os.path.join(RESOURCE_PATH, 'CP05MOAS-A6264_AUVsubset.csv'), 'rU')
parser = CtdavNbosiAuvParser(stream_handle,
self.exception_callback)
particles = parser.get_records(10000)
self.assertEqual(len(particles), 10000)
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
|
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
@property
def l3_plugin(self):
return manager.NeutronManager.get_service_plugins().get(
pconst.L3_ROUTER_NAT)
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self. | _get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
| mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers and self.l3_plugin:
routers = []
all_routers = self.l3_plugin.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
This gives the controller an option to re-sync it's persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
if self.l3_plugin:
fl_ips = self.l3_plugin.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id |
es=25):
randomSeed(0)
drawBot.newDrawing()
for i in range(numFrames):
drawBot.newPage(500, 500)
drawBot.frameDuration(1/25)
drawBot.fill(1)
drawBot.rect(0, 0, 500, 500)
drawBot.fill(0)
drawBot.rect(random.randint(0, 100), random.randint(0, 100), 400, 400)
def makeTestDrawing(self):
drawBot.newDrawing()
drawBot.newPage(500, 500)
drawBot.oval(100, 100, 300, 300)
def _saveImageAndReturnSize(self, extension, **options):
with TempFile(suffix=extension) as tmp:
drawBot.saveImage(tmp.path, **options)
fileSize = os.stat(tmp.path).st_size
return fileSize
def test_ffmpegCodec(self):
self.makeTestAnimation()
size_h264 = self._saveImageAndReturnSize(".mp4")
size_mpeg4 = self._saveImageAndReturnSize(".mp4", ffmpegCodec="mpeg4")
self.assertLess(size_h264, size_mpeg4, "encoded with h264 is expected to be smaller than with mpeg4")
def test_arbitraryOption(self):
self.makeTestAnimation(1)
with StdOutCollector(captureStdErr=True) as output:
self._saveImageAndReturnSize(".png", someArbitraryOption="foo")
self.assertEqual(output, ['*** DrawBot warning: Unrecognized saveImage() option found for PNGContext: someArbitraryOption ***'])
def test_export_mov(self):
self.makeTestAnimation(5)
self._saveImageAndReturnSize(".mov")
def test_export_gif(self):
self.makeTestAnimation(5)
self._saveImageAndReturnSize(".gif")
def test_export_png(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".png")
def test_export_jpg(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".jpg")
def test_export_jpeg(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".jpeg")
def test_export_tif(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".tif")
def test_export_tiff(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".tiff")
def test_export_bmp(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".bmp")
def test_imageResolution(self):
self.makeTestDrawing()
with TempFile(suffix=".png") as tmp:
drawBot.saveImage(tmp.path)
self.assertEqual(drawBot.imageSize(tmp.path), (500, 500))
drawBot.saveImage(tmp.path, imageResolution=144)
self.assertEqual(drawBot.imageSize(tmp.path), (1000, 1000))
drawBot.saveImage(tmp.path, imageResolution=36)
self.assertEqual(drawBot.imageSize(tmp.path), (250, 250))
drawBot.saveImage(tmp.path, imageResolution=18)
self.assertEq | ual(drawBot.imageSize(tmp.path), (125, 125))
def test_imagePNGInterlaced(self):
self.makeTestDrawing()
defaultSize = self._saveImageAndReturnSize(".png")
interlacedSize = self._saveImageAndReturnSize(".png", imagePNGInterlaced=True)
# XXX Huh, seems to make no difference, output files are identical
self.assertEqual(defaultSize, i | nterlacedSize)
def test_imagePNGGamma(self):
self.makeTestDrawing()
defaultSize = self._saveImageAndReturnSize(".png")
gammaSize = self._saveImageAndReturnSize(".png", imagePNGGamma=0.8)
self.assertLess(defaultSize, gammaSize)
def test_imageJPEGProgressive(self):
self.makeTestDrawing()
defaultSize = self._saveImageAndReturnSize(".jpg")
progressiveSize = self._saveImageAndReturnSize(".jpg", imageJPEGProgressive=True)
self.assertGreater(defaultSize, progressiveSize)
def test_imageJPEGCompressionFactor(self):
self.makeTestDrawing()
lowCompressionSize = self._saveImageAndReturnSize(".jpg", imageJPEGCompressionFactor=1.0)
mediumCompressionSize = self._saveImageAndReturnSize(".jpg", imageJPEGCompressionFactor=0.5)
highCompressionSize = self._saveImageAndReturnSize(".jpg", imageJPEGCompressionFactor=0.0)
self.assertGreater(lowCompressionSize, mediumCompressionSize)
self.assertGreater(mediumCompressionSize, highCompressionSize)
def test_imageTIFFCompressionMethod(self):
self.makeTestDrawing()
defaultCompressionSize = self._saveImageAndReturnSize(".tif")
noCompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod=None)
packbitsCompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod="packbits")
packbits2CompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod=32773)
packbits3CompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod="PACKBITS")
lzwCompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod="lzw")
self.assertEqual(defaultCompressionSize, noCompressionSize)
self.assertEqual(packbitsCompressionSize, packbits2CompressionSize)
self.assertEqual(packbitsCompressionSize, packbits3CompressionSize)
self.assertGreater(noCompressionSize, packbitsCompressionSize)
self.assertGreater(packbitsCompressionSize, lzwCompressionSize)
def test_imageFallbackBackgroundColor(self):
self.makeTestDrawing()
with TempFile(suffix=".jpg") as tmp:
drawBot.saveImage(tmp.path, imageJPEGCompressionFactor=1.0)
self.assertEqual(drawBot.imagePixelColor(tmp.path, (5, 5)), (1.0, 1.0, 1.0, 1.0))
with TempFile(suffix=".jpg") as tmp:
drawBot.saveImage(tmp.path, imageJPEGCompressionFactor=1.0, imageFallbackBackgroundColor=(0, 1, 0))
r, g, b, a = drawBot.imagePixelColor(tmp.path, (5, 5))
self.assertEqual((round(r, 2), round(g, 2), round(b, 2)), (0, 0.97, 0)) # XXX 0.97 vs 1.0 "calibrated" vs "device"
with TempFile(suffix=".jpg") as tmp:
drawBot.saveImage(tmp.path, imageJPEGCompressionFactor=1.0, imageFallbackBackgroundColor=AppKit.NSColor.redColor())
r, g, b, a = drawBot.imagePixelColor(tmp.path, (5, 5))
self.assertEqual((round(r, 2), round(g, 2), round(b, 2)), (1, 0.15, 0))
def _testMultipage(self, extension, numFrames, expectedMultipageCount):
self.makeTestAnimation(numFrames)
with TempFolder() as tmpFolder:
with TempFile(suffix=extension, dir=tmpFolder.path) as tmp:
base, ext = os.path.splitext(tmp.path)
pattern = base + "_*" + ext
self.assertEqual(len(glob.glob(pattern)), 0)
drawBot.saveImage(tmp.path)
self.assertEqual(len(glob.glob(pattern)), 0)
drawBot.saveImage(tmp.path, multipage=False)
self.assertEqual(len(glob.glob(pattern)), 0)
drawBot.saveImage(tmp.path, multipage=True)
self.assertEqual(len(glob.glob(pattern)), expectedMultipageCount)
assert not os.path.exists(tmpFolder.path) # verify TempFolder cleanup
def test_multipage_png(self):
self._testMultipage(".png", numFrames=5, expectedMultipageCount=5)
def test_multipage_jpg(self):
self._testMultipage(".jpg", numFrames=6, expectedMultipageCount=6)
def test_multipage_svg(self):
self._testMultipage(".svg", numFrames=7, expectedMultipageCount=7)
def test_multipage_gif(self):
self._testMultipage(".gif", numFrames=8, expectedMultipageCount=0)
def test_multipage_pdf(self):
self._testMultipage(".pdf", numFrames=9, expectedMultipageCount=0)
def test_animatedGIF(self):
self.makeTestAnimation(5)
with TempFile(suffix=".gif") as tmp:
drawBot.saveImage(tmp.path)
self.assertEqual(gifFrameCount(tmp.path), 5)
def test_saveImage_unknownContext(self):
self.makeTestDrawing()
with self.assertRaises(DrawBotError) as cm:
drawBot.saveImage("foo.abcde")
self.assertEqual(cm.exception.args[0], "Could not find a supported context for: 'abcde'")
with self.assertRaises(DrawBotError) as |
ire their stock levels to be
#: tracked.
track_stock = models.BooleanField(_("Track stock levels?"), default=True)
#: These are the options (set by the user when they add to basket) for this
#: item class. For instance, a product class of "SMS message" would always
#: require a message to be specified before it could be bought.
#: Note that you can also set options on a per-product level.
options = models.ManyToManyField(
'catalogue.Option', blank=True, verbose_name=_("Options"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['name']
verbose_name = _("Product class")
verbose_name_plural = _("Product classes")
def __str__(self):
return self.name
@property
def has_attributes(self):
return self.attributes.exists()
@python_2_unicode_compatible
class AbstractCategory(MP_Node):
"""
A product category. Merely used for navigational purposes; has no
effects on business logic.
Uses django-treebeard.
"""
name = models.CharField(_('Name'), max_length=255, db_index=True)
description = models.TextField(_('Description'), blank=True)
image = models.ImageField(_('Image'), upload_to='categories', blank=True,
| null=True, max_length=255)
slug = SlugField(_('Slug'), max_length=255, db_index=True)
_slug_separator = '/'
_full_name_separator = ' > '
def __str__(self):
return self.full_name
@property
def full_name(self):
"""
Returns a string representation of the category and it's ancestors,
e.g. 'Books > Non-fiction > Essential | programming'.
It's rarely used in Oscar's codebase, but used to be stored as a
CharField and is hence kept for backwards compatibility. It's also
sufficiently useful to keep around.
"""
names = [category.name for category in self.get_ancestors_and_self()]
return self._full_name_separator.join(names)
@property
def full_slug(self):
"""
Returns a string of this category's slug concatenated with the slugs
of it's ancestors, e.g. 'books/non-fiction/essential-programming'.
Oscar used to store this as in the 'slug' model field, but this field
has been re-purposed to only store this category's slug and to not
include it's ancestors' slugs.
"""
slugs = [category.slug for category in self.get_ancestors_and_self()]
return self._slug_separator.join(slugs)
def generate_slug(self):
"""
Generates a slug for a category. This makes no attempt at generating
a unique slug.
"""
return slugify(self.name)
def ensure_slug_uniqueness(self):
"""
Ensures that the category's slug is unique amongst it's siblings.
This is inefficient and probably not thread-safe.
"""
unique_slug = self.slug
siblings = self.get_siblings().exclude(pk=self.pk)
next_num = 2
while siblings.filter(slug=unique_slug).exists():
unique_slug = '{slug}_{end}'.format(slug=self.slug, end=next_num)
next_num += 1
if unique_slug != self.slug:
self.slug = unique_slug
self.save()
def save(self, *args, **kwargs):
"""
Oscar traditionally auto-generated slugs from names. As that is
often convenient, we still do so if a slug is not supplied through
other means. If you want to control slug creation, just create
instances with a slug already set, or expose a field on the
appropriate forms.
"""
if self.slug:
# Slug was supplied. Hands off!
super(AbstractCategory, self).save(*args, **kwargs)
else:
self.slug = self.generate_slug()
super(AbstractCategory, self).save(*args, **kwargs)
# We auto-generated a slug, so we need to make sure that it's
# unique. As we need to be able to inspect the category's siblings
# for that, we need to wait until the instance is saved. We
# update the slug and save again if necessary.
self.ensure_slug_uniqueness()
def get_ancestors_and_self(self):
"""
Gets ancestors and includes itself. Use treebeard's get_ancestors
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_ancestors()) + [self]
def get_descendants_and_self(self):
"""
Gets descendants and includes itself. Use treebeard's get_descendants
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_descendants()) + [self]
def get_absolute_url(self):
"""
Our URL scheme means we have to look up the category's ancestors. As
that is a bit more expensive, we cache the generated URL. That is
safe even for a stale cache, as the default implementation of
ProductCategoryView does the lookup via primary key anyway. But if
you change that logic, you'll have to reconsider the caching
approach.
"""
current_locale = get_language()
cache_key = 'CATEGORY_URL_%s_%s' % (current_locale, self.pk)
url = cache.get(cache_key)
if not url:
url = reverse(
'catalogue:category',
kwargs={'category_slug': self.full_slug, 'pk': self.pk})
cache.set(cache_key, url)
return url
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['path']
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def has_children(self):
return self.get_num_children() > 0
def get_num_children(self):
return self.get_children().count()
@python_2_unicode_compatible
class AbstractProductCategory(models.Model):
"""
Joining model between products and categories. Exists to allow customising.
"""
product = models.ForeignKey(
'catalogue.Product',
on_delete=models.CASCADE,
verbose_name=_("Product"))
category = models.ForeignKey(
'catalogue.Category',
on_delete=models.CASCADE,
verbose_name=_("Category"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['product', 'category']
unique_together = ('product', 'category')
verbose_name = _('Product category')
verbose_name_plural = _('Product categories')
def __str__(self):
return u"<productcategory for product '%s'>" % self.product
@python_2_unicode_compatible
class AbstractProduct(models.Model):
"""
The base product object
There's three kinds of products; they're distinguished by the structure
field.
- A stand alone product. Regular product that lives by itself.
- A child product. All child products have a parent product. They're a
specific version of the parent.
- A parent product. It essentially represents a set of products.
An example could be a yoga course, which is a parent product. The different
times/locations of the courses would be associated with the child products.
"""
STANDALONE, PARENT, CHILD = 'standalone', 'parent', 'child'
STRUCTURE_CHOICES = (
(STANDALONE, _('Stand-alone product')),
(PARENT, _('Parent product')),
(CHILD, _('Child product'))
)
structure = models.CharField(
_("Product structure"), max_length=10, choices=STRUCTURE_CHOICES,
default=STANDALONE)
upc = NullCharField(
_("UPC"), max_length=64, blank=True, null=True, unique=True,
help_text=_("Universal Product Code (UPC) is an identifier for "
"a product which is not specific to a particular "
" supplier. Eg an ISBN for a book."))
parent = models.ForeignKey(
'self',
blank=True,
nu |
(self, start_item=0, max_items=50):
return self._send_cmd_response(SonosCommand.favradio(start_item, max_items))
def version(self):
return "v1.3\t2015-01-18"
class SonosSpeaker():
def __init__(self):
self.uid = []
self.ip = []
self.model = []
self.zone_name = []
self.zone_icon = []
self.is_coordinator = []
self.serial_number = []
self.software_version = []
self.hardware_version = []
self.m | ac_address = []
self.playlist_position = []
self.volume = []
self.mute = []
self.led = []
self.streamtype = []
self.stop = []
self.pl | ay = []
self.pause = []
self.track_title = []
self.track_artist = []
self.track_duration = []
self.track_position = []
self.track_album_art = []
self.track_uri = []
self.radio_station = []
self.radio_show = []
self.status = []
self.max_volume = []
self.additional_zone_members = []
self.bass = []
self.treble = []
self.loudness = []
self.playmode = []
self.alarms = []
self.tts_local_mode = []
class SonosCommand():
@staticmethod
def subscribe(ip, port):
return {
'command': 'client_subscribe',
'parameter': {
'ip': ip,
'port': port,
}
}
@staticmethod
def unsubscribe(ip, port):
return {
'command': 'client_unsubscribe',
'parameter': {
'ip': ip,
'port': port,
}
}
@staticmethod
def current_state(uid, group_command=0):
return {
'command': 'current_state',
'parameter': {
'uid': '{uid}'.format(uid=uid),
'group_command': int(group_command)
}
}
@staticmethod
def join(uid, value):
return {
'command': 'join',
'parameter': {
'join_uid': '{j_uid}'.format(j_uid=value),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def unjoin(uid):
return {
'command': 'unjoin',
'parameter': {
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def mute(uid, value, group_command=0):
return {
'command': 'set_mute',
'parameter': {
'uid': '{uid}'.format(uid=uid),
'mute': int(value),
'group_command': int(group_command)
}
}
@staticmethod
def next(uid):
return {
'command': 'next',
'parameter': {
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def previous(uid):
return {
'command': 'previous',
'parameter': {
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def play(uid, value):
return {
'command': 'set_play',
'parameter': {
'play': int(value),
'uid': '{uid}'.format(uid=uid),
}
}
@staticmethod
def pause(uid, value):
return {
'command': 'set_pause',
'parameter': {
'pause': int(value),
'uid': '{uid}'.format(uid=uid),
}
}
@staticmethod
def stop(uid, value):
return {
'command': 'set_stop',
'parameter': {
'stop': int(value),
'uid': '{uid}'.format(uid=uid),
}
}
@staticmethod
def led(uid, value, group_command=0):
return {
'command': 'set_led',
'parameter': {
'led': int(value),
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid),
}
}
@staticmethod
def volume(uid, value, group_command=0):
return {
'command': 'set_volume',
'parameter': {
'uid': '{uid}'.format(uid=uid),
'volume': int(value),
'group_command': int(group_command)
}
}
@staticmethod
def volume_up(uid, group_command=0):
return {
'command': 'volume_up',
'parameter': {
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def volume_down(uid, group_command=0):
return {
'command': 'volume_down',
'parameter': {
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def max_volume(uid, value, group_command):
return {
'command': 'set_max_volume',
'parameter': {
'max_volume': int(value),
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def seek(uid, value):
return {
'command': 'set_track_position',
'parameter': {
'timestamp': '{value}'.format(value=value),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def play_uri(uid, uri):
return {
'command': 'play_uri',
'parameter': {
'uri': '{uri}'.format(uri=uri),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def play_snippet(uid, uri, volume, group_command, fade_in):
return {
'command': 'play_snippet',
'parameter': {
'uri': '{uri}'.format(uri=uri),
'uid': '{uid}'.format(uid=uid),
'volume': int(volume),
'fade_in': int(fade_in),
'group_command': group_command
}
}
@staticmethod
def play_tts(uid, tts, language, volume, group_command, force_stream_mode, fade_in):
return {
'command': 'play_tts',
'parameter': {
'tts': '{tts}'.format(tts=tts),
'language': '{language}'.format(language=language),
'volume': int(volume),
'group_command': int(group_command),
'force_stream_mode': int(force_stream_mode),
'fade_in': int(fade_in),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def partymode(uid):
return {
'command': 'partymode',
'parameter': {
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def bass(uid, value, group_command=0):
return {
'command': 'set_bass',
'parameter': {
'bass': int(value),
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def playmode(uid, value):
return {
'command': 'set_playmode',
'parameter': {
'playmode': '{playmode}'.format(playmode=value),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def treble(uid, value, group_command=0):
return {
'command': 'set_treble',
'parameter': {
'treble': int(value),
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def loudness(uid, value, group_command=0):
return {
'command': 'set_loudness',
'parameter': {
'loudness': int(value),
'group_command': int(group_command),
'uid': '{uid}'.format(uid=uid)
}
}
@staticmethod
def get_playlist(uid):
return {
'comman |
# ------------------------------------------------------------------------------
# Security Central
# ------------------------------------------------------------------------------
from .models import User
from pyramid.security import Allow, Everyone, Authenticated, ALL_PERMISSIONS
from pyramid.authentication import SessionAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from .utils.gauth import getSecret, verifyOneTimePassword
import logging
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Configuration
# ----------------- | -------------------------------------------------------------
def includeme(config):
"""Set up the authentication and authorization policies"""
authnPolicy = SessionAuthenticationPolicy(callback=getGroups)
authzPolicy = ACLAuthorizationPolicy()
con | fig.set_authentication_policy(authnPolicy)
config.set_authorization_policy(authzPolicy)
config.set_root_factory(Root)
# Custom predicates
config.add_view_predicate("userNeedsVerification",
UserNeedsVerificationPredicate)
log.info("security set up")
# ------------------------------------------------------------------------------
# Authentication
# ------------------------------------------------------------------------------
def getGroups(name, request):
user = request.user
if user is None:
log.info("getGroups called for non-existant user %s" % name)
return None
if user.usesGauth and not user.gauthVerified:
log.debug("getGroups called for non-verified user %s" % name)
return None
return getGroupsForUser(user)
def getGroupsForUser(user):
groups = []
return groups
class Authentication:
TO_VERIFY, OK, FAILED, LOCKED_OUT = range(4)
def checkAuthentication(name, givenPass):
"""Check the given login and password matches an active user"""
result = Authentication.FAILED
name = name.replace(':', ';')
user = User.getByLogin(name)
if user:
if user.failed_logins < 99:
if givenPass and user.verifyPassword(givenPass):
log.info("User %s password OK" % name)
if user.usesGauth:
user.gauthVerified = False
result = Authentication.TO_VERIFY
else:
result = Authentication.OK
user.failed_logins = 0
else:
log.info("User %s authentication FAILED" % name)
user.failed_logins += 1
else:
log.warning("User %s locked out" % name)
result = Authentication.LOCKED_OUT
else:
log.info("User %s does not exist" % name)
return result, user
def checkVerification(user, givenOtp):
"""Verify the given one-time-password of users who use gauth"""
result = Authentication.FAILED
if user.usesGauth:
if user.failed_logins < 3:
secret = getSecret(user.gauth_key, user.id)
if givenOtp and verifyOneTimePassword(givenOtp, secret):
log.info("User %s verification OK" % user.login)
result = Authentication.OK
user.failed_logins = 0
else:
log.info("User %s verification FAILED" % user.login)
user.failed_logins += 1
else:
log.warning("User %s locked out" % user.login)
result = Authentication.LOCKED_OUT
else:
log.error("User %s does not use gauth!!!" % user.login)
return result
# ------------------------------------------------------------------------------
# View Predicates
# ------------------------------------------------------------------------------
class UserNeedsVerificationPredicate(object):
def __init__(self, flag, config):
self.flag = flag
def text(self):
if self.flag:
return "User does need verification"
else:
return "User does not need verification"
phash = text
def __call__(self, context, request):
user = request.user
needsVerification = user and user.usesGauth and not user.gauthVerified
return self.flag == needsVerification
# ------------------------------------------------------------------------------
# Security Domains
# ------------------------------------------------------------------------------
class Root(dict):
"""The root security domain"""
__acl__ = [(Allow, Everyone, ()),
(Allow, Authenticated, ('view', 'edit', 'play')),
(Allow, 'role:admin', ALL_PERMISSIONS) ]
def __init__(self, request):
pass
class UserSettings(object):
"""The security domain for user settings"""
def __init__(self, request):
self.request = request
@property
def __acl__(self):
# just delegate acl handling to the current user
if self.request.user:
return self.request.user.__acl__
|
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
from string import punctuation
from heapq import nlargest
import re
"""
Modified from http://glowingpython.blogspot.co.uk/2014/09/text-summarization-with-nltk.html
"""
class FrequencySummarizer:
    """Frequency-based extractive text summarizer.

    Sentences are ranked by the summed, normalized frequency of their
    significant words; the top-ranked sentences form the summary.
    Modified from
    http://glowingpython.blogspot.co.uk/2014/09/text-summarization-with-nltk.html
    """

    def __init__(self, low_thresh=0.1, high_thresh=0.9):
        """
        Initialize the text summarizer.
        Words that have a normalized frequency lower than low_thresh
        or higher than high_thresh will be ignored.
        """
        ignore = ['fig', 'figure', 'ibid', 'et al', 'cf', 'NB', 'N.B.']
        self._low_thresh = low_thresh
        self._high_thresh = high_thresh
        self._stopwords = set(stopwords.words('english') + list(punctuation) + list(ignore))

    def _compute_frequencies(self, word_tk):
        """Return {word: normalized frequency} over the tokenized sentences,
        excluding stopwords and words outside (low_thresh, high_thresh)."""
        freq = defaultdict(int)
        for sentence in word_tk:
            for word in sentence:
                if word not in self._stopwords:
                    freq[word] += 1
        if not freq:
            # Every token was a stopword: avoid max() on an empty sequence.
            return freq
        # frequencies normalization and filtering
        m = float(max(freq.values()))
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live key view raises RuntimeError on Python 3.
        for w in list(freq.keys()):
            freq[w] = freq[w] / m
            if freq[w] >= self._high_thresh or freq[w] <= self._low_thresh:
                del freq[w]
        return freq

    def summarize(self, text, n):
        """
        Return a list of at most n sentences
        which represent the summary of text.
        """
        # NOTE: `unicode` makes this method Python 2 only.
        text = "".join([unicode(x) for x in text])
        sents = sent_tokenize(text)
        if n > len(sents):
            n = len(sents)
        word_tk = [word_tokenize(s.lower()) for s in sents]
        self._freq = self._compute_frequencies(word_tk)
        ranking = defaultdict(int)
        for i, sent in enumerate(word_tk):
            for w in sent:
                # Only count words of length > 4 as significant.
                if w in self._freq and len(w) > 4:
                    ranking[i] += self._freq[w]
        sentsindx = self._rank(ranking, n)
        return [sents[j].encode('ascii', errors='backslashreplace') for j in sentsindx]

    def _rank(self, ranking, n):
        """Return the indices of the n sentences with the highest ranking."""
        return nlargest(n, ranking, key=ranking.get)
|
lf._types_of_builtin_functions
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
    # Placeholder generator for declaration kinds that emit no C code.
    pass
def _loaded_noop(self, tp, name, module, **kwds):
    # Placeholder loader for declaration kinds that need no load step.
    pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
    """Emit C code that converts PyObject *fromvar* into the C variable
    *tovar* of type *tp*; on failure the generated code runs *errcode*."""
    extraarg = ''
    if isinstance(tp, model.PrimitiveType):
        if tp.is_integer_type() and tp.name != '_Bool':
            # All integer primitives share one converter, parameterized by
            # the C type name passed as an extra argument.
            converter = '_cffi_to_c_int'
            extraarg = ', %s' % tp.name
        else:
            converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
                                               tp.name.replace(' ', '_'))
        errvalue = '-1'
    #
    elif isinstance(tp, model.PointerType):
        self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
                                                tovar, errcode)
        return
    #
    elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
        # a struct (not a struct pointer) as a function argument
        self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                   % (tovar, self._gettypenum(tp), fromvar))
        self._prnt(' %s;' % errcode)
        return
    #
    elif isinstance(tp, model.FunctionPtrType):
        converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
        extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
        errvalue = 'NULL'
    #
    else:
        raise NotImplementedError(tp)
    #
    self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
    # A converter returning errvalue may mean a real value or an error;
    # PyErr_Occurred() disambiguates in the generated code.
    self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
        tovar, tp.get_c_name(''), errvalue))
    self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars, freelines):
    """Register extra C locals/cleanup lines needed for pointer arguments."""
    if not isinstance(tp, model.PointerType):
        return
    localvars.add('Py_ssize_t datasize')
    localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
    freelines.add('if (large_args_free != NULL)'
                  ' _cffi_free_array_arguments(large_args_free);')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
    """Emit C code converting *fromvar* into the pointer/array argument *tovar*.

    Small buffers are placed on the C stack with alloca(); larger ones are
    allocated by the helper and tracked via large_args_free for cleanup.
    """
    self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
    self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
        self._gettypenum(tp), fromvar, tovar))
    self._prnt(' if (datasize != 0) {')
    # Up to 640 bytes go on the stack; otherwise the helper heap-allocates.
    self._prnt(' %s = ((size_t)datasize) <= 640 ? '
               'alloca((size_t)datasize) : NULL;' % (tovar,))
    self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
               '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
    self._prnt(' datasize, &large_args_free) < 0)')
    self._prnt(' %s;' % errcode)
    self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
    """Return a C expression converting C value *var* of type *tp* back into
    a PyObject*; *context* is only used in error messages."""
    if isinstance(tp, model.PrimitiveType):
        if tp.is_integer_type() and tp.name != '_Bool':
            return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
        elif tp.name != 'long double':
            return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
        else:
            # long double has no direct converter; go through a deref.
            return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
    elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
        return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
            var, self._gettypenum(tp))
    elif isinstance(tp, model.ArrayType):
        # Arrays are returned as a pointer to their item type.
        return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
            var, self._gettypenum(model.PointerType(tp.item)))
    elif isinstance(tp, model.StructOrUnion):
        if tp.fldnames is None:
            raise TypeError("'%s' is used as %s, but is opaque" % (
                tp._get_c_name(), context))
        return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
            var, self._gettypenum(tp))
    elif isinstance(tp, model.EnumType):
        return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
            var, self._gettypenum(tp))
    else:
        raise NotImplementedError(tp)
# ----------
# typedefs: generate no code so far; these aliases make the generic
# dispatch in _generate()/_load() fall through to no-ops.
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
    """Collect the types referenced by the function declaration *tp*."""
    assert isinstance(tp, model.FunctionPtrType)
    if tp.ellipsis:
        self._do_collect_type(tp)
        return
    # don't call _do_collect_type(tp) in this common case,
    # otherwise test_autofilled_struct_as_argument fails
    for argtype in tp.args:
        self._do_collect_type(argtype)
    self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
freelines = set()
for type in tp.args:
self._extra_local_variables(type, localvars, freelines)
for decl in sorted(localvars):
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
prnt(' PyObject *pyresult;')
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert |
# -*- coding: utf-8 -*-
# Generated by Django 1.10a1 on 2016-06-19 04:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``post`` model."""

    # First migration of this app.
    initial = True

    dependencies = [
        # 'author' below targets the (possibly custom) swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
"""Support for monitoring Repetier Server Sensors."""
from datetime import datetime, timezone
import logging
import time

from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect

from . import REPETIER_API, SENSOR_TYPES, UPDATE_SIGNAL, RepetierSensorEntityDescription
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the available Repetier Server sensors."""
    if discovery_info is None:
        return

    # Map each discovered sensor type to the entity class that renders it.
    sensor_map = {
        "bed_temperature": RepetierTempSensor,
        "extruder_temperature": RepetierTempSensor,
        "chamber_temperature": RepetierTempSensor,
        "current_state": RepetierSensor,
        "current_job": RepetierJobSensor,
        "job_end": RepetierJobEndSensor,
        "job_start": RepetierJobStartSensor,
    }

    entities = []
    for info in discovery_info:
        api = hass.data[REPETIER_API][info["printer_name"]]
        sensor_type = info["sensor_type"]
        temp_id = info["temp_id"]
        description = SENSOR_TYPES[sensor_type]
        name = f"{info['name']}{description.name or ''}"
        if temp_id is not None:
            _LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
            # Disambiguate multiple sensors of the same kind by temp id.
            name = f"{name}{temp_id}"
        entity_cls = sensor_map[sensor_type]
        entities.append(
            entity_cls(api, temp_id, name, info["printer_id"], description)
        )
    add_entities(entities, True)
class RepetierSensor(SensorEntity):
    """Class to create and populate a Repetier Sensor."""

    entity_description: RepetierSensorEntityDescription
    # Data arrives via dispatcher signals, not polling.
    _attr_should_poll = False

    def __init__(
        self,
        api,
        temp_id,
        name,
        printer_id,
        description: RepetierSensorEntityDescription,
    ):
        """Init new sensor."""
        self.entity_description = description
        self._api = api
        self._attributes: dict = {}
        self._temp_id = temp_id
        self._printer_id = printer_id
        self._state = None
        self._attr_name = name
        # Unavailable until the first successful data fetch.
        self._attr_available = False

    @property
    def extra_state_attributes(self):
        """Return sensor attributes."""
        return self._attributes

    @property
    def native_value(self):
        """Return sensor state."""
        return self._state

    @callback
    def update_callback(self):
        """Get new data and update state."""
        self.async_schedule_update_ha_state(True)

    async def async_added_to_hass(self):
        """Connect update callbacks."""
        self.async_on_remove(
            async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self.update_callback)
        )

    def _get_data(self):
        """Return new data from the api cache, or None (marking unavailable)."""
        sensor_type = self.entity_description.key
        data = self._api.get_data(self._printer_id, sensor_type, self._temp_id)
        if data is None:
            _LOGGER.debug("Data not found for %s and %s", sensor_type, self._temp_id)
            self._attr_available = False
            return None
        self._attr_available = True
        return data

    def update(self):
        """Update the sensor."""
        if (data := self._get_data()) is None:
            return
        # "state" holds the value; the remaining keys become attributes.
        state = data.pop("state")
        _LOGGER.debug("Printer %s State %s", self.name, state)
        self._attributes.update(data)
        self._state = state
class RepetierTempSensor(RepetierSensor):
    """Represent a Repetier temp sensor."""

    @property
    def native_value(self):
        """Return the temperature, rounded to two decimals."""
        state = self._state
        return None if state is None else round(state, 2)

    def update(self):
        """Refresh cached data and record the current temperature."""
        data = self._get_data()
        if data is None:
            return
        state = data.pop("state")
        _LOGGER.debug(
            "Printer %s Setpoint: %s, Temp: %s", self.name, data["temp_set"], state
        )
        self._attributes.update(data)
        self._state = state
class RepetierJobSensor(RepetierSensor):
    """Represent a Repetier job sensor."""

    @property
    def native_value(self):
        """Return the job progress, rounded to two decimals."""
        state = self._state
        return round(state, 2) if state is not None else None
class RepetierJobEndSensor(RepetierSensor):
    """Class to create and populate a Repetier Job End timestamp Sensor."""

    _attr_device_class = SensorDeviceClass.TIMESTAMP

    def update(self):
        """Update the sensor with the estimated end time of the running job.

        End time is the job start plus the total print time.  Sensors with
        device_class TIMESTAMP must report timezone-aware datetimes, so the
        value is built in UTC (utcfromtimestamp returned a naive datetime
        and is deprecated since Python 3.12).
        """
        if (data := self._get_data()) is None:
            return
        job_name = data["job_name"]
        start = data["start"]
        print_time = data["print_time"]
        from_start = data["from_start"]
        time_end = start + round(print_time, 0)
        self._state = datetime.fromtimestamp(time_end, tz=timezone.utc)
        remaining = print_time - from_start
        remaining_secs = int(round(remaining, 0))
        _LOGGER.debug(
            "Job %s remaining %s",
            job_name,
            time.strftime("%H:%M:%S", time.gmtime(remaining_secs)),
        )
class RepetierJobStartSensor(RepetierSensor):
    """Class to create and populate a Repetier Job Start timestamp Sensor."""

    _attr_device_class = SensorDeviceClass.TIMESTAMP

    def update(self):
        """Update the sensor with the start time of the running job.

        Sensors with device_class TIMESTAMP must report timezone-aware
        datetimes, so the value is built in UTC (utcfromtimestamp returned
        a naive datetime and is deprecated since Python 3.12).
        """
        if (data := self._get_data()) is None:
            return
        job_name = data["job_name"]
        start = data["start"]
        from_start = data["from_start"]
        self._state = datetime.fromtimestamp(start, tz=timezone.utc)
        elapsed_secs = int(round(from_start, 0))
        _LOGGER.debug(
            "Job %s elapsed %s",
            job_name,
            time.strftime("%H:%M:%S", time.gmtime(elapsed_secs)),
        )
|
# lesson4/exercises.py
# Control flow and conditionals
#
# This file contains exercises about Python conditionals.
# Last lesson, we encountered the boolean type.
# Python uses booleans to evaluate conditions.
# Last time, we directly assigned boolean values True and False, but booleans are
# also returned by comparison operations.
# 1. Comparison Operators
# There are multiple comparis | on operators, the most common ones are:
# == for equality
# != for inequality
# | > for greater than
# < for less than
# >= for greater than or equal
# <= for less than or equal
#
# Don't mistake the == operator with the = operator that we learned when studying
# variables.
# The == operator (equal to) asks whether two values are the same as each other and
# returns a boolean (True or False)
# The = operator (assignment) puts the value on the right into the variable on
# the left, and returns nothing.
# Demonstration: assignment (=) vs. comparison (==).
x = 2 # assignment!
print(x == 2) # comparison! prints out True
print(x == 3) # comparison! prints out False
print(x < 3) # comparison! prints out True
# Exercise 1:
# Try different operators and expressions and see what they evaluate to.
# Experiment with the different types we learned: float, int and even
# strings!
# Remember, you also can compare two variables to each other!
# What happens when you try to compare strings?
# 2. Boolean Operators
# There are three boolean operators (or, and, not). These are used to formulate
# more complex boolean expressions.
#
# and, or: these are called binary operators, because they take in 2 boolean
# values.
# Uncomment the lines below, and see what they evaluate to.
# print("True and True equals " + str(True and True))
# print("True and False equals " + str(True and False))
# print("True or True equals " + str(True or True))
# print("True or False equals " + str(True or False))
# The and operator evaluates an expression to True if both Boolean values are
# True; otherwise, it evaluates to False.
# The or operator evaluates an expression to True if either of the two Boolean
# values is True. If both are False, it evaluates to False.
# Truth tables
#
# and truth table:
# True and True -> True
# True and False -> False
# False and True -> False
# False and False -> False
#
#
# or truth table:
# True or True -> True
# True or False -> True
# False or True -> True
# False or False -> False
# The not operator only takes in a single boolean value. It simply inverts the
# boolean value.
# Uncomment the lines below and see what they evaluate to
# print(not True)
# print(not False)
# print(not not True)
# Exercise 2: Creating complex boolean expressions.
# Create three boolean expressions with at least 2 binary operators and 2
# comparison operators. Store each result in a variable and print it.
# For example,
# mybool1 = (2 + 2 == 4 and not 2 + 2 == 5 and 2 * 2 == 2 + 2)
# name = "Maria"
# eggs = 5
# my_age = 21
# mybool2 = (name != "Maria" and not eggs == 5 and my_age < 18)
# Again, experiment with different variable types. See what works and what
# gives an error. If there is an error, can you find out why?
# 3. Conditionals
#
# The Python programs we've written so far have had one-track minds: they can
# add two numbers or print something, but they don't have the ability to pick
# one of these outcomes over the other.
# When developing games, sometimes we'd like our code to be able to make
# decisions.
# Control flow gives us this ability to choose among different paths depending
# on what else is happening in the program.
# The control flow statements we will be learning in this lesson are: if, elif,
# and else.
# Each of these flow control statements decides what to do based on whether its
# condition is True or False.
# In the code below, change the value of the variable name and see what happens
# In particular, try making it equal to "Maria"
name = ""
if name == "Maria":
print("Hi, Maria.")
# The if-statement seen above means the following.
# "If this condition is true, execute the code in the block."
# In Python, an if statement consists of the following:
# The if keyword
# A condition (that is, an expression that evaluates to True or False)
# A colon
# Starting on the next line, an indented block of code (called the if block)
# VERY IMPORTANT: Indentation in Python.
# Lines of Python code can be grouped together in blocks. You can tell when a
# block begins and ends from the indentation of the lines of code. There are
# three rules for blocks.
# Blocks begin when the indentation increases.
# Blocks can contain other blocks.
# Blocks end when the indentation decreases to zero or to an outer
# block's indentation.
# Blocks in Python are indented by 4 spaces more than its containing block.
# Usually, the TAB button will automatically input 4 spaces in IDLE.
# The piece of code below shows how indentation works.
name = "Joana"
password = "abacate"
if name == "Joana":
print("Hello Joana")
if password == "abacate":
print("Access granted.")
else:
print("Wrong password.")
# How many levels of indentations are there?
# How many blocks of code?
# An if-statement can optionally be followed by an else-statement.
# The else block will be executed when the if statement's condition is False.
# Try changing the value of the variable password from the piece of code above
# and see what happens.
# The if and else statements allow us to make simple decisions. If we want to
# make our code more complex, we can use an elif statement.
# The elif statement is an "else if" statement that always follows an if or
# another elif statement. It provides another condition that is checked only
# if the previous conditions were False.
name = "Joao"
age = 16
if name == "Maria":
print("Hi, Maria. You might be underage, but it doesn't matter :)")
elif age < 18:
print("You are underage, and you're not Maria! Sorry.")
# Demonstration: elif order matters — the "> 10" branch is intentionally
# unreachable because "> 5" is checked first.
how_many_potatoes = 4
if how_many_potatoes > 20:
    print("lots of potatoes")
elif how_many_potatoes > 5:
    print("some potatoes, but not more than 20!")
elif how_many_potatoes > 10:
    print("the program will never get here " + \
          "because the previous case will be " + \
          "true if there are more than 5 potatoes.")
elif how_many_potatoes > 0:
    print("a few potatoes")
# It is possible to have multiple elif statements.
# However, notice that a control flow statement must always start with an if
# statement, and else statements, if they exist, must always come at the end.
# Exercise 3:
# To practice your newly acquired skills of control flows. Go to the file
# guard.py and fill in the blanks to create a program that detects whether
# someone should be allowed in a super secret club.
# Exercise 4:
# Now we will write our first game in python! Woooooooooah, we are so awesome!
# Go to the file guess_game.py and follow the instructions.
|
import FlicketUser, FlicketGroup
from app | lication.flicket.scripts.hash_password im | port hash_password
# Username of the built-in administrator account created by setup.
admin = 'admin'

# configuration defaults for flicket
flicket_config = {'posts_per_page': 50,
                  'allowed_extensions': ['txt', 'log', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'msg', 'doc', 'docx', 'ppt',
                                         'pptx', 'xls', 'xlsx'],
                  'ticket_upload_folder': 'application/flicket/static/flicket_uploads',
                  'avatar_upload_folder': 'application/flicket/static/flicket_avatars',
                  }

# departments and categories defaults for flicket
depart_categories = [
    {'department': 'Design', 'category': ['Dataset', 'ECN', 'ECR', 'Other']},
    {'department': 'Manufacturing', 'category': ['Process Planning', 'Tooling', 'Equipment', 'Other']},
    {'department': 'IT', 'category': ['Internet', 'Intranet', 'Other']},
    {'department': 'Quality', 'category': ['Procedures', 'Manuals', 'Other']},
    {'department': 'Human Resources', 'category': ['Holidays', 'Sick Leave', 'Other']},
    {'department': 'Commercial', 'category': ['Approved Suppliers', 'Other']},
]
class RunSetUP(Command):
def run(self):
    """Entry point: write config, then create the admin user, notifier,
    groups and default lookup data, committing once at the end."""
    WriteConfigJson().json_exists()
    username, password, email = self.get_admin_details()
    self.set_db_config_defaults()
    self.set_email_config()
    self.create_admin(username=username, password=password, email=email, job_title='admin')
    self.create_notifier()
    self.create_admin_group()
    self.create_default_ticket_status()
    self.create_default_priority_levels()
    self.create_default_depts()
    # commit changes to the database
    db.session.commit()
@staticmethod
def set_db_config_defaults(silent=False):
    """Populate FlicketConfig with default values.

    Does nothing if the config table already contains rows; prompts for
    the site base url only when the defaults are actually needed (the
    original prompted before checking, discarding the user's input).
    """
    count = FlicketConfig.query.count()
    if count > 0:
        if not silent:
            print('Flicket Config database seems to already be populated. Check values via application.')
        return
    print('Please enter site base url including port. For example this would be "http://192.168.1.1:8000".')
    base_url = input('Base url> ')
    set_config = FlicketConfig(
        posts_per_page=flicket_config['posts_per_page'],
        allowed_extensions=', '.join(flicket_config['allowed_extensions']),
        ticket_upload_folder=flicket_config['ticket_upload_folder'],
        avatar_upload_folder=flicket_config['avatar_upload_folder'],
        base_url=base_url,
        application_title='Flicket',
        mail_max_emails=10,
        mail_port=465
    )
    if not silent:
        print('Adding config values to database.')
    db.session.add(set_config)
    db.session.commit()
@staticmethod
def get_admin_details():
    """Prompt for the admin account's email and password (with confirmation).

    Returns (username, password, email) once both password entries match.
    """
    # todo: add some password validation to prevent easy passwords being entered
    email = input("Enter admin email: ")
    while True:
        password1 = getpass("Enter password: ")
        password2 = getpass("Re-enter password: ")
        if password1 == password2:
            return admin, password1, email
        print("Passwords do not match, please try again.\n\n")
@staticmethod
def create_admin(username, password, email, job_title, silent=False):
    """ creates flicket_admin user (no-op if the username already exists). """
    query = FlicketUser.query.filter_by(username=username)
    if query.count() == 0:
        # Password is stored hashed, never in plain text.
        add_user = FlicketUser(username=username,
                               name=username,
                               password=hash_password(password),
                               email=email,
                               job_title=job_title,
                               date_added=datetime.datetime.now())
        db.session.add(add_user)
        if not silent:
            print('Admin user added.')
    else:
        print('Admin user is already added.')
@staticmethod
def create_notifier():
    """ creates user for notifications (details come from app config). """
    query = FlicketUser.query.filter_by(username=app.config['NOTIFICATION']['username'])
    if query.count() == 0:
        add_user = FlicketUser(username=app.config['NOTIFICATION']['username'],
                               name=app.config['NOTIFICATION']['name'],
                               password=hash_password(app.config['NOTIFICATION']['password']),
                               email=app.config['NOTIFICATION']['email'],
                               date_added=datetime.datetime.now())
        db.session.add(add_user)
        print("Notification user added.")
    else:
        print('Notification user already added.')
@staticmethod
def create_admin_group(silent=False):
    """ creates flicket_admin and super_user group and assigns flicket_admin to group admin. """
    query = FlicketGroup.query.filter_by(group_name=app.config['ADMIN_GROUP_NAME'])
    if query.count() == 0:
        add_group = FlicketGroup(group_name=app.config['ADMIN_GROUP_NAME'])
        db.session.add(add_group)
        if not silent:
            print("Admin group added")
    user = FlicketUser.query.filter_by(username=admin).first()
    group = FlicketGroup.query.filter_by(group_name=app.config['ADMIN_GROUP_NAME']).first()
    in_group = False
    # see if user flicket_admin is already in flicket_admin group.
    for g in group.users:
        if g.username == admin:
            in_group = True
            break
    if not in_group:
        group.users.append(user)
        if not silent:
            print("Added flicket_admin user to flicket_admin group.")
    # create the super_user group
    query = FlicketGroup.query.filter_by(group_name=app.config['SUPER_USER_GROUP_NAME'])
    if query.count() == 0:
        add_group = FlicketGroup(group_name=app.config['SUPER_USER_GROUP_NAME'])
        db.session.add(add_group)
        if not silent:
            print("super_user group added")
# noinspection PyArgumentList
@staticmethod
def create_default_ticket_status(silent=False):
    """ set up default status levels (skips any that already exist). """
    for status_name in ['Open', 'Closed', 'In Work', 'Awaiting Information']:
        if FlicketStatus.query.filter_by(status=status_name).first():
            continue
        db.session.add(FlicketStatus(status=status_name))
        if not silent:
            print('Added status level {}'.format(status_name))
@staticmethod
def create_default_priority_levels(silent=False):
    """ set up default priority levels (skips any that already exist). """
    for level in ['low', 'medium', 'high']:
        if FlicketPriority.query.filter_by(priority=level).first():
            continue
        db.session.add(FlicketPriority(priority=level))
        if not silent:
            print('Added priority level {}'.format(level))
@staticmethod
def create_default_depts(silent=False):
""" creates default departments and categories. """
for d in depart_categories:
department = d['department']
categories = d['category']
query = FlicketDepartment.query.filter_by(department=department).first()
if not query:
add_department = FlicketDepartment(
department=department
)
db.session.add(add_department)
if not silent:
print("department {} added.".format(department))
for c in categories:
query = FlicketCategory.query.filter_by(category=c).first()
if not query:
add_category = FlicketCategory(
category=c,
department=add_department
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from codecs import open
from setuptools import setup
# Use the Azure wheel customisations when available; otherwise fall back to
# a plain setup (bdist_wheel hook disabled).
try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")
    cmdclass = {}
VERSION = "2.0.17+dev"
# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
try:
with open('azure/cli/__init__.py', 'r', encoding='utf-8') as f:
content = f.read()
except OSError:
pass
else:
import re
import sys
m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
if not m:
print('Could not find __version__ in azure/cli/__init__.py')
sys.exit(1)
if m.group(1) != VERSION:
print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
sys.exit(1)
# Trove classifiers describing the supported interpreters and license.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: MIT License',
]

# The azure-cli meta-package pulls in every command module as a dependency.
DEPENDENCIES = [
    'azure-cli-acr',
    'azure-cli-acs',
    'azure-cli-appservice',
    'azure-cli-batch',
    'azure-cli-backup',
    'azure-cli-billing',
    'azure-cli-cdn',
    'azure-cli-cloud',
    'azure-cli-cognitiveservices',
    'azure-cli-component',
    'azure-cli-container',
    'azure-cli-configure',
    'azure-cli-consumption',
    'azure-cli-core',
    'azure-cli-cosmosdb',
    'azure-cli-dla',
    'azure-cli-dls',
    'azure-cli-eventgrid',
    'azure-cli-extension',
    'azure-cli-feedback',
    'azure-cli-find',
    'azure-cli-interactive',
    'azure-cli-iot',
    'azure-cli-keyvault',
    'azure-cli-lab',
    'azure-cli-monitor',
    'azure-cli-network',
    'azure-cli-nspkg',
    'azure-cli-profile',
    'azure-cli-rdbms',
    'azure-cli-redis',
    'azure-cli-resource',
    'azure-cli-role',
    'azure-cli-sql',
    'azure-cli-storage',
    'azure-cli-vm',
    'azure-cli-servicefabric'
]

with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()

setup(
    name='azure-cli',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli',
    zip_safe=False,
    classifiers=CLASSIFIERS,
    scripts=[
        'az',
        'az.completion.sh',
        'az.bat',
    ],
    packages=[
        'azure',
        'azure.cli',
    ],
    install_requires=DEPENDENCIES,
    cmdclass=cmdclass
)
|
NotImplementedError
def clear(self):
    """Drop every object currently held by this collection."""
    self.listing = dict()
def find(self, name=None, return_list=False, no_errors=False, **kargs):
    """
    Return first object in the collection that matches all item='value'
    pairs passed, else return None if no objects can be found.
    When return_list is set, can also return a list. Empty list
    would be returned instead of None in that case.
    """
    matches = []
    # support the old style invocation without kwargs
    if name is not None:
        kargs["name"] = name
    # translate command-line key aliases to datastruct field names
    kargs = self.__rekey(kargs)
    # no arguments is an error, so we don't return a false match
    if len(kargs) == 0:
        raise CX(_("calling find with no arguments"))
    # performance: if the only key is name we can skip the whole loop
    # (NOTE: has_key/iteritems below make this Python 2 only)
    if len(kargs) == 1 and kargs.has_key("name") and not return_list:
        return self.listing.get(kargs["name"].lower(), None)
    for (name, obj) in self.listing.iteritems():
        if obj.find_match(kargs, no_errors=no_errors):
            matches.append(obj)
    if not return_list:
        if len(matches) == 0:
            return None
        return matches[0]
    else:
        return matches
# Maps short command-line search keys ("cobbler system find") to the
# corresponding datastruct field names; see __rekey().
SEARCH_REKEY = {
   'kopts' : 'kernel_options',
   'ksmeta' : 'ks_meta',
   'inherit' : 'parent',
   'ip' : 'ip_address',
   'mac' : 'mac_address',
   'virt-file-size' : 'virt_file_size',
   'virt-ram' : 'virt_ram',
   'virt-path' : 'virt_path',
   'virt-type' : 'virt_type',
   'virt-bridge' : 'virt_bridge',
   'virt-cpus' : 'virt_cpus',
   'dhcp-tag' : 'dhcp_tag',
   'netboot-enabled' : 'netboot_enabled'
}
def __rekey(self, hash):
    """
    Find calls from the command line ("cobbler system find")
    don't always match with the keys from the datastructs and this
    makes them both line up without breaking compatibility with either.
    Thankfully we don't have a LOT to remap.
    """
    newhash = {}
    for key in hash.keys():
        # Translate known aliases; pass unknown keys through unchanged.
        newhash[self.SEARCH_REKEY.get(key, key)] = hash[key]
    return newhash
def to_datastruct(self):
    """Serialize the collection as a list of per-item datastructs."""
    return [item.to_datastruct() for item in self.listing.values()]
def from_datastruct(self, datastruct):
    """Rebuild collection items from serialized *datastruct* (no-op on None)."""
    if datastruct is None:
        return
    for seed_data in datastruct:
        self.add(self.factory_produce(self.config, seed_data))
def rename(self,ref,newname,with_sync=True,with_triggers=True):
    """
    Allows an object "ref" to be given a newname without affecting the rest
    of the object tree.

    Clones the object under the new name, repoints direct children at the
    clone, then removes the original.  Returns True on success.
    """
    # make a copy of the object, but give it a new name.
    oldname = ref.name
    newref = ref.make_clone()
    newref.set_name(newname)
    self.add(newref, with_triggers=with_triggers,save=True)
    # now descend to any direct ancestors and point them at the new object allowing
    # the original object to be removed without orphanage. Direct ancestors
    # will either be profiles or systems. Note that we do have to care as
    # set_parent is only really meaningful for subprofiles. We ideally want a more
    # generic set_parent.
    kids = ref.get_children()
    for k in kids:
        if k.COLLECTION_TYPE == "distro":
            raise CX(_("internal error, not expected to have distro child objects"))
        elif k.COLLECTION_TYPE == "profile":
            # subprofiles are reparented; top-level profiles repoint their distro
            if k.parent != "":
                k.set_parent(newname)
            else:
                k.set_distro(newname)
            self.api.profiles().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
        elif k.COLLECTION_TYPE == "system":
            k.set_profile(newname)
            self.api.systems().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
        elif k.COLLECTION_TYPE == "repo":
            raise CX(_("internal error, not expected to have repo child objects"))
        else:
            raise CX(_("internal error, unknown child type (%s), cannot finish rename" % k.COLLECTION_TYPE))
    # now delete the old version
    self.remove(oldname, with_delete=True, with_triggers=with_triggers)
    return True
def add(self,ref,save=False,with_copy=False,with_triggers=True,with_sync=True,quick_pxe_update=False,check_for_duplicate_names=False,check_for_duplicate_netinfo=False):
"""
Add an object to the collection, if it's valid. Returns True
if the object was added to the collection. Returns False if the
object specified by ref deems itself invalid (and therefore
won't be added to the collection).
with_copy is a bit of a misnomer, but lots of internal add operations
can run with "with_copy" as False. True means a real final commit, as if
entered from the command line (or basically, by a user).
With with_copy as False, the particular add call might just be being run
during deserialization, in which case extra semantics around the add don't really apply.
So, in that case, don't run any triggers and don't deal with any actual files.
"""
if self.lite_sync is None:
self.lite_sync = action_litesync.BootLiteSync(self.config)
# migration path for old API parameter that I've renamed.
if with_copy and not save:
save = with_copy
if not save:
# for people that aren't quite aware of the API
# if not saving the object, you can't run these features
with_triggers = False
with_sync = False
# Avoid adding objects to the collection
# if an object of the same/ip/mac already exists.
self.__duplication_checks(ref,check_for_duplicate_names,check_for_duplicate_netinfo)
if ref is None or not ref.is_valid():
raise CX(_("insufficient or invalid arguments supplied"))
if ref.COLLECTION_TYPE != self.collection_type():
raise CX(_("API error: storing wrong data type in collection"))
if not save:
# don't need to run triggers, so add it already ...
self.listing[ref.name.lower()] = ref
# perform filesystem operations
if save:
self.log_func("saving %s %s" % (self.collection_type(), ref.name))
# failure of a pre trigger will prevent the object from being added
if with_triggers:
self._run_triggers(ref,"/var/lib/cobbler/triggers/add/%s/pre/*" % self.collection_type())
self.listing[ref.name.lower()] = ref
# save just this item if possible, if not, save
# the whole collection
self.config.serialize_item(self, ref)
if with_sync:
if isinstance(ref, item_system.System):
self.lite_sync.add_single_system(ref.name)
elif isinstance(ref, item_profile.Profile):
self.lite_sync.add_single_profile(ref.name)
elif isinstance(ref, item_distro.Distro):
self.lite_sync.add_single_distro(ref.name)
elif isinstance(ref, item_image.Image):
pass
elif isinstance(ref, item_repo.Repo):
pass
else:
print _("Internal error. Object type not recognized: %s") % type(ref)
if not with_sync and quick_pxe_update:
if isinstance(ref, item_system.System):
self.lite_sync.update_sy |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './viewer.ui'
#
# Created: Sun Aug 23 04:04:27 2009
# by: PyQt4 UI code generator 4.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
    """pyuic4-generated UI class for the spectrogram viewer main window.

    Generated from ./viewer.ui (see file header); do not hand-edit layout
    values here — regenerate from the .ui file instead.
    """
    def setupUi(self, MainWindow):
        # Builds the full widget tree and layouts for MainWindow.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(566,421)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: file path entry, browse button, pixels-per-second spinner.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.pathEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathEdit.setObjectName("pathEdit")
        self.horizontalLayout.addWidget(self.pathEdit)
        self.browseButton = QtGui.QPushButton(self.centralwidget)
        self.browseButton.setObjectName("browseButton")
        self.horizontalLayout.addWidget(self.browseButton)
        spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout.addWidget(self.label_3)
        self.ppsSpin = QtGui.QSpinBox(self.centralwidget)
        self.ppsSpin.setMaximum(10000)
        self.ppsSpin.setSingleStep(10)
        self.ppsSpin.setProperty("value",QtCore.QVariant(100))
        self.ppsSpin.setObjectName("ppsSpin")
        self.horizontalLayout.addWidget(self.ppsSpin)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Middle: scrollable area holding the spectrogram image (CursorLabel).
        self.scrollArea = QtGui.QScrollArea(self.centralwidget)
        self.scrollArea.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents_2 = QtGui.QWidget(self.scrollArea)
        self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0,1,513,290))
        self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.scrollAreaWidgetContents_2)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.imageLabel = CursorLabel(self.scrollAreaWidgetContents_2)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.imageLabel.sizePolicy().hasHeightForWidth())
        self.imageLabel.setSizePolicy(sizePolicy)
        self.imageLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.imageLabel.setObjectName("imageLabel")
        self.horizontalLayout_3.addWidget(self.imageLabel)
        self.verticalLayout.addWidget(self.scrollArea)
        # Progress bar under the image.
        self.progress = QtGui.QProgressBar(self.centralwidget)
        self.progress.setProperty("value",QtCore.QVariant(0))
        self.progress.setTextVisible(False)
        self.progress.setObjectName("progress")
        self.verticalLayout.addWidget(self.progress)
        # Bottom row: command checkbox + edit, "centered" checkbox, play/stop.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.commandCheck = QtGui.QCheckBox(self.centralwidget)
        self.commandCheck.setObjectName("commandCheck")
        self.horizontalLayout_2.addWidget(self.commandCheck)
        self.commandEdit = QtGui.QLineEdit(self.centralwidget)
        self.commandEdit.setObjectName("commandEdit")
        self.horizontalLayout_2.addWidget(self.commandEdit)
        spacerItem1 = QtGui.QSpacerItem(20,20,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.centeredCheck = QtGui.QCheckBox(self.centralwidget)
        self.centeredCheck.setChecked(True)
        self.centeredCheck.setObjectName("centeredCheck")
        self.horizontalLayout_2.addWidget(self.centeredCheck)
        spacerItem2 = QtGui.QSpacerItem(20,20,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem2)
        self.playButton = QtGui.QPushButton(self.centralwidget)
        # Dark-red button brush applied to all palette states of the play button.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(170,0,0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active,QtGui.QPalette.Button,brush)
        brush = QtGui.QBrush(QtGui.QColor(170,0,0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive,QtGui.QPalette.Button,brush)
        brush = QtGui.QBrush(QtGui.QColor(170,0,0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled,QtGui.QPalette.Button,brush)
        self.playButton.setPalette(palette)
        self.playButton.setObjectName("playButton")
        self.horizontalLayout_2.addWidget(self.playButton)
        self.stopButton = QtGui.QPushButton(self.centralwidget)
        self.stopButton.setObjectName("stopButton")
        self.horizontalLayout_2.addWidget(self.stopButton)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Assigns all user-visible (translatable) strings.
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("MainWindow", "Spectrogram", None, QtGui.QApplication.UnicodeUTF8))
        self.browseButton.setText(QtGui.QApplication.translate("MainWindow", "Browse", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Pixels per second", None, QtGui.QApplication.UnicodeUTF8))
        self.imageLabel.setText(QtGui.QApplication.translate("MainWindow", "[image]", None, QtGui.QApplication.UnicodeUTF8))
        self.commandCheck.setText(QtGui.QApplication.translate("MainWindow", "Command", None, QtGui.QApplication.UnicodeUTF8))
        self.commandEdit.setText(QtGui.QApplication.translate("MainWindow", "xmms -p", None, QtGui.QApplication.UnicodeUTF8))
        self.centeredCheck.setText(QtGui.QApplication.translate("MainWindow", "Centered", None, QtGui.QApplication.UnicodeUTF8))
        self.playButton.setText(QtGui.QApplication.translate("MainWindow", "Play", None, QtGui.QApplication.UnicodeUTF8))
        self.stopButton.setText(QtGui.QApplication.translate("MainWindow", "Stop", None, QtGui.QApplication.UnicodeUTF8))
from cursorlabel import CursorLabel
|
# -*- coding: utf-8 -*-
"""
Utility that imports a function.
"""
# Future
from __future__ import absolute_import, division, print_function, \
uni | code_literals, with_statement
def import_function(function):
    """Imports function given by qualified package name.

    Loads the module named by ``function`` and returns its attribute ``f``
    (i.e. equivalent to ``from <function> import f``).
    """
    # function = getattr(__import__(function["module"], globals(), locals(), ['function'], 0), function["name"]) TODO
    module = __import__(function, globals(), locals(), ['function'], 0)
    # Note: this always fetches the function named "f" from the module.
    return module.f
|
#!/usr/bin/env python3
"""
>>> baralho = Baralho()
>>> len(baralho)
52
>>> baralho[0]
Carta(valor='2', naipe='paus')
>>> baralho[-1]
Carta(valor='A', naipe='espadas')
>>> from random import choice
>>> choice(baralho) #doctest:+SKIP
Carta(valor='4', naipe='paus')
>>> choice(baralho) #doctest:+SKIP
Carta(valor='A', naipe='espadas')
>>> choice(baralho) #doctest:+SKIP
Carta(valor='8', naipe='espadas')
| >>> baralho[:5] #doctest:+NORMALIZE_WHITESPACE
[Carta(valor='2', naipe='paus'), Carta(valor='3', naipe='paus'),
Carta(valor='4', naipe='paus'), Carta(valor='5', naipe='paus'),
Carta(valor='6', naipe='paus')]
>>> baralho[-3:] #doctest:+NORMALIZE_WHITESPACE
[Carta(valor='Q', naipe='espadas'),
Carta(valor='K', naipe='espadas'),
Carta(valor='A', naipe='espadas')]
>>> for carta in baralho: | #doctest:+ELLIPSIS
... print(carta)
...
Carta(valor='2', naipe='paus')
Carta(valor='3', naipe='paus')
Carta(valor='4', naipe='paus')
...
Carta(valor='Q', naipe='espadas')
Carta(valor='K', naipe='espadas')
Carta(valor='A', naipe='espadas')
To generate a reversed listing:
::
>>> for carta in reversed(baralho): #doctest:+ELLIPSIS
... print(carta)
...
Carta(valor='A', naipe='espadas')
Carta(valor='K', naipe='espadas')
Carta(valor='Q', naipe='espadas')
...
Carta(valor='4', naipe='paus')
Carta(valor='3', naipe='paus')
Carta(valor='2', naipe='paus')
For a numbered listing, we use `enumerate`:
::
>>> for n, carta in enumerate(baralho, 1): #doctest:+ELLIPSIS
... print(format(n, '2'), carta)
...
1 Carta(valor='2', naipe='paus')
2 Carta(valor='3', naipe='paus')
3 Carta(valor='4', naipe='paus')
...
50 Carta(valor='Q', naipe='espadas')
51 Carta(valor='K', naipe='espadas')
52 Carta(valor='A', naipe='espadas')
Get all the Jacks in a baralho.
::
>>> [carta for carta in baralho if carta.valor=='J']
[Carta(valor='J', naipe='paus'), Carta(valor='J', naipe='ouros'), Carta(valor='J', naipe='copas'), Carta(valor='J', naipe='espadas')]
Ranking by alternate color naipes: ouros (lowest), followed by paus, copas, and espadas (highest).
>>> hand = [Carta(valor='2', naipe='ouros'), Carta(valor='2', naipe='paus'),
... Carta(valor='3', naipe='ouros'), Carta(valor='3', naipe='paus'),
... Carta(valor='A', naipe='espadas')]
>>> [cores_alternadas(carta) for carta in hand]
[0, 1, 4, 5, 51]
>>> hand = [Carta(valor='A', naipe='espadas'),
... Carta(valor='K', naipe='ouros'),
... Carta(valor='A', naipe='ouros')]
>>> for carta in sorted(hand,key=cores_alternadas):
... print(carta)
Carta(valor='K', naipe='ouros')
Carta(valor='A', naipe='ouros')
Carta(valor='A', naipe='espadas')
>>> for carta in sorted(baralho, key=cores_alternadas): #doctest:+ELLIPSIS
... print(carta)
Carta(valor='2', naipe='ouros')
Carta(valor='2', naipe='paus')
Carta(valor='2', naipe='copas')
Carta(valor='2', naipe='espadas')
Carta(valor='3', naipe='ouros')
...
Carta(valor='A', naipe='copas')
Carta(valor='A', naipe='espadas')
>>> from random import shuffle
>>> shuffle(baralho)
"""
import collections
Carta = collections.namedtuple('Carta', ['valor', 'naipe'])


class Baralho:
    """A 52-card French deck supporting len(), indexing and item assignment."""

    valores = [str(n) for n in range(2, 11)] + list('JQKA')
    naipes = 'paus ouros copas espadas'.split()

    def __init__(self):
        # Cards are ordered suit-major: all of 'paus' first, 'espadas' last.
        baralho = []
        for naipe in self.naipes:
            for valor in self.valores:
                baralho.append(Carta(valor, naipe))
        self.cartas = baralho

    def __len__(self):
        return len(self.cartas)

    def __getitem__(self, posicao):
        return self.cartas[posicao]

    def __setitem__(self, posicao, carta):
        # Assignment support makes the deck work with random.shuffle.
        self.cartas[posicao] = carta
def cores_alternadas(carta):
    """Sort key ranking by valor, then by alternating-color naipe order
    (ouros lowest, then paus, copas, espadas highest)."""
    ordem_naipes = 'ouros paus copas espadas'.split()
    indice_valor = Baralho.valores.index(carta.valor)
    return indice_valor * len(ordem_naipes) + ordem_naipes.index(carta.naipe)
|
md5_source = self.session.cmd("%s %s" % (params["md5_cmd"],
source_file))
try:
md5_source = md5_source.split(" ")[0]
except IndexError:
error.TestError("Failed to get md5 from source file,"
" output: '%s'" % md5_source)
else:
md5_source = None
self.session.cmd("%s %s %s" % (params["copy_cmd"], source_file,
dest_file))
logging.info("Succeed to copy file '%s' into floppy disk" %
source_file)
error.context("Checking if the file is unchanged after copy")
if md5_cmd:
md5_dest = self.session.cmd("%s %s" % (params["md5_cmd"],
dest_file))
try:
md5_dest = md5_dest.split(" ")[0]
except IndexError:
error.TestError("Failed to get md5 from dest file,"
" output: '%s'" % md5_dest)
if md5_source != md5_dest:
raise error.TestFail("File changed after copy to floppy")
else:
md5_dest = None
self.session.cmd("%s %s %s" % (params["diff_file_cmd"],
source_file, dest_file))
def c | lean(self):
clean_cmd = "%s %s" % (params["clean_cmd"], dest_file)
self.session.cmd(clean_cmd)
if self.dest_dir: |
self.session.cmd("umount %s" % self.dest_dir)
self.session.close()
    class Multihost(MiniSubtest):
        """Base for multi-host migration floppy tests.

        Prepares the migration helper and a floppy image on both hosts;
        the source host additionally boots the VM.
        """
        def test(self):
            # Selects the migration implementation from mig_protocol and
            # records src/dst host roles from params.
            error.context("Preparing migration env and floppies.")
            mig_protocol = params.get("mig_protocol", "tcp")
            self.mig_type = utils_test.MultihostMigration
            if mig_protocol == "fd":
                self.mig_type = utils_test.MultihostMigrationFd
            if mig_protocol == "exec":
                self.mig_type = utils_test.MultihostMigrationExec
            self.vms = params.get("vms").split(" ")
            self.srchost = params["hosts"][0]
            self.dsthost = params["hosts"][1]
            self.is_src = params["hostid"] == self.srchost
            self.mig = self.mig_type(test, params, env, False, )
            if self.is_src:
                # Source side: create the floppy and boot the VM.
                self.floppy = create_floppy(params)
                self.floppy_dir = os.path.dirname(self.floppy)
                params["start_vm"] = "yes"
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                vm.wait_for_login(timeout=login_timeout)
            else:
                # Destination side: create the floppy only (no VM boot).
                self.floppy = create_floppy(params, False)
                self.floppy_dir = os.path.dirname(self.floppy)
        def clean(self):
            # Tear down migration state; the floppy exists on both hosts but
            # is only removed where this host created it as source.
            self.mig.cleanup()
            if self.is_src:
                cleanup_floppy(self.floppy)
class test_multihost_write(Multihost):
def test(self):
super(test_multihost_write, self).test()
copy_timeout = int(params.get("copy_timeout", 480))
self.mount_dir = params["mount_dir"]
format_floppy_cmd = params["format_floppy_cmd"]
check_copy_path = params["check_copy_path"]
pid = None
sync_id = {'src': self.srchost,
'dst': self.dsthost,
"type": "file_trasfer"}
filename = "orig"
if self.is_src: # Starts in source
vm = env.get_vm(self.vms[0])
session = vm.wait_for_login(timeout=login_timeout)
if self.mount_dir:
session.cmd("rm -f %s" % (os.path.join(self.mount_dir,
filename)))
session.cmd("rm -f %s" % (check_copy_path))
# If mount_dir specified, treat guest as a Linux OS
# Some Linux distribution does not load floppy at boot
# and Windows needs time to load and init floppy driver
error.context("Prepare floppy for writing.")
if self.mount_dir:
lsmod = session.cmd("lsmod")
if not 'floppy' in lsmod:
session.cmd("modprobe floppy")
else:
time.sleep(20)
session.cmd(format_floppy_cmd)
error.context("Mount and copy data")
if self.mount_dir:
session.cmd("mount -t vfat %s %s" % (guest_floppy_path,
self.mount_dir),
timeout=30)
error.context("File copying test")
pid = lazy_copy(vm, os.path.join(self.mount_dir, filename),
check_copy_path, copy_timeout)
sync = SyncData(self.mig.master_id(), self.mig.hostid,
self.mig.hosts, sync_id, self.mig.sync_server)
pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]
self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)
if not self.is_src: # Starts in destination
vm = env.get_vm(self.vms[0])
session = vm.wait_for_login(timeout=login_timeout)
error.context("Wait for copy finishing.")
status = int(session.cmd_status("kill %s" % pid,
timeout=copy_timeout))
if not status in [0]:
raise error.TestFail("Copy process was terminatted with"
" error code %s" % (status))
session.cmd_status("kill -s SIGINT %s" % (pid),
timeout=copy_timeout)
error.context("Check floppy file checksum.")
md5_cmd = params.get("md5_cmd")
if md5_cmd:
md5_floppy = session.cmd("%s %s" % (params.get("md5_cmd"),
os.path.join(self.mount_dir, filename)))
try:
md5_floppy = md5_floppy.split(" ")[0]
except IndexError:
error.TestError("Failed to get md5 from source file,"
" output: '%s'" % md5_floppy)
md5_check = session.cmd("%s %s" % (params.get("md5_cmd"),
check_copy_path))
try:
md5_check = md5_check.split(" ")[0]
except IndexError:
error.TestError("Failed to get md5 from source file,"
" output: '%s'" % md5_floppy)
if md5_check != md5_floppy:
raise error.TestFail("There is mistake in copying,"
" it is possible to check file on vm.")
session.cmd("rm -f %s" % (os.path.join(self.mount_dir,
filename)))
session.cmd("rm -f %s" % (check_copy_path))
self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
'finish_floppy_test', login_timeout)
def clean(self):
super(test_multihost_write, self).clean()
class test_multihost_eject(Multihost):
def test(self):
super(test_multihost_eject, self).test()
self.mount_dir = params.get("mount_dir", None)
format_floppy_cmd = params["format_floppy_cmd"]
floppy = params["floppy_name"]
second_floppy = params["second_floppy_name"]
if not os.path.isabs(floppy):
floppy = os.p |
ch callback.cli/callback.C
class TestCallbackResults(unittest.TestCase):
    """Tests for CallbackBase item extraction and result cleaning."""

    def test_get_item(self):
        # Plain result: _get_item returns the raw item value.
        cb = CallbackBase()
        results = {'item': 'some_item'}
        res = cb._get_item(results)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(res, 'some_item')

    def test_get_item_no_log(self):
        # With _ansible_no_log set, the item must be censored.
        cb = CallbackBase()
        results = {'item': 'some_item', '_ansible_no_log': True}
        res = cb._get_item(results)
        self.assertEqual(res, "(censored due to no_log)")

        results = {'item': 'some_item', '_ansible_no_log': False}
        res = cb._get_item(results)
        self.assertEqual(res, "some_item")

    def test_clean_results(self):
        # _clean_results mutates the result dict in place for the given action.
        cb = CallbackBase()
        result = {'item': 'some_item',
                  'invocation': 'foo --bar whatever [some_json]',
                  'changed': True}
        self.assertIn('changed', result)
        self.assertIn('invocation', result)
        cb._clean_results(result, 'debug')
class TestCallbackDumpResults(unittest.TestCase):
    """Tests for the filtering done by CallbackBase._dump_results.

    Uses assertIn/assertNotIn instead of assertTrue/assertFalse on `in`
    expressions so failures report the actual haystack.
    """

    def test_internal_keys(self):
        # '_ansible_*' keys are stripped from the JSON output; ordinary keys
        # that merely contain 'ansible' survive.
        cb = CallbackBase()
        result = {'item': 'some_item',
                  '_ansible_some_var': 'SENTINEL',
                  'testing_ansible_out': 'should_be_left_in LEFTIN',
                  'invocation': 'foo --bar whatever [some_json]',
                  'some_dict_key': {'a_sub_dict_for_key': 'baz'},
                  'bad_dict_key': {'_ansible_internal_blah': 'SENTINEL'},
                  'changed': True}
        json_out = cb._dump_results(result)
        self.assertNotIn('"_ansible_', json_out)
        self.assertNotIn('SENTINEL', json_out)
        self.assertIn('LEFTIN', json_out)

    def test_no_log(self):
        # no_log results must hide all values behind a placeholder message.
        cb = CallbackBase()
        result = {'item': 'some_item',
                  '_ansible_no_log': True,
                  'some_secrets': 'SENTINEL'}
        json_out = cb._dump_results(result)
        self.assertNotIn('SENTINEL', json_out)
        self.assertIn('no_log', json_out)
        self.assertIn('output has been hidden', json_out)

    def test_exception(self):
        # The 'exception' key is removed from the dumped results.
        cb = CallbackBase()
        result = {'item': 'some_item LEFTIN',
                  'exception': ['frame1', 'SENTINEL']}
        json_out = cb._dump_results(result)
        self.assertNotIn('SENTINEL', json_out)
        self.assertNotIn('exception', json_out)
        self.assertIn('LEFTIN', json_out)

    def test_verbose(self):
        # '_ansible_verbose_always' is internal and must not leak.
        cb = CallbackBase()
        result = {'item': 'some_item LEFTIN',
                  '_ansible_verbose_always': 'chicane'}
        json_out = cb._dump_results(result)
        self.assertNotIn('SENTINEL', json_out)
        self.assertIn('LEFTIN', json_out)

    def test_diff(self):
        # 'diff' entries are removed from the dumped results.
        cb = CallbackBase()
        result = {'item': 'some_item LEFTIN',
                  'diff': ['remove stuff', 'added LEFTIN'],
                  '_ansible_verbose_always': 'chicane'}
        json_out = cb._dump_results(result)
        self.assertNotIn('SENTINEL', json_out)
        self.assertIn('LEFTIN', json_out)

    # TODO: trigger the 'except UnicodeError' around _get_diff
    # that try except orig appeared in 61d01f549f2143fd9adfa4ffae42f09d24649c26
    # in 2013 so maybe a < py2.6 issue
class TestCallbackDiff(unittest.TestCase):
    def setUp(self):
        # Fresh CallbackBase per test; all tests below exercise its _get_diff.
        self.cb = CallbackBase()
def _strip_color(self, s):
return re.sub('\033\\[[^m]*m', '', s)
def test_difflist(self):
# TODO: split into smaller tests?
difflist = [{'before': ['preface\nThe Before String\npostscript'],
'after': ['preface\nThe After String\npostscript'],
'before_header': 'just before',
'after_header': 'just after'
},
{'before': ['preface\nThe Before String\npostscript'],
'after': ['preface\nThe After String\npostscript'],
},
{'src_binary': 'chicane'},
{'dst_binary': 'chicanery'},
{'dst_larger': 1},
{'src_larger': 2},
{'prepared': 'what does prepared do?'},
{'before_header': 'just before'},
{'after_header': 'just after'}]
res = self.cb._get_diff(difflist)
self.assertIn('Before String', res)
self.assertIn('After String', res)
self.assertIn('just before', res)
self.assertIn('just after', res)
    def test_simple_diff(self):
        # One line replaced ('two' -> 'four'): after stripping ANSI color the
        # unified diff must match exactly, headers and hunk line included.
        self.assertMultiLineEqual(
            self._strip_color(self.cb._get_diff({
                'before_header': 'somefile.txt',
                'after_header': 'generated from template somefile.j2',
                'before': 'one\ntwo\nthree\n',
                'after': 'one\nthree\nfour\n',
            })),
            textwrap.dedent('''\
                --- before: somefile.txt
                +++ after: generated from template somefile.j2
                @@ -1,3 +1,3 @@
                 one
                -two
                 three
                +four
                '''))
    def test_new_file(self):
        # Empty 'before' means a newly created file: all lines are additions.
        self.assertMultiLineEqual(
            self._strip_color(self.cb._get_diff({
                'before_header': 'somefile.txt',
                'after_header': 'generated from template somefile.j2',
                'before': '',
                'after': 'one\ntwo\nthree\n',
            })),
            textwrap.dedent('''\
                --- before: somefile.txt
                +++ after: generated from template somefile.j2
                @@ -0,0 +1,3 @@
                +one
                +two
                +three
                '''))
    def test_clear_file(self):
        # Empty 'after' means the file was emptied: all lines are removals.
        self.assertMultiLineEqual(
            self._strip_color(self.cb._get_diff({
                'before_header': 'somefile.txt',
                'after_header': 'generated from template somefile.j2',
                'before': 'one\ntwo\nthree\n',
                'after': '',
            })),
            textwrap.dedent('''\
                --- before: somefile.txt
                +++ after: generated from template somefile.j2
                @@ -1,3 +0,0 @@
                -one
                -two
                -three
                '''))
    def test_no_trailing_newline_before(self):
        # 'before' lacks a trailing newline: the diff must flag it with the
        # "\ No newline at end of file" marker on the removed side.
        self.assertMultiLineEqual(
            self._strip_color(self.cb._get_diff({
                'before_header': 'somefile.txt',
                'after_header': 'generated from template somefile.j2',
                'before': 'one\ntwo\nthree',
                'after': 'one\ntwo\nthree\n',
            })),
            textwrap.dedent('''\
                --- before: somefile.txt
                +++ after: generated from template somefile.j2
                @@ -1,3 +1,3 @@
                 one
                 two
                -three
                \\ No newline at end of file
                +three
                '''))
    def test_no_trailing_newline_after(self):
        # 'after' lacks a trailing newline: the marker appears on the added side.
        self.assertMultiLineEqual(
            self._strip_color(self.cb._get_diff({
                'before_header': 'somefile.txt',
                'after_header': 'generated from template somefile.j2',
                'before': 'one\ntwo\nthree\n',
                'after': 'one\ntwo\nthree',
            })),
            textwrap.dedent('''\
                --- before: somefile.txt
                +++ after: generated from template somefile.j2
                @@ -1,3 +1,3 @@
                 one
                 two
                -three
                +three
                \\ No newline at end of file
                '''))
    def test_no_trailing_newline_both(self):
        # Identical content (both missing the trailing newline) yields an
        # empty diff.
        self.assertMultiLineEqual(
            self.cb._get_diff({
                'before_header': 'somefile.txt',
                'after_header': 'generated from template somefile.j2',
                'before': 'one\ntwo\nthree',
                'after': 'one\ntwo\nthree',
            }),
            '')
def test_no_trailing_newline_both_with_some_changes(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somef |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 KenV99
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Win32 message posted to a window when one of its RegisterHotKey hotkeys
# fires (WM_HOTKEY, 0x0312 = 786 decimal — verify against winuser.h).
WM_HOTKEY = 786
# Win32 virtual-key codes (winuser.h VK_* values, in decimal).
VK_SHIFT = 16
VK_CONTROL = 17
VK_SPACE = 32
VK_HOME = 36
# Arrow keys
VK_LEFT = 37
VK_UP = 38
VK_RIGHT = 39
VK_DOWN = 40
VK_INSERT = 45
VK_DELETE = 46
VK_HELP = 47
# Windows logo and application (context-menu) keys
VK_LWIN = 91
VK_RWIN = 92
VK_APPS = 93
# Numeric keypad digits
VK_NUMPAD0 = 96
VK_NUMPAD1 = 97
VK_NUMPAD2 = 98
VK_NUMPAD3 = 99
VK_NUMPAD4 = 100
VK_NUMPAD5 = 101
VK_NUMPAD6 = 102
VK_NUMPAD7 = 103
VK_NUMPAD8 = 104
VK_NUMPAD9 = 105
# Function keys F1..F24
VK_F1 = 112
VK_F2 = 113
VK_F3 = 114
VK_F4 = 115
VK_F5 = 116
VK_F6 = 117
VK_F7 = 118
VK_F8 = 119
VK_F9 = 120
VK_F10 = 121
VK_F11 = 122
VK_F12 = 123
VK_F13 = 124
VK_F14 = 125
VK_F15 = 126
VK_F16 = 127
VK_F17 = 128
VK_F18 = 129
VK_F19 = 130
VK_F20 = 131
VK_F21 = 132
VK_F22 = 133
VK_F23 = 134
VK_F24 = 135
# Lock keys
VK_NUMLOCK = 144
VK_SCROLL = 145
# Left/right-specific modifier variants
VK_LSHIFT = 160
VK_RSHIFT = 161
VK_LCONTROL = 162
VK_RCONTROL = 163
# RegisterHotKey modifier flags (MOD_*), combinable as a bitmask.
MOD_ALT = 1
MOD_CONTROL = 2
MOD_SHIFT = 4
MOD_WIN = 8
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_ | Togo():
'''Class that manages this specific menu context.' | ''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Togo"])) |
from joueur.delta_mergeable import DeltaMergeable
from joueur.base_game_object import BaseGameObject
from joueur.utilities import camel_case_converter
from joueur.serializer import is_game_object_reference, is_object
# @class GameManager: managed the game and it's game objects including unserializing deltas
class GameManager():
    """Manages the game and its game objects, including unserializing the
    delta states the server sends."""

    def __init__(self, game):
        self.game = game
        self._game_object_classes = game._game_object_classes

    def set_constants(self, constants):
        """Stores the server constants used to interpret delta states."""
        self._server_constants = constants
        self._DELTA_REMOVED = constants['DELTA_REMOVED']
        self._DELTA_LIST_LENGTH = constants['DELTA_LIST_LENGTH']

    def apply_delta_state(self, delta):
        """Applies a delta state (change in state information) to this game."""
        if 'gameObjects' in delta:
            self._init_game_objects(delta['gameObjects'])
        self._merge_delta(self.game, delta)

    def _init_game_objects(self, delta_game_objects):
        """Creates any game objects not seen before so delta references
        (including cyclic ones) all resolve to the same instances."""
        for id, obj in delta_game_objects.items():
            if id not in self.game._game_objects:  # then we need to create it
                self.game._game_objects[id] = self._game_object_classes[obj['gameObjectName']]()

    def _merge_delta(self, state, delta):
        """Recursively merges delta changes into the current game state.

        ``state`` may be a list, dict or DeltaMergeable; ``delta`` is always
        a dict (arrays arrive as dicts with a DELTA_LIST_LENGTH marker).
        """
        delta_length = -1
        if self._DELTA_LIST_LENGTH in delta:
            delta_length = delta[self._DELTA_LIST_LENGTH]
            # the marker only signals "this is an array"; never copy it over
            del delta[self._DELTA_LIST_LENGTH]

        if delta_length > -1:  # then this part of the state is an array
            while len(state) > delta_length:  # shrink to the new length
                state.pop()
            while len(state) < delta_length:  # grow to the new length
                state.append(None)

        for key in delta:  # deltas iterate as dicts; array keys are strings of numbers
            d = delta[key]
            state_key = key
            key_in_state = False
            if isinstance(state, list):
                state_key = int(key)  # array keys are real numbers, not strings e.g. "1"
                key_in_state = state_key < len(state)
            else:
                if isinstance(state, DeltaMergeable):
                    state_key = "_" + camel_case_converter(state_key)
                key_in_state = state_key in state

            value = d
            if d == self._DELTA_REMOVED:
                value = None
                if key_in_state:
                    del state[state_key]
            elif is_game_object_reference(d):  # then this is a shallow reference to a game object
                value = self.game.get_game_object(d['id'])
            elif is_object(d) and key_in_state and is_object(state[state_key]):
                value = None
                self._merge_delta(state[state_key], d)
            elif not key_in_state and is_object(d):
                if isinstance(d, dict):
                    # BUG FIX: was `d in self._DELTA_LIST_LENGTH`, which tests
                    # a dict against a string (TypeError). The intent is to
                    # check whether the sub-delta carries the array marker.
                    state[state_key] = [] if self._DELTA_LIST_LENGTH in d else {}
                    value = None
                    self._merge_delta(state[state_key], d)

            if value is not None:
                if isinstance(state_key, int) or isinstance(state, dict):
                    state[state_key] = value
                else:
                    setattr(state, state_key, value)
|
from .entity import Entity
class Edge | (Entity):
"""Basic class for all edge objects"""
meta = {
"ontology": "gch",
"typename": "Edge",
"hierarchy": "gch/Entity.Edge"
}
def __init__(self, attributes={}, tags=set([])):
| super(Edge, self).__init__(attributes, tags)
|
# --------------------------------------------------------
# Theano @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# -------------------------------------------------------- |
import numpy as np
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor, GetTensorName
def shared(value, name=None, **kwargs):
    """Construct a Tensor initialized with ``value``.

    Parameters
    ----------
    value : basic type, list or numpy.ndarray
        The numerical values.
    name : str
        The name of tensor.

    Returns
    -------
    Tensor
        The initialized tensor.

    Raises
    ------
    TypeError
        If ``value`` is not an int, float, list or numpy.ndarray.
    """
    accepted_types = (int, float, list, np.ndarray)
    if not isinstance(value, accepted_types):
        raise TypeError("Unsupported type of value: {}".format(type(value)))
    tensor_name = GetTensorName() if name is None else name
    result = Tensor(tensor_name).Variable()
    ws.FeedTensor(result, value)
    return result
impo | rt os
def generate_enum(path, localizations):
    """Write a Swift ``Language`` enum for the given localization codes.

    Creates ``<path>/Language.swift`` with one case per localization plus an
    ``Undefined`` fallback, and helpers to read/persist the current language
    in UserDefaults. If the file already exists it is left untouched.

    :param path: directory in which to create Language.swift
    :param localizations: iterable of language codes (e.g. ["en", "fr"])
    """
    # Use os.path.join instead of string concatenation for the path, and a
    # context manager so the handle is closed even if a write fails.
    full_path = os.path.join(path, "Language.swift")
    if os.path.isfile(full_path):
        # Never overwrite an existing enum file.
        return
    with open(full_path, 'w+') as enum_file:
        enum_file.write("import Foundation\n\n")
        enum_file.write("enum Language: String {\n")
        enum_file.write("\tprivate static let languageKey = \"AppleLanguages\"\n")
        enum_file.write("\tprivate static var currentLanguage: Language?\n\n")
        for localization in localizations:
            enum_file.write("\tcase " + localization + " = \"" + localization + "\"\n")
        enum_file.write("\tcase Undefined = \"\"\n\n")
        enum_file.write("\tstatic func getCurrentLanguage() -> Language {\n")
        enum_file.write("\t\tif let language = currentLanguage {\n")
        enum_file.write("\t\t\treturn language\n")
        enum_file.write("\t\t}\n\n")
        enum_file.write("\t\tif let array = UserDefaults.standard.stringArray(forKey: languageKey), let label = array.first, let language = Language(rawValue: label) {\n")
        enum_file.write("\t\t\tcurrentLanguage = language\n")
        enum_file.write("\t\t\treturn language\n")
        enum_file.write("\t\t}\n\n")
        enum_file.write("\t\treturn .Undefined\n")
        enum_file.write("\t}\n\n")
        enum_file.write("\tstatic func setCurrentLanguage(language: Language) {\n")
        enum_file.write("\t\tcurrentLanguage = language\n")
        enum_file.write("\t\tUserDefaults.standard.set([language.rawValue], forKey: languageKey)\n")
        enum_file.write("\t}\n\n")
        enum_file.write("}")
|
s and
variables of other packages.
Scheme of the db
----------------
# {'function_name':
# {'variables':
# {'variable_name':
# {'question_info':
# {'qtype': ['simple_input', 'confirmation_question',
# 'selection_options', 'selection_list_options'],
# 'question_spec': 'question_spec'},
# 'default': default}},
########
# 'descendants': [{'agg_description':
# {variable_name:
# {'variable_value': 'function_name'}
# },
# 'agg_name': 'aggregated_parameter_name'}]
# }}
######## OR
# 'descendants': [{'agg_description': 'function_name'
# 'agg_name': 'aggregated_parameter_name'}]
# }}
#TODO: checker 1 function with list of functions and dicts of dicts
"""
from tui_questioner import general_questioner
def check_quest_info(db):
    """Function which carries out the automatic checking of the database of
    functions and variables.

    Parameters
    ----------
    db: dict
        the dictionary of all the information about the system with all its
        functions and dependencies between them in order to ask for their
        variables automatically.

    Returns
    -------
    check: boolean
        returns the correctness of the database.
    path: list
        path of the possible error.
    message: str
        message of the error if it exists.
    """
    ## 0. Initial preset variables needed
    # Order-insensitive key-set comparison. The original sliced
    # ``dict.keys()`` (``a[-1::-1]``), which breaks on Python 3 views and
    # only accepted exact or fully reversed order; sorting accepts any
    # permutation of the expected keys (a backward-compatible superset).
    def equality_elements_list(a, b):
        a = list(a.keys()) if type(a) == dict else list(a)
        b = list(b.keys()) if type(b) == dict else list(b)
        return sorted(a) == sorted(b)
    # List of elements available in some dicts at some levels
    first_level = ['descendants', 'variables']
    desc_2_level = ['agg_description', 'agg_name']
    vars_2_level = ['question_info', 'default']
    vars_3_level = ['qtype', 'question_spec']
    # Messages of errors (kept identical to the original wording)
    m0 = "The given database of functions is not a dictionary."
    m1 = "The function '%s' does not have "+str(first_level)+" as keys."
    m2 = "The variables of function '%s' is not a dict."
    m3 = "Incorrect keys "+str(vars_2_level)+" in function %s and variable %s."
    m4 = "Incorrect question_info format for function %s and variable %s."
    m5 = "Not a string the 'qtype' of function %s and variable %s."
    m6 = "Incorrect 'question_spec' format for function %s and variable %s."
    m7 = "Descendants of the function %s is not a list."
    m8 = "Elements of the list of descendants not a dict for function %s."
    m9 = "Incorrect structure of a dict in descendants for function %s."
    m10 = "Incorrect type of agg_description for function %s and variable %s."
    m11 = "Incorrect type of agg_description for function %s."
    ## Check db is a dict
    if type(db) != dict:
        return False, [], m0
    ## Loop to check each function in db
    for funct in db.keys():
        ## Check main keys
        if not equality_elements_list(db[funct], first_level):
            return False, [funct], m1 % funct
        ## Check variables
        varsbles = db[funct]['variables']
        if type(varsbles) != dict:
            return False, [funct, 'variables'], m2 % funct
        for var in varsbles:
            # BUGFIX: validate the variable's key set *before* indexing
            # 'question_info'; the original dereferenced it first and raised
            # KeyError on malformed input instead of returning the m3 tuple.
            if not equality_elements_list(varsbles[var], vars_2_level):
                return False, [funct, 'variables', var], m3 % (funct, var)
            qinfo = varsbles[var]['question_info']
            ### Check question_info
            if not equality_elements_list(qinfo, vars_3_level):
                return (False, [funct, 'variables', 'question_info'],
                        m4 % (funct, var))
            if type(qinfo['qtype']) != str:
                return (False, [funct, 'variables', 'question_info', 'qtype'],
                        m5 % (funct, var))
            if type(qinfo['question_spec']) != dict:
                return (False,
                        [funct, 'variables', 'question_info', 'question_spec'],
                        m6 % (funct, var))
        ## Check descendants
        if type(db[funct]['descendants']) != list:
            return False, [funct, 'descendants'], m7 % funct
        for var_desc in db[funct]['descendants']:
            if type(var_desc) != dict:
                return False, [funct, 'descendants'], m8 % funct
            if not equality_elements_list(var_desc.keys(), desc_2_level):
                return False, [funct, 'descendants'], m9 % funct
            agg = var_desc['agg_description']
            if type(agg) == str:
                # A plain function name: nothing more to validate.
                pass
            elif type(agg) == dict:
                # Variable-dependent descendants: every value must itself be
                # a {variable_value: function_name} dict.
                for varname in agg:
                    if type(agg[varname]) != dict:
                        return (False,
                                [funct, 'descendants', 'agg_description'],
                                m10 % (funct, varname))
            else:
                return (False, [funct, 'descendants', 'agg_description'],
                        m11 % funct)
    return True, [], ''
def automatic_questioner(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choo |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import unittest
from ..morf import analyze, disambiguate
# EINO SANTANEN. Muodon vanhimmat
# http://luulet6lgendus.blogspot.com/
sentences = '''KÕIGE VANEM MUDEL
Pimedas luusivad robotid,
originaalsed tšehhi robotid kahekümnendatest.
Robota! kisendavad nad, uhked originaalsed robotid,
hüüdes iseenda nime.
Robota! möirgavad nad, naftasegused elukad,
hiiglase vaimusünnitised, robotid:
kurvameelsetena kauguses,
ebamäärastena | kauguse | s,
mattudes vastuoludesse,
muutudes peaaegu julmaks oma õiglusejanus.
Robota! Kui päike pageb monoliitide kohalt,
tähistavad nad vägisi
öö salajast geomeetriat.
Õudne on inimesel vaadata
neid metsikuid mudeleid.
Kuuntele, romantiikkaa, 2002'''.split('\n')
class TestDisambiguator(unittest.TestCase):
    """The standalone ``disambiguate`` function must produce exactly the
    same analyses as ``analyze`` run with its built-in ``disambiguate=True``
    mode."""

    def test_disambiguator(self):
        for text in sentences:
            builtin_result = analyze(text)
            raw = analyze(text, disambiguate=False)
            separate_result = disambiguate(raw)
            self.assertListEqual(builtin_result, separate_result)
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from django.template.loader import render_to_string
from django.conf import settings
from preferences.models import UserPreferences
from summaries.models import Unseen
from django.contrib.sites.models import Site
from optparse import make_option
from django.core.mail import EmailMultiAlternatives
class Command(BaseCommand):
    """Build and send periodic (daily/weekly/monthly) link-summary emails
    to users who opted in via their preferences."""

    args = 'daily | weekly | monthly'
    help = 'Builds and sends summary mails for given period'
    option_list = BaseCommand.option_list + (
        make_option('--dry-run',
            action='store_true',
            dest='dry',
            default=False,
            help='Run without posting emails and writing them on stdout'),
    )

    def handle(self, *args, **options):
        # Validate the single required period argument.
        if not len(args) == 1:
            raise CommandError("Give a period please")
        period = args[0]
        if not period in ("daily", "weekly", "monthly"):
            raise CommandError("Period must be daily, weekly or monthly.")
        # Users who subscribed to summaries for this period.
        users = [preference.user for preference in
                 UserPreferences.objects.filter(summary_mails=period)]
        for user in users:
            unseen_models = Unseen.objects.filter(user=user)
            unseen_links = [unseen.link for unseen in unseen_models]
            if not unseen_links:
                continue
            context = {
                "user": user,
                "links": unseen_links,
                "site": Site.objects.get_current(),
            }
            email_title = "%s new links for you:" % len(unseen_links)
            email_body_txt = render_to_string("summaries/body.txt", context)
            email_body_html = render_to_string("summaries/body.html", context)
            if options['dry']:
                # BUGFIX: --dry-run previously still sent the emails (only the
                # Unseen cleanup was skipped), contradicting its help text.
                # Now it writes the rendered mail to stdout and sends nothing.
                self.stdout.write(email_body_txt)
                continue
            email = EmailMultiAlternatives(
                email_title,
                email_body_txt,
                "Linkfloyd %s" % settings.DEFAULT_FROM_EMAIL,
                [user.email])
            email.attach_alternative(email_body_html, "text/html")
            email.send()
            self.stdout.write("Summary email for %s sent\n" % user)
            # Only clear the unseen queue once the mail actually went out.
            unseen_models.delete()
|
# -*- coding: utf-8 -*-
#
# Modoboa documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 3 22:29:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Modoboa'
copyright = u'2016, Antoine Nguyen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# Read the Docs injects its own theme; fall back to 'nature' for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    html_theme = 'default'
else:
    html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Modoboadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Modoboa.tex', u'Modoboa Documentation',
   u'Antoine Nguyen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'modoboa', u'Modoboa Documentation',
     [u'Antoine Nguyen'], 1)
]
# Cross-project references into the modoboa-amavis documentation.
intersphinx_mapping = {
    'amavis': ('http://modoboa-amavis.readthedocs.org/en/latest/', None)
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 14:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Replaces the Meta ``permissions`` of the ``permission`` model with
    # role-based page-view permissions (cook / waiter / cashier pages).
    dependencies = [
        ('restaurant', '0009_permission'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='permission',
            options={'permissions': (('isCook', 'Can see the cooks page'), ('isWaiter', 'Can see the waiter page'), ('isCashier', 'Can see the cashier page'))},
        ),
    ]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# get requirements.txt
# Runtime dependencies are maintained in requirements.txt (one per line)
# and fed straight into install_requires.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(name='athos-core',
      description = 'Athos project core',
      url = 'https://github.com/AthosOrg/',
      packages = find_packages(),
      entry_points = {
        'console_scripts': [
          # Installs the ``athos-core`` command, dispatching to athos.cmd:main
          'athos-core=athos.cmd:main'
        ]
      },
      install_requires = required,
      # Ship the default configuration file alongside the package.
      package_data = {'athos': ['default.yml']}
)
.files import uncommented_lines
from fabtools.utils import run_as_root
# Python2 and 3 compatibility
from past.builtins import basestring
def exists(name):
    """
    Check if a user exists.
    """
    with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
        result = run('getent passwd %s' % name)
    return result.succeeded
# Alphabet for the traditional two-character crypt(3) salt.
_SALT_CHARS = string.ascii_letters + string.digits + './'
def _crypt_password(password):
    """Hash *password* with crypt(3) using a random two-character salt."""
    from crypt import crypt
    random.seed()
    # Draw two characters from the crypt salt alphabet.
    salt = ''.join(random.choice(_SALT_CHARS) for _ in range(2))
    return crypt(password, salt)
def create(name, comment=None, home=None, create_home=None, skeleton_dir=None,
           group=None, create_group=True, extra_groups=None, password=None,
           system=False, shell=None, uid=None, ssh_public_keys=None,
           non_unique=False):
    """
    Create a new user and its home directory.

    If *create_home* is ``None`` (the default), a home directory will be
    created for normal users, but not for system users.
    You can override the default behaviour by setting *create_home* to
    ``True`` or ``False``.

    If *system* is ``True``, the user will be a system account. Its UID
    will be chosen in a specific range, and it will not have a home
    directory, unless you explicitely set *create_home* to ``True``.

    If *shell* is ``None``, the user's login shell will be the system's
    default login shell (usually ``/bin/sh``).

    *ssh_public_keys* can be a (local) filename or a list of (local)
    filenames of public keys that should be added to the user's SSH
    authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).

    Example::

        import fabtools

        if not fabtools.user.exists('alice'):
            fabtools.user.create('alice')

        with cd('/home/alice'):
            # ...

    """
    # Note that we use useradd (and not adduser), as it is the most
    # portable command to create users across various distributions:
    # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/useradd.html
    # Build the useradd option list incrementally; values are shell-quoted.
    args = []
    if comment:
        args.append('-c %s' % quote(comment))
    if home:
        args.append('-d %s' % quote(home))
    if group:
        args.append('-g %s' % quote(group))
        # Only auto-create the primary group when one was explicitly named.
        if create_group:
            if not _group_exists(group):
                _group_create(group)
    if extra_groups:
        groups = ','.join(quote(group) for group in extra_groups)
        args.append('-G %s' % groups)
    # Default: home directories for normal users, none for system accounts.
    if create_home is None:
        create_home = not system
    if create_home is True:
        args.append('-m')
    elif create_home is False:
        args.append('-M')
    if skeleton_dir:
        args.append('-k %s' % quote(skeleton_dir))
    if password:
        # useradd expects the already-crypt(3)ed password with -p.
        crypted_password = _crypt_password(password)
        args.append('-p %s' % quote(crypted_password))
    if system:
        args.append('-r')
    if shell:
        args.append('-s %s' % quote(shell))
    if uid:
        args.append('-u %s' % uid)
    if non_unique:
        args.append('-o')
    args.append(name)
    args = ' '.join(args)
    run_as_root('useradd %s' % args)
    if ssh_public_keys:
        # Accept a single filename as well as a list of filenames.
        if isinstance(ssh_public_keys, basestring):
            ssh_public_keys = [ssh_public_keys]
        add_ssh_public_keys(name, ssh_public_keys)
def modify(name, comment=None, home=None, move_current_home=False, group=None,
           extra_groups=None, login_name=None, password=None, shell=None,
           uid=None, ssh_public_keys=None, non_unique=False):
    """
    Modify an existing user.

    *ssh_public_keys* can be a (local) filename or a list of (local)
    filenames of public keys that should be added to the user's SSH
    authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).

    Example::

        import fabtools

        if fabtools.user.exists('alice'):
            fabtools.user.modify('alice', shell='/bin/sh')

    """
    # Build the usermod option list incrementally; values are shell-quoted.
    args = []
    if comment:
        args.append('-c %s' % quote(comment))
    if home:
        args.append('-d %s' % quote(home))
    if move_current_home:
        args.append('-m')
    if group:
        args.append('-g %s' % quote(group))
    if extra_groups:
        groups = ','.join(quote(group) for group in extra_groups)
        args.append('-G %s' % groups)
    if login_name:
        args.append('-l %s' % quote(login_name))
    if password:
        # usermod expects the already-crypt(3)ed password with -p.
        crypted_password = _crypt_password(password)
        args.append('-p %s' % quote(crypted_password))
    if shell:
        args.append('-s %s' % quote(shell))
    if uid:
        args.append('-u %s' % quote(uid))
    if non_unique:
        args.append('-o')
    # Only invoke usermod when at least one option was actually built.
    if args:
        args.append(name)
        args = ' '.join(args)
        run_as_root('usermod %s' % args)
    if ssh_public_keys:
        # Accept a single filename as well as a list of filenames.
        if isinstance(ssh_public_keys, basestring):
            ssh_public_keys = [ssh_public_keys]
        add_ssh_public_keys(name, ssh_public_keys)
def home_directory(name):
    """
    Get the absolute path to the user's home directory

    Example::

        import fabtools

        home = fabtools.user.home_directory('alice')

    """
    with settings(hide('running', 'stdout')):
        # Let the remote shell expand ~<name> to the home path.
        return run('echo ~%s' % name)
def local_home_directory(name=''):
    """
    Get the absolute path to the local user's home directory

    Example::

        import fabtools

        local_home = fabtools.user.local_home_directory()

    """
    with settings(hide('running', 'stdout')):
        # Let the local shell expand ~<name> (or ~ for the current user).
        return local('echo ~%s' % name, capture=True)
def authorized_keys(name):
    """
    Get the list of authorized SSH public keys for the user
    """
    keys_path = posixpath.join(home_directory(name), '.ssh', 'authorized_keys')
    return uncommented_lines(keys_path, use_sudo=True)
def add_ssh_public_key(name, filename):
    """
    Add a public key to the user's authorized SSH keys.

    *filename* must be the local filename of a public key that should be
    added to the user's SSH authorized keys.

    Example::

        import fabtools

        fabtools.user.add_ssh_public_key('alice', '~/.ssh/id_rsa.pub')

    """
    # Thin convenience wrapper around the plural variant.
    add_ssh_public_keys(name, [filename])
def add_ssh_public_keys(name, filenames):
    """
    Add multiple public keys to the user's authorized SSH keys.

    *filenames* must be a list of local filenames of public keys that
    should be added to the user's SSH authorized keys.

    Example::

        import fabtools

        fabtools.user.add_ssh_public_keys('alice', [
            '~/.ssh/id1_rsa.pub',
            '~/.ssh/id2_rsa.pub',
        ])

    """
    # Imported lazily to avoid a circular import at module load time.
    from fabtools.require.files import (
        directory as _require_directory,
        file as _require_file,
    )
    # Ensure ~/.ssh and authorized_keys exist with the permissions sshd
    # requires (700 dir, 600 file), owned by the target user.
    ssh_dir = posixpath.join(home_directory(name), '.ssh')
    _require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
    authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys')
    _require_file(authorized_keys_filename, mode='600', owner=name,
                  use_sudo=True)
    for filename in filenames:
        # Keys are read from *local* files, then appended remotely.
        with open(filename) as public_key_file:
            public_key = public_key_file.read().strip()
        # we don't use fabric.contrib.files.append() as it's buggy
        if public_key not in authorized_keys(name):
            sudo('echo %s >>%s' % (quote(public_key),
                                   quote(authorized_keys_filename)))
def add_host_keys(name, hostname):
"""
Add all public keys of a host to the user's SSH known hosts file
" | ""
from fabtools.require.files import (
directory as _require_directory,
file as _require_file,
)
ssh_dir = posixpath.join(home_directory(name), '.ssh')
_require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
known_hosts_filename = posixpath.join(ssh_dir, 'known_hosts')
_require_file(known_hosts_filename, mode='644', owner=name, use_sudo=True)
known_hosts = uncommented_lines(known_hosts_filename, use_sudo=True)
with hide('running', 'stdout'):
res = |
import mut | able_attr
import unittest
class T(unittest.TestCase):
    """Smoke test: an attribute can be set on the mutable_attr module."""

    def test_foo(self):
        setattr(mutable_attr, 'y', 3)
|
from ctypes import *
import unittest
import sys
class Test(unittest.TestCase):
    """Tests for ctypes.cast() between arrays, pointers and addresses."""

    def test_array2pointer(self):
        array = (c_int * 3)(42, 17, 2)
        # casting an array to a pointer works.
        ptr = cast(array, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
        # Reinterpret the ints as pairs of shorts (only where sizes permit);
        # expected halves depend on the platform's byte order.
        if 2*sizeof(c_short) == sizeof(c_int):
            ptr = cast(array, POINTER(c_short))
            if sys.byteorder == "little":
                self.assertEqual([ptr[i] for i in range(6)],
                                 [42, 0, 17, 0, 2, 0])
            else:
                self.assertEqual([ptr[i] for i in range(6)],
                                 [0, 42, 0, 17, 0, 2])

    def test_address2pointer(self):
        array = (c_int * 3)(42, 17, 2)
        # Casting works both from a c_void_p and from a raw integer address.
        address = addressof(array)
        ptr = cast(c_void_p(address), POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
        ptr = cast(address, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

    def test_p2a_objects(self):
        # _objects keeps referenced Python objects alive; a cast pointer
        # must share (not copy) that bookkeeping with the source array.
        array = (c_char_p * 5)()
        self.assertEqual(array._objects, None)
        array[0] = b"foo bar"
        self.assertEqual(array._objects, {'0': b"foo bar"})
        p = cast(array, POINTER(c_char_p))
        # array and p share a common _objects attribute
        self.assertIs(p._objects, array._objects)
        self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
        p[0] = b"spam spam"
        self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
        self.assertIs(array._objects, p._objects)
        p[1] = b"foo bar"
        self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
        self.assertIs(array._objects, p._objects)

    def test_other(self):
        # Slicing a cast pointer, before and after an unrelated allocation
        # (the bare c_int() calls) and after in-place mutation.
        p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
        self.assertEqual(p[:4], [1,2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        p[2] = 96
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])

    def test_char_p(self):
        # This didn't work: bad argument to internal function
        s = c_char_p(b"hiho")
        self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
                         b"hiho")

    # c_wchar_p is only available on builds with wchar support; define the
    # test conditionally at class-creation time.
    try:
        c_wchar_p
    except NameError:
        pass
    else:
        def test_wchar_p(self):
            s = c_wchar_p("hiho")
            self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
                             "hiho")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
', # SET THIS!
'component': component, #"Database Queries", # SET THIS!
'version': 'unspecified',
'blocked': '', # SET THIS! (to tracking bug or empty for no tracking bug)
'op_sys': 'All',
'rep_platform': 'All',
}
base_url = "https://bugzilla.wikimedia.org/xmlrpc.cgi"
saveMigration = True
skip_existing = "-importdoubles" not in sys.argv
if False:
base_url = "http://192.168.1.103:8080/xmlrpc.cgi"
saveMigration = False
skip_existing = False
bug_defaults = {
'product': 'TestProduct', # SET THIS!
'component': 'TestComponent', # SET THIS!
'version': 'unspecified',
'blocked': '', # SET THIS! (to tracking bug or empty for no tracking bug)
'op_sys': 'All',
'rep_platform': 'All',
}
username = "wmf.bugconverter@gmail.com"
import config
password = config.password
print "Logging in to Bugzilla..."
bz = bugzilla.Bugzilla(url=base_url)
bz.login(username, password)
def hook(a):
    # json object_hook: best-effort conversion of JIRA ISO-8601 timestamp
    # strings ("%Y-%m-%dT%H:%M:%S.%f+0000") into datetime objects; values
    # that do not parse are left untouched.  (Python 2 syntax.)
    for key in a:
        if isinstance(a[key], basestring):
            try:
                a[key] = datetime.strptime(a[key], "%Y-%m-%dT%H:%M:%S.%f+0000")
            except Exception, e:
                pass
    return a
def get(*args, **kwargs):
    """HTTP GET returning JSON with JIRA timestamps parsed via ``hook``."""
    kwargs['verify'] = False  # mitmproxy
    response = requests.get(*args, **kwargs)
    return json.loads(response.text, object_hook=hook)
def reply_format(text, nindent=1):
    """Wrap *text* as an email-style quoted reply, with '>' repeated
    *nindent* times (no prefix at all when nindent <= 0)."""
    if nindent > 0:
        prefix = '%s ' % ('>' * nindent)
    else:
        prefix = ''
    return textwrap.fill(text, initial_indent=prefix,
                         subsequent_indent=prefix, break_long_words=False)
def htmltobz(html):
    """Convert JIRA-rendered HTML into bugzilla-friendly plain text."""
    # remove 'plain text' links that were linkified by jira
    html = re.sub(r'<a href="(.*?)">\1</a>', r'\1', html)
    converter = html2text.HTML2Text()
    converter.body_width = 0
    converter.ignore_links = True
    converter.inline_links = False
    converter.unicode_snob = True
    return converter.handle(html)
# Cache mapping JIRA email addresses to Bugzilla accounts (None = no match),
# persisted in user-email-mapping.json so interactive confirmations survive
# between runs; the file is rewritten after the user lookup pass below.
users = {}
try:
    f = open('user-email-mapping.json', 'r')
    users = json.load(f)
except Exception, e:
    print e
def getBZuser(email, name):
    # Map a JIRA user's email/name to an existing Bugzilla account.
    # Results (including "not found" == None) are memoized in the
    # module-level ``users`` dict.  (Python 2 syntax.)
    global users
    # JIRA sometimes has no email; synthesize a placeholder cache key.
    if not email:
        email = name + "@invalid"
    if email in users:
        return users[email]
    try:
        user = bz.getuser(email)
        users[email] = email
        return email
    except bugzilla.xmlrpclib.Fault, e:
        # Fault code 51 == account does not exist; anything else is fatal.
        if e.faultCode == 51:
            pass
        else:
            raise
    # not found, try heuristics. Search by Full Name!
    fusers = bz.searchusers(name)
    if not fusers:
        users[email] = None
    else:
        # Interactive confirmation before trusting the full-name match.
        user = fusers[0]
        print "Assuming %s <%s> is actually %s <%s>" % (name, email, user.real_name, user.email)
        if raw_input("Is this OK? Y/n ").upper().strip() == "Y":
            users[email] = user.email
        else:
            users[email] = None
    return users[email]
print "Retrieving issues from JIRA..."
issues = get(
'https://jira.toolserver.org/rest/api/2/search',
params={
'jql': jql,
'fields': 'self',
'maxResults': stepsize
}
)['issues']
runAll = False
maillist = {}
retrIssues = []
print "Getting %i details..." % len(issues)
for issue in issues:
issue = get(issue['self'] + "?expand=renderedFields")
retrIssues.append(issue)
fields = issue['fields']
if fields['assignee']:
maillist[fields['assignee']['emailAddress']] = fields['assignee']['displayName']
maillist[fields['reporter']['emailAddress']] = fields['reporter']['displayName']
for c in fields['comment']['comments']:
if 'author' in c:
maillist[c['author']['emailAddress']] = c['author']['displayName']
print "Retrieving users from bugzilla..."
for mail, name in maillist.items():
bzu = getBZuser(mail, name)
if bzu:
print "%s <%s> => %s" % (name, mail, bzu)
else:
print "%s <%s> not found" % (name, mail)
f = open('user-email-mapping.json', 'w')
json.dump(users, f, indent=4)
f.close()
for issue in retrIssues:
fields = issue['fields']
renderedFields = issue['renderedFields']
# check if issue is already on BZ
existing_bugs = bz.query({"short_desc": issue['key'] + " "})
if existing_bugs and skip_existing:
found = False
for bug in existing_bugs:
if (issue['key'] + " ") in bug.summary:
print "Skipping " + issue['key'] + " " + fields['summary'] + "; already uploaded? Check bug ID %i" % bug.bug_id
found = True
break
if found:
continue
cclist = set()
if fields['assignee']:
cclist.add(getBZuser(fields['assignee']['emailAddress'], fields['assignee']['displayName']))
assignee = "%s <%s>" % (fields['assignee']['displayName'], fields['assignee']['emailAddress'])
else:
assignee = "(none)"
cclist.add(getBZuser(fields['reporter']['emailAddress'], fields['reporter']['displayName']))
print issue['key'] + " " + fields['summary'],
sys.stdout.flush()
if not runAll:
if raw_input().upper() == "A":
runAll = True
if not renderedFields['description']:
renderedFields['description'] = u''
description = u"""This issue was converted from https://jira.toolserver.org/browse/{i[key]}.
Summary: {f[summary]}
Issue type: {f[issuetype][name]} - {f[issuetype][description]}
Priority: {f[priority][name]}
Status: {f[status][name]}
Assignee: {assignee}
-------------------------------------------------------------------------------
From: {f[reporter][displayName]} <{f[reporter][emailAddress]}>
Date: {f[created]:%a, %d %b %Y %T}
-------------------------------------------------------------------------------
{description}
""".format(i=issue, f=fields, assignee=assignee, description=htmltobz(renderedFields['description']))
params = bug_defaults.copy()
params['bug_severity'] = fields['priority']['name']
params['summary'] = issue['key'] + " " + fields['summary']
params['description'] = description
params['assigned_to'] = username # set assignee to the bug convertor initially
bug = bz.createbug(**params)
print " -- bz id ", bug.bug_id,
sys.stdout.flush()
ncs = 0
natt = 0
for comment,renderedComment in zip(fields['comment']['comments'], renderedFields['comment']['comments']):
ncs += 1
if 'author' in comment:
cclist.add(getBZuser(comment['author']['emailAddress'], comment['author']['displayName']))
else:
comment['author'] = {'displayName': "Anonymous", 'emailAddress': 'None'}
commenttext = u"""-------------------------------------------------------------------------------
From: {f[author][displayName]} <{f[author][emailAddress]}>
Date: {f[created]:%a, %d %b %Y %T}
-------------------------------------------------------------------------------
{description}
""".format(f=comment, description=htmltobz(renderedComment["body"]))
bug.addcomment(commenttext)
if 'attachment' in fields:
for attachment in fields['attachment']:
if attachment['author']['emailAddress'] == comment['author']['emailAddress'] and \
abs(attachment['created'] - comment['created']) < timedelta(seconds=1):
natt += 1
atfile = bug.bugzilla.attachfile(
bug.bug_id,
requests.get(attachment['content'], stream=True).raw,
comment["body"],
file_name = attachment['filename'],
content_type = attachment['mimeType']
)
# now insert email addresses. Do this as last action, to prevent bugspam
update = {'cc_add': []}
if fields['assignee']:
bzu = getBZuser(fields['assignee']['emailAddress'], fields['assignee']['displayName'])
if bzu:
update['assigned_to'] = bzu
for user in cclist:
if user:
update['cc_add'].append(user)
if fields['status']:
sn = fields['status']['name']
if sn in ["Open", "Reopened", "Unassigned", "Accepted", "Waiting for customer"]:
update['status'] = "NEW"
elif sn in ["In Progress", "In Re |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# If this page isn't working, try executing `chmod +x app.py` in terminal.
# enable debugging
import cgitb, cgi; cgitb.enable()
from classes import Factory
# Parse the CGI request parameters and build the web application object.
fieldStorage = cgi.FieldStorage()
factory = Factory.Factory()
webApp = factory.makeWebApp(fieldStorage)
def outputHeaders():
    # Emit the minimal CGI response header block (Python 2 print statements).
    print "Content-Type: text/html"
    print # signals end of headers
outputHeaders()
print webApp.getOutput()
|
# -*- coding: utf-8 -*-
def get_instance_children(obj, depth=0, sig=0):
    """
    Recursively collect the child relations of a model instance.

    @depth: integer limiting how deep children are searched, 0 = unlimited
    @sig: current recursion level (internal, starts at 0)

    Returns a list of (verbose_name, unicode(instance), grandchildren) tuples.
    """
    collected = []
    # Walk every reverse (child) relation declared on the object's model.
    for relation in obj._meta.get_all_related_objects():
        # Name of the accessor attribute for this reverse relation.
        accessor = relation.get_accessor_name()
        label = relation.model._meta.verbose_name
        # Fetch every related object through the accessor manager.
        for related_obj in getattr(obj, accessor).all():
            descendants = []
            # Recurse while the depth limit has not been reached.
            if depth == 0 or sig < depth:
                descendants = get_instance_children(related_obj, depth=depth, sig=sig + 1)
            collected.append((label, unicode(related_obj), descendants))
    return collected
|
from django.test import TestCase
from bookstore.models import Book, Ca | tegory
class InventoryModelTest(TestCase):
    """Model-level checks for the bookstore inventory."""

    def test_string_representation_of_categories(self):
        # NOTE(review): despite its name this test only checks the stored
        # attribute, not str(category) — confirm whether __str__ coverage
        # was intended here.
        health = Category.objects.create(name="health", description="health category")
        self.assertEqual(health.name, 'health')

    def test_string_representation_of_books(self):
        placeholder = Category.objects.create()
        titled_book = Book(name='some text', category=placeholder)
        self.assertEqual(str(titled_book), 'some text')
| |
fields.many2one('product.pricelist', 'Pricelist', help='Pricelist of the selected partner.'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoicing Address'),
'invoice_method': fields.selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")
], "Invoice Method",
select=True, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True, track_visibility="onchange", copy=False),
'move_id': fields.many2one('stock.move', 'Move', readonly=True, help="Move created by the repair order", track_visibility="onchange", copy=False),
'fees_lines': fields.one2many('mrp.repair.fee', 'repair_id', 'Fees', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'internal_notes': fields.text('Internal Notes'),
'quotation_notes': fields.text('Quotation Notes'),
'company_id': fields.many2one('res.company', 'Company'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'repaired': fields.boolean('Repaired', readonly=True, copy=False),
'amount_untaxed': fields.function(_amount_untaxed, string='Untaxed Amount',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_tax': fields.function(_amount_tax, string='Taxes',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_total': fields.function(_amount_total, string='Total',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'state': lambda *a: 'draft',
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').next_by_code(cr, uid, 'mrp.repair'),
'invoice_method': lambda *a: 'none',
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.repair', context=context),
'pricelist_id': lambda self, cr, uid, context: self.pool['product.pricelist'].search(cr, uid, [], limit=1)[0],
'product_qty': 1.0,
'location_id': _default_stock_location,
}
_sql_constraints = [
('name', 'unique (name)', 'The name of the Repair Order must be unique!'),
]
def onchange_product_id(self, cr, uid, ids, product_id=None):
""" On change of product sets some values.
@param product_id: Changed product
@return: Dictionary of values.
"""
product = False
if product_id:
product = self.pool.get("product.product").browse(cr, uid, product_id)
return {'value': {
'guarantee_limit': False,
'lot_id': False,
'product_uom': product and product.uom_id.id or False,
}
}
    def onchange_product_uom(self, cr, uid, ids, product_id, product_uom, context=None):
        """Validate the chosen Unit of Measure: when its category differs
        from the product's default UoM category, return a warning and reset
        product_uom back to the product's default."""
        res = {'value': {}}
        # Nothing to validate until both product and UoM are set.
        if not product_uom or not product_id:
            return res
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res
def onchange_location_id(self, cr, uid, ids, location_id=None):
""" On change of location
"""
return {'value': {'location_dest_id': location_id}}
    def button_dummy(self, cr, uid, ids, context=None):
        # No-op button handler: lets form buttons trigger a client-side
        # refresh without any server-side change.
        return True
def onchange_partner_id(self, cr, uid, ids, part, address_id):
""" On change of partner sets the values of partner address,
partner invoice address and pricelist.
@param part: Changed id of partner.
@param address_id: Address id from current record.
@return: Dictionary of values.
"""
part_obj = self.pool.get('res.partner')
pricelist_obj = self.pool.get('product.pricelist')
if not part:
return {'val | ue': {
'address_id': False,
'partner_invoice_id': False,
'pricelist_id': pricelist_obj.search(cr, uid, [], limit=1)[0]
}
}
addr = part_obj.address_get(cr, uid, [part], ['delivery', 'invoice', 'contact'])
partner = part_obj.browse(cr, uid, part)
pricelist = | partner.property_product_pricelist and partner.property_product_pricelist.id or False
return {'value': {
'address_id': addr['delivery'] or addr['contact'],
'partner_invoice_id': addr['invoice'],
'pricelist_id': pricelist
}
}
def action_cancel_draft(self, cr, uid, ids, *args):
""" Cancels repair order when it is in 'Draft' state.
@param *arg: Arguments
@return: True
"""
if not len(ids):
return False
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'draft'})
self.write(cr, uid, ids, {'state': 'draft'})
return self.create_workflow(cr, uid, ids)
    def action_confirm(self, cr, uid, ids, *args):
        """ Repair order state is set to 'To be invoiced' when invoice method
        is 'Before repair' else state becomes 'Confirmed'.
        @param *arg: Arguments
        @return: True
        """
        mrp_line_obj = self.pool.get('mrp.repair.line')
        for o in self.browse(cr, uid, ids):
            if (o.invoice_method == 'b4repair'):
                self.write(cr, uid, [o.id], {'state': '2binvoiced'})
            else:
                self.write(cr, uid, [o.id], {'state': 'confirmed'})
                # NOTE(review): the state is written before the serial-number
                # check below, so the raise relies on the transaction being
                # rolled back — confirm this ordering is intended.
                for line in o.operations:
                    if line.product_id.tracking != 'none' and not line.lot_id:
                        raise UserError(_("Serial number is required for operation line with product '%s'") % (line.product_id.name))
            mrp_line_obj.write(cr, uid, [l.id for l in o.operations], {'state': 'confirmed'})
        return True
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels repair order.
@return: True
|
"""
Test using HAR files in Python tests against the Django ReST framework.
"""
from django import http
from rest_framework import response
from test_har import django_rest_har as test_har
from test_har import tests
class HARDogfoodDRFTests(tests.HARDogfoodTestCase, test_har.HARTestCase):
    """
    Test using HAR files in Python tests against the Django ReST framework.
    """

    # Responses may come from plain Django views or from DRF views.
    RESPONSE_TYPE = (http.HttpResponse, response.Response)

    def test_runner(self):
        """
        Ensure tests are running.
        """
        self.assertTrue(True)
|
pty if False.
hidden_c (set): columns that should be hidden.
squash_c (bool): column is removed if True or empty if False.
colsize (dict): mapping column index to its required width.
width (int): total width of the table.
height (int): total heigth of the table.
nrows (int): total number of rows (lines).
ncols (int): total number of columns.
header (str): table header line (empty by default).
footer (str): table footer line (empty by default).
"""
    def __init__(self, rows=None, formatter=None, outfile=None):
        # Set up an empty (or pre-filled) table plus the shared rendering
        # parameters passed down to every row's show() call.
        if rows is None:
            rows = []
        self.rows = rows
        self.rowparams = {
            "colsize": {},
            "hidden_c": set(),
            "squash_c": True,
            "formatter": formatter,
            "outfile": outfile,
        }
        self.maxlength = float("inf")
        self.hidden_r = set()
        # NOTE: hidden_c and colsize deliberately alias the rowparams
        # entries — mutating one mutates the other.
        self.hidden_c = self.rowparams["hidden_c"]
        self.squash_r = True
        self.colsize = self.rowparams["colsize"]
        self.update()
        self.header = ""
        self.footer = ""
def update(self, *rr):
"recompute the column width over rr range of rows, and update colsize array"
| for c in range(self.ncols):
cz = self.colsize.get(c, 0) if len(rr) > 0 else 0
self.colsize[c] = max(cz, self.getcolsize(c, rr, squash=False))
    def getcolsize(self, c, rr=None, squash=True):
        "compute the given column width (over rr list of row indices if not None.)"
        cz = 0
        if not rr:
            rr = range(self.nrows)
        for i in rr:
            if self.rowparams["squash_c"] and (i in self.hidden_r):
                if squash:
                    # squashed hidden rows contribute nothing to the width
                    continue
            cz = max(cz, self.rows[i].colsize(c))
        return cz
@property
def width(self):
sep = self.rowparams.get("sep", "")
cs = self.ncols * len(sep)
return sum(self.colsize.values(), cs)
    def setcolsize(self, c, value):
        "set column size to value"
        # `range(...)[c]` normalizes a possibly negative index c.
        i = range(self.ncols)[c]
        self.colsize[i] = value

    def addcolsize(self, c, value):
        "add value to the column size"
        i = range(self.ncols)[c]
        self.colsize[i] += value
def addrow(self, toks):
"add row of given list of tokens and update table"
self.rows.append(tokenrow(toks))
self.update()
return self
def addcolumn(self,lot,c=None):
"add column with provided toks (before index c if given) and update table"
if c is None:
c = self.ncols
for ir,toks in enumerate(lot):
if ir < self.nrows:
r = self.rows[ir]
for _ in range(r.ncols,c):
r.cols.append([(Token.Column, "")])
toks.insert(0,(Token.Column, ""))
r.cols.insert(c,toks)
else:
logger.warning("addcolumn: to much rows in provided list of tokens")
break
self.update()
return self
    def hiderow(self, n):
        "hide given row"
        self.hidden_r.add(n)

    def showrow(self, n):
        "show given row"
        # raises KeyError when the row was not hidden
        self.hidden_r.remove(n)

    def hidecolumn(self, n):
        "hide given column"
        self.hidden_c.add(n)

    def showcolumn(self, n):
        "show given column"
        # raises KeyError when the column was not hidden
        self.hidden_c.remove(n)

    def showall(self):
        "remove all hidden rows/cols"
        self.hidden_r = set()
        # rebuild the shared hidden-column set and re-establish the alias
        self.rowparams["hidden_c"] = set()
        self.hidden_c = self.rowparams["hidden_c"]
        return self
    def grep(self, regex, col=None, invert=False):
        "search for a regular expression in the table"
        # L collects indices of visible rows that match the regex.
        L = set()
        R = range(self.nrows)
        for i in R:
            if i in self.hidden_r:
                continue
            C = self.rows[i].rawcols(col)
            for c, s in enumerate(C):
                if c in self.hidden_c:
                    continue
                if re.search(regex, s):
                    L.add(i)
                    break
        if not invert:
            # default behavior keeps matching rows: hide the complement
            L = set(R) - L
        for n in L:
            self.hiderow(n)
        return self
@property
def nrows(self):
return len(self.rows)
@property
def ncols(self):
if self.nrows > 0:
return max((r.ncols for r in self.rows))
else:
return 0
    def __str__(self):
        # Render every row, honoring hidden rows/columns, the maxlength
        # cap, and the optional header/footer lines.
        s = []
        formatter = self.rowparams["formatter"]
        outfile = self.rowparams["outfile"]
        for i in range(self.nrows):
            if i in self.hidden_r:
                if not self.squash_r:
                    # hidden but not squashed: render raw text with the
                    # Token.Hide style instead of dropping the line
                    s.append(
                        highlight(
                            [
                                (
                                    Token.Hide,
                                    self.rows[i].show(raw=True, **self.rowparams),
                                )
                            ],
                            formatter,
                            outfile,
                        )
                    )
            else:
                s.append(self.rows[i].show(**self.rowparams))
        if len(s) > self.maxlength:
            # truncate and mark the cut with an ellipsis line
            s = s[: self.maxlength - 1]
            s.append(highlight([(Token.Literal, icons.dots)], formatter, outfile))
        if self.header:
            s.insert(0, self.header)
        if self.footer:
            s.append(self.footer)
        return "\n".join(s)
class tokenrow(object):
"""
A vltable row (line) of tabulated data tokens.
Attributes:
toks (list): list of tokens tuple (Token.Type, str).
maxwidth: maximum authorized width of this row.
align (str): left/center/right aligment indicator (default to "<" left).
fill (str): fill character used for padding to required size.
separator (str): character used for separation of columns.
cols (list): list of columns of tokens.
ncols (int): number of columns in this row.
"""
def __init__(self, toks=None):
if toks is None:
toks = []
self.maxwidth = float("inf")
self.align = "<"
self.fill = " "
self.separator = ""
toks = [(t, "%s" % s) for (t, s) in toks]
self.cols = self.cut(toks)
def cut(self,toks):
"cut the raw list of tokens into a list of column of tokens"
C = []
c = []
for t in toks:
c.append(t)
if t[0] == Token.Column:
C.append(c)
c = []
C.append(c)
return C
def colsize(self, c):
"return the column size (width)"
if c >= len(self.cols):
return 0
return sum((len(t[1]) for t in self.cols[c] if t[0] != Token.Column))
@property
def ncols(self):
return len(self.cols)
def rawcols(self, j=None):
"return the raw (undecorated) string of this row (j-th column if given)"
r = []
cols = self.cols
if j is not None:
cols = self.cols[j : j + 1]
for c in cols:
r.append("".join([t[1] for t in c]))
return r
def show(self, raw=False, **params):
"highlight the row with optional parameters"
formatter = params.get("formatter", None)
outfile = params.get("outfile", None)
align = params.get("align", self.align)
fill = params.get("fill", self.fill)
sep = params.get("sep", self.separator)
width = params.get("maxwidth", self.maxwidth)
colsz = params.get("colsize")
hidden_c = params.get("hidden_c", set())
squash_c = params.get("squash_c", True)
head = params.get("head", "")
tail = params.get("tail", "")
if raw:
formatter = "Null"
outfile = None
r = [head]
tz = 0
for i, c in enumerate(self.cols):
toks = []
sz = 0
mz = colsz[i]
tz += mz
if tz > width:
mz = mz - (tz - width)
skip = False
for tt, tv in c:
if tt == |
'''
Takes file names from the final/ folder and parses the information into
readable values and produces statistical measures. Use this modul | e as an
executable to process all result information for a single problem, such as:
python stats.py final/multiply*.dat
Do not mix problems in a single run.
NOTE: You CANNOT use pypy for this as scipy is currently unsupported. Use
python 2.7 instead.
'''
from scipy import stats
import json
import sys
from os import path
from collections import defaultdict
from util import pretty_name, median_deviation
from scipy.stats.mstats import kruskalwallis
if __name__ == '__main__':
# Run through all of the files gathering different seeds into lists
statify = defaultdict(list)
active = defaultdict(list)
filecount = 0
for filename in sys.argv[1:]:
base = path.basename(filename)
try:
problem, nodes, version, seed = base.split('_')
with open(filename, 'r') as f:
data = json.load(f)
statify[version].append(data[1]['evals'])
active[version].append(data[1]['phenotype'])
filecount += 1
except ValueError:
print filename, "FAILED"
print 'Files Successfully Loaded', filecount
print 'Kruskal Wallis', kruskalwallis(statify.values())
for version, data in statify.iteritems():
print '--------- %s ---------' % pretty_name[version]
print "MES, MAD", median_deviation(data)
print 'Active', median_deviation(active[version])
print 'Mann Whitney U against Normal',
print stats.mannwhitneyu(statify['normal'], data)
|
from selenium.webdriver.firefox.webdriver import WebDriver
from tests_group.group_lib import GroupBase
from tests_contract.contract_lib import ContactBase
class SessionHelper:
    """Login/logout helpers driving the addressbook web UI through the
    application's shared selenium WebDriver."""

    def __init__(self, app):
        self.app = app

    def login(self, user_name, password):
        """Open the home page and submit the login form."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys("%s" % user_name)
        wd.find_element_by_id("LoginForm").click()
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys("%s" % password)
        wd.find_element_by_css_selector("input[type=\"submit\"]").click()

    def logout(self):
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()

    def is_logged_in(self):
        # Fixed the stray trailing comma in the original signature
        # `def is_logged_in(self,)`.
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """True when the header shows the given username in parentheses."""
        wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text == '('+username+')'

    def ensure_logout(self):
        if self.is_logged_in():
            self.logout()

    def ensure_login(self, user_name, password):
        """Log in as user_name, switching accounts if someone else is in."""
        if self.is_logged_in():
            if self.is_logged_in_as(user_name):
                return
            self.logout()
        self.login(user_name, password)
class BaseClass():
    """Test fixture wiring a fresh WebDriver session to session, group
    and contact helper objects."""

    def __init__(self):
        self.wd = WebDriver()
        #self.wd.implicitly_wait(3)
        self.session = SessionHelper(self)
        self.group = GroupBase(self)
        self.contact = ContactBase(self)

    def is_valid(self):
        """Return True while the underlying browser session is alive."""
        try:
            self.wd.current_url
            return True
        except Exception:
            # Accessing current_url on a dead session raises; the original
            # bare `except:` would also have trapped KeyboardInterrupt.
            return False

    def open_home_page(self):
        wd = self.wd
        wd.get("http://localhost/addressbook/")

    def restore(self):
        """Tear down the browser session."""
        wd = self.wd
        wd.quit()
|
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resouece data folder
#ExStart:1
#Instantiate Aspose Storage AP | I SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);
#Set the barcode file name created on server
name = "sample- | barcode"
#Set Text to encode inside barcode.
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "PNG"
#Set height, Width and quality of the image
imageHeight = 1.0;
imageWidth = 1.0;
imageQuality = "default";
try:
#invoke Aspose.BarCode Cloud SDK API to generate image with specific height, width, and quality along with auto size option
response = barcodeApi.PutBarcodeGenerateFile(name, file= None, text=text, type=type, format=format, imageHeight=imageHeight, imageWidth=imageWidth, imageQuality=imageQuality)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree as ElementTree
from odoo.http import Controller, route, request
class Board(Controller):
    """HTTP controller for the dashboard ("board") addon."""

    @route('/board/add_to_dashboard', type='json', auth='user')
    def add_to_dashboard(self, action_id, context_to_save, domain, view_mode, name=''):
        """Prepend an <action> tile to the first column of the user's
        'My Dashboard' view and persist the modified arch as a per-user
        custom view. Returns True on success, False when the dashboard
        action or view arch cannot be resolved."""
        # Retrieve the 'My Dashboard' action from its xmlid
        action = request.env.ref('board.open_board_my_dash_action')

        if action and action['res_model'] == 'board.board' and action['views'][0][1] == 'form' and action_id:
            # Maybe should check the content instead of model board.board ?
            view_id = action['views'][0][0]

            board = request.env['board.board'].fields_view_get(view_id, 'form')
            if board and 'arch' in board:
                xml = ElementTree.fromstring(board['arch'])
                column = xml.find('./board/column')
                if column is not None:
                    # All tile attributes are stored as strings in the arch.
                    new_action = ElementTree.Element('action', {
                        'name': str(action_id),
                        'string': name,
                        'view_mode': view_mode,
                        'context': str(context_to_save),
                        'domain': str(domain)
                    })
                    column.insert(0, new_action)
                    arch = ElementTree.tostring(xml, encoding='unicode')

                    # ir.ui.view.custom holds the per-user dashboard override.
                    request.env['ir.ui.view.custom'].create({
                        'user_id': request.session.uid,
                        'ref_id': view_id,
                        'arch': arch
                    })
                    return True

        return False
|
from argparse import Action
from flexget.options import Argument | Parser
def test_subparser_nested_namespace():
    """Values of a nested-namespace subparser land under its own name."""
    parser = ArgumentParser()
    parser.add_argument('--outer')
    parser.add_subparsers(nested_namespaces=True)
    inner_parser = parser.add_subparser('sub')
    inner_parser.add_argument('--inner')
    inner_parser.add_subparsers()
    innermost_parser = inner_parser.add_subparser('subsub')
    innermost_parser.add_argument('--innerinner')

    parsed = parser.parse_args(['--outer', 'a', 'sub', '--inner', 'b', 'subsub', '--innerinner', 'c'])

    assert parsed.outer == 'a'
    # First subparser values should be nested under subparser name
    assert parsed.sub.inner == 'b'
    assert not hasattr(parsed, 'inner')
    # The second layer did not define nested_namespaces, results should be in first subparser namespace
    assert parsed.sub.innerinner == 'c'
    assert not hasattr(parsed, 'innerinner')
def test_subparser_parent_defaults():
    """parent_defaults on a subparser win over the parser-wide default,
    but never over an explicitly supplied value."""
    parser = ArgumentParser()
    parser.add_argument('--a')
    parser.set_post_defaults(a='default')
    parser.add_subparsers()
    parser.add_subparser('sub')
    parser.add_subparser('sub_with_default', parent_defaults={'a': 'sub_default'})

    # Make sure normal default works
    assert parser.parse_args(['sub']).a == 'default'
    # Test subparser default
    assert parser.parse_args(['sub_with_default']).a == 'sub_default'
    # Subparser default should not override explicit one
    assert parser.parse_args(['--a', 'manual', 'sub_with_default']).a == 'manual'
def test_post_defaults():
    """Post-defaults apply only when nothing (user or action) set the value."""
    class TouchPostSet(Action):
        # Sets `post_set` only if nothing has set it already.
        def __call__(self, parser, namespace, values, option_string=None):
            if not hasattr(namespace, 'post_set'):
                namespace.post_set = 'custom'

    parser = ArgumentParser()
    parser.add_argument('--post-set')
    parser.add_argument('--custom', action=TouchPostSet, nargs=0)
    parser.set_post_defaults(post_set='default')

    # Explicitly specified, no defaults should be set
    assert parser.parse_args(['--post-set', 'manual']).post_set == 'manual'
    # Nothing supplied, should get the post set default
    assert parser.parse_args([]).post_set == 'default'
    # Custom action should be allowed to set default
    assert parser.parse_args(['--custom']).post_set == 'custom'
|
from django.test import TestCase
from restclients.myplan import get_plan
class MyPlanTestData(TestCase):
    """Exercise the MyPlan restclient against the javerage mock data.

    Uses assertEqual throughout — assertEquals is a deprecated alias.
    """

    def test_javerage(self):
        """Four terms should parse with the expected years, quarters,
        course counts, helper URLs and section ids."""
        plan = get_plan(regid="9136CCB8F66711D5BE060004AC494FFE", year=2013, quarter="spring", terms=4)
        self.assertEqual(len(plan.terms), 4)

        self.assertEqual(plan.terms[0].year, 2013)
        self.assertEqual(plan.terms[1].year, 2013)
        self.assertEqual(plan.terms[2].year, 2013)
        self.assertEqual(plan.terms[3].year, 2014)

        self.assertEqual(plan.terms[0].quarter, 'Spring')
        self.assertEqual(plan.terms[1].quarter, 'Summer')
        self.assertEqual(plan.terms[2].quarter, 'Autumn')
        self.assertEqual(plan.terms[3].quarter, 'Winter')

        self.assertEqual(len(plan.terms[0].courses), 2)
        self.assertEqual(len(plan.terms[1].courses), 1)
        self.assertEqual(len(plan.terms[2].courses), 0)
        self.assertEqual(len(plan.terms[3].courses), 0)

        term_data = plan.terms[0]
        self.assertEqual(term_data.course_search_href,
                         "https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/course")
        self.assertEqual(term_data.degree_audit_href,
                         "https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/audit/degree")
        self.assertEqual(term_data.myplan_href,
                         "https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/plan/20132")
        self.assertEqual(term_data.registration_href,
                         "https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/registration/20132")
        self.assertEqual(term_data.registered_courses_count, 0)
        self.assertEqual(term_data.registered_sections_count, 0)

        self.assertEqual(term_data.courses[0].registrations_available, True)
        self.assertEqual(term_data.courses[0].curriculum_abbr, 'CSE')
        self.assertEqual(term_data.courses[0].course_number, '101')
        self.assertEqual(len(term_data.courses[0].sections), 3)
        self.assertEqual(term_data.courses[0].sections[0].section_id, 'A')
        self.assertEqual(term_data.courses[0].sections[1].section_id, 'AA')
        self.assertEqual(term_data.courses[0].sections[2].section_id, 'AB')

    def test_json(self):
        """json_data() should mirror the parsed term/course structure."""
        plan = get_plan(regid="9136CCB8F66711D5BE060004AC494FFE",
                        year=2013, quarter="spring",
                        terms=4)
        json_data = plan.json_data()
        term_data = json_data["terms"][0]
        self.assertEqual(term_data["courses"][0]["sections"][1]["section_id"], "AA")
        self.assertEqual(term_data["registered_courses_count"], 0)
        self.assertEqual(term_data["registration_href"],
                         "https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/registration/20132")
        self.assertEqual(term_data["course_search_href"],
                         "https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/course")
        self.assertEqual(term_data["quarter"], "Spring")
|
# Some useful functions to extract data out of emails
# Copyright (C) 2002-2012 John Goerzen & contributors
#
# Thi | s program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import email
from email.Parser import Parser as MailParser
import time
def get_message_date(content, header='Date'):
    """
    Parses mail and returns resulting timestamp.

    :param header: the header to extract date from;
    :returns: timestamp or `None` in the case of failure.
    """
    parsed = MailParser().parsestr(content, True)
    # parsedate_tz yields a 10-tuple suitable for mktime_tz, or None when
    # the header is absent or not in a valid format (indexes 6-8 of the
    # tuple are not usable).
    datetuple = email.utils.parsedate_tz(parsed.get(header))
    return None if datetuple is None else email.utils.mktime_tz(datetuple)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
import six
from happenings.models import Event
@override_settings(CALENDAR_SHOW_LIST=True)
class SetMeUp(TestCase):
    """Shared fixture base for calendar tests: one user plus helpers for
    matching rendered calendar HTML."""

    @classmethod
    def setUpClass(cls):
        super(SetMeUp, cls).setUpClass()
        cls.user = User.objects.create_user(
            'foo', 'bar@example.com', 'secret'
        )
        # cal_str(day) renders the HTML fragment expected around a day
        # number in the calendar grid; `html` is captured once here.
        html = '">%d</a><a class='
        cls.cal_str = lambda self, day: html % day
        cls.event_div = '<div class="calendar-event">'

    @classmethod
    def tearDownClass(cls):
        cls.user.delete()

    def clean_whitespace(self, response):
        """Remove all newlines and all occurances of multiple spaces."""
        # Accepts either an HttpResponse (mutated in place) or a plain
        # string/bytes value (returned cleaned).
        if hasattr(response, 'content'):
            is_response = True
            content = response.content
        else:
            is_response = False
            content = response
        if isinstance(content, six.text_type):
            content = content.encode('utf-8')
        content = content.replace(b'\n', b'')
        for num_spaces in range(7, 2, -1):
            # reduce all multiple spaces to 2 spaces.
            # We can process here only `num_spaces=3` with the same result, but it will be slower
            while content.find(b' '*num_spaces) >= 0:
                content = content.replace(b' '*num_spaces, b' '*2)
        # finally drop the remaining double spaces entirely
        content = content.replace(b' '*2, b'')
        if is_response:
            response.content = content
        else:
            content = content.decode('utf-8')
        return content
def create_event(created_by, title, description, all_day=False,
                 start_date=None, end_date=None, categories=None, tags=None,
                 repeat='NEVER', end_repeat=None, full=True, utc=False):
    """
    A factory method for creating events. If start_date is supplied,
    end_date must also be supplied, and they must both be either lists
    or tuples e.g. start_date=[2014, 2, 2], end_date=[2014, 2, 3].
    """
    if start_date and end_date:
        # Make both dates timezone-aware: UTC or the project default.
        zone = timezone.utc if utc else timezone.get_default_timezone()
        start_date = timezone.make_aware(datetime.datetime(*start_date), zone)
        end_date = timezone.make_aware(datetime.datetime(*end_date), zone)
    elif bool(start_date) != bool(end_date):
        # Exactly one of the two was supplied.
        raise ValueError("Both start_date and end_date must be supplied or not"
                         " supplied at all when using create_event")
    else:
        start_date = timezone.now()
        end_date = timezone.now()

    event = Event.objects.create(
        start_date=start_date,
        end_date=end_date,
        all_day=all_day,
        created_by=created_by,
        title=title,
        description=description,
        repeat=repeat,
        end_repeat=end_repeat
    )
    for category_title in (categories or []):
        event.categories.create(title=category_title)
    for tag_name in (tags or []):
        event.tags.create(name=tag_name)
    if full:
        event.full_clean()
    event.save()
    return event
|
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from .exchange2010 import Exchange2010Service # noqa
from .connection import ExchangeNTLMAuthConnection # noqa
from .connection import ExchangeHTTPBasicAuthConnection
# Silence notification of no default logging handler
log = logging.getLogger("pyexchange")

class NullHandler(logging.Handler):
    # NOTE(review): logging.NullHandler ships with Python >= 2.7; this
    # local copy is presumably kept for older interpreters — confirm.
    def emit(self, record):
        pass

log.addHandler(NullHandler())
|
#### NOTICE: THIS FILE IS AUTOGENERATED
## | ## MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the Tangible template for the mk4 shield
    effectiveness intensifier.

    This file is autogenerated; hand edits belong only between the
    BEGIN/END MODIFICATIONS markers below.
    """
    result = Tangible()

    result.template = "object/tangible/ship/crafted/weapon/shared_shield_effectiveness_intensifier_mk4.iff"
    result.attribute_template_id = 8
    result.stfName("space_crafting_n","shield_effectiveness_intensifier_mk4")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
ullable=True),
sa.Column(u'release_version', sa.TEXT(), nullable=True),
sa.Column(u'major_version', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'full_urls',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'url', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'raw_product_releases',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'version', sa.TEXT(), nullable=False),
sa.Column(u'build', sa.TEXT(), nullable=False),
sa.Column(u'build_type', CITEXT(), nullable=False),
sa.Column(u'platform', sa.TEXT(), nullable=False),
sa.Column(u'product_name', CITEXT(), nullable=False),
sa.Column(u'repository', sa.TEXT(), nullable=False),
sa.Column(u'stability', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'products',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'crashes_normalized',
sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
nullable=False),
sa.Column(u'signature_id', sa.TEXT(), nullable=False),
sa.Column(u'error_message_id', JSON(), nullable=False),
sa.Column(u'product_id', sa.TEXT(), nullable=True),
sa.Column(u'user_agent_id', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint(u'crash_id'),
schema=u'bixie'
)
op.create_table(u'hosts',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'signatures',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'signature', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'crashes',
sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
nullable=False),
sa.Column(u'signature', sa.TEXT(), nullable=False),
sa.Column(u'error', JSON(), nullable=False),
sa.Column(u'product', sa.TEXT(), nullable=True),
sa.Column(u'protocol', sa.TEXT(), nullable=True),
sa.Column(u'hostname', sa.TEXT(), nullable=True),
sa.Column(u'username', sa.TEXT(), nullable=True),
sa.Column(u'port', sa.TEXT(), nullable=True),
sa.Column(u'path', sa.TEXT(), nullable=True),
sa.Column(u'query', sa.TEXT(), nullable=True),
sa.Column(u'full_url', sa.TEXT(), nullable=True),
sa.Column(u'user_agent', sa.TEXT(), nullable=True),
sa.Column(u'success', sa.BOOLEAN(), nullable=True),
sa.Column(u'client_crash_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'client_submitted_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'processor_started_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'processor_completed_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.PrimaryKeyConstraint(u'crash_id'),
schema=u'bixie'
)
op.create_table(u'release_channels',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', CITEXT(), nullable=False),
sa.Column(u'sort', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'os_names',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'error_messages',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_version_adi',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_version_id', sa.INTEGER(), nullable=False),
sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
sa.Column(u'os_name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'raw_adi',
sa.Column(u'adi_count', sa.BIGINT(), nullable=True),
sa.Column(u'date', sa.DATE(), nullable=True),
sa.Column(u'product_name', sa.TEXT(), nullable=True),
sa.Column(u'product_os_platform', sa.TEXT(), nullable=True),
sa.Column(u'product_os_version', sa.TEXT(), nullable=True),
sa.Column(u'product_version', sa.TEXT(), nullable=True),
sa.Column(u'build', sa.TEXT(), nullable=True),
sa.Column(u'build_channel', sa.TEXT(), nullable=True),
sa.Column(u'product_guid', sa.TEXT(), nullable=True),
sa.Column(u'received_at', postgresql.TIMESTAMP(timezone=True),
nullable=True),
sa.PrimaryKeyConstraint(),
schema=u'bixie'
)
op.create_table(u'users',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_adi',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_id', sa.INTEGER(), nullable=False),
sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
sa.Column(u'os_name', CITEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'user_agents',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['error_message_id'],
[u'bixie.error_messages.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_users',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.Column(u'user_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.ForeignKeyConstraint(['user_id'], [u'bixie.users.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_release_channels',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'release_channel_id', sa.INTEGER(), nullable=True),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.ForeignKeyConstraint(['release_channel_id'],
[u'bixie.release_channels.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'error_message_products',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['error_message_id'],
[u'bixie.error_messages.id'], ),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table(u'error_message_products', schema=u'bixie')
op.drop_table(u'product_release_channels', schema=u'bixie')
op.drop_table(u'product_users', schema=u'bixie')
op.drop_table(u'user_agents', schema=u'bixie')
op.drop_table(u'product_adi', schema=u'bixie')
op.drop_table(u'users', schema=u'bixie')
op.drop_table(u'raw_adi', schema=u'bixie')
op.drop_table(u'product_version_adi', schema=u'bixie | ')
op.drop_table(u'error_messages', schema=u'bixie')
op.drop_table(u'os_names', schema=u'bixie')
op.drop_table(u'release_channels', schema=u'bixie')
op.drop_table(u'crashes', schema=u'bixie')
op.drop_table(u'signatures', schema=u'bixie')
op.drop_table(u'hosts', schema=u'bixie')
op.drop_table(u'crashes_normalized', schema=u'bixie')
op.drop_table(u'products', schema=u'bixie')
op.drop_table(u'raw_product_re | leases', schema |
#
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Lice | nse at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the speci | fic language governing permissions and
# limitations under the License.
#
__author__ = 'basca'

# Package version as a tuple and its dotted-string rendering.
version = (0, 9, 5)
str_version = '.'.join(map(str, version))
ch}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other):
    """Implementation of & operator when left operand is not a C{ParserElement}."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        # Delegate to the normal & with operands in source order.
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__(self):
    """Unary C{~} operator — wrap this expression in a C{NotAny} negative lookahead."""
    return NotAny(self)
def __call__(self, name):
    """Shortcut for C{setResultsName}, with C{listAllMatches=default}::
          userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
       could be written as::
          userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")

       If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
       passed as C{True}.
    """
    # Calling an element with a string is sugar for naming its results.
    return self.setResultsName(name)
def suppress(self):
    """Suppresses the output of this C{ParserElement}; useful to keep punctuation
    from cluttering up returned output.
    """
    return Suppress(self)
def leaveWhitespace(self):
    """Disables the skipping of whitespace before matching the characters in the
    C{ParserElement}'s defined pattern.  Normally only used internally by the
    pyparsing module, but may be needed in some whitespace-sensitive grammars.
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars(self, chars):
    """Overrides the default whitespace chars for this element."""
    # Enabling skipping and replacing the character set go together;
    # the element also stops inheriting the class-level default.
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self
def parseWithTabs(self):
    """Overrides default behavior to expand C{<TAB>}s to spaces before parsing the
    input string.  Must be called before C{parseString} when the input grammar
    contains elements that match C{<TAB>} characters.
    """
    self.keepTabs = True
    return self
def ignore(self, other):
    """Define expression to be ignored (e.g., comments) while doing pattern
    matching; may be called repeatedly, to define multiple comment or other
    ignorable patterns.
    """
    if isinstance(other, Suppress):
        # Already-suppressed expressions are de-duplicated before adding.
        if other not in self.ignoreExprs:
            self.ignoreExprs.append(other.copy())
        return self
    # Anything else is wrapped in Suppress so it never shows up in results.
    self.ignoreExprs.append(Suppress(other.copy()))
    return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
    """Enable display of debugging messages while doing pattern matching.

    Each action may be None/falsy, in which case the corresponding module
    default debug action is installed instead.
    """
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
def setDebug( self, flag=True ):
    """Enable display of debugging messages while doing pattern matching.
    Set C{flag} to True to enable, False to disable.
    """
    if flag:
        self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
    else:
        # Disabling only clears the flag; previously-installed debug actions
        # remain assigned (unused while self.debug is False).
        self.debug = False
    return self
def __str__( self ):
    # Elements render as their (possibly user-assigned) name.
    return self.name
def __repr__( self ):
    # Reuse the module's unicode-safe string helper for repr output.
    return _ustr(self)
def streamline(self):
    """Mark this element as streamlined and drop its cached string form."""
    self.streamlined = True
    self.strRepr = None
    return self
def checkRecursion( self, parseElementList ):
    # Default implementation is a no-op; compound subclasses are expected to
    # override this to detect infinitely-recursive grammars.
    pass
def validate( self, validateTrace=[] ):
    """Check defined expressions for valid structure, check for infinite recursive definitions.

    NOTE(review): the mutable default ``validateTrace=[]`` is unused here but
    shared across calls if a subclass relies on it — confirm before changing.
    """
    self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
    """Execute the parse expression on the given file or filename.

    If a filename is specified (instead of a file object), the entire file
    is opened, read, and closed before parsing.

    Raises the original C{ParseBaseException} (re-raised locally to hide
    pyparsing's internal frames unless verbose_stacktrace is set).
    """
    try:
        file_contents = file_or_filename.read()
    except AttributeError:
        # Not file-like: treat it as a path.  Use a context manager so the
        # handle is closed even if read() raises (the original leaked it).
        with open(file_or_filename, "rb") as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            raise exc
def __eq__(self,other):
    # ParserElement vs ParserElement: identity, or identical internal state.
    if isinstance(other, ParserElement):
        return self is other or self.__dict__ == other.__dict__
    elif isinstance(other, basestring):
        # Element == string means: does this expression match the whole string?
        try:
            self.parseString(_ustr(other), parseAll=True)
            return True
        except ParseBaseException:
            return False
    else:
        # NOTE(review): this compares the bound super() proxy object itself to
        # `other` (not super().__eq__(other)), which is effectively always
        # False — confirm whether that fallback is intentional.
        return super(ParserElement,self)==other
def __ne__(self,other):
    # Defined explicitly for Python 2, which does not derive != from ==.
    return not (self == other)
def __hash__(self):
    # Identity-based hash.  NOTE(review): __eq__ above compares by value for
    # distinct elements, so equal-but-distinct elements can hash differently
    # — confirm this asymmetry is intended.
    return hash(id(self))
def __req__(self,other):
    # Reflected equality helper; delegates to __eq__.
    return self == other
def __rne__(self,other):
    # Reflected inequality helper; negation of __eq__.
    return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""

    def __init__(self):
        # Atomic tokens never accumulate sub-results, so savelist is off.
        super(Token, self).__init__(savelist=False)

    def setName(self, name):
        ret = super(Token, self).setName(name)
        # Keep the error message in sync with the freshly-assigned name.
        self.errmsg = "Expected " + self.name
        return ret
class Empty(Token):
    """An empty token, will always match."""

    def __init__(self):
        super(Empty, self).__init__()
        self.name = "Empty"
        # Matching nothing is the point: empty results, no index errors.
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """A token that will never match."""

    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.mayIndexError = False
        self.mayReturnEmpty = True
        self.errmsg = "Unmatchable token"

    def parseImpl(self, instring, loc, doActions=True):
        # Unconditionally fail at the current location.
        raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # Degrade an empty Literal into an Empty token by swapping the
            # instance's class outright; the attributes set below still apply.
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        # Compare one character directly first; only fall back to the
        # (slower) startswith call for multi-character literals.
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{Literal}::
Literal("if") will match the leading C{'if'} in |
from .models import HourRegistration
from orders.models import Product
from django.utils import timezone
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from datetime import datetime
import pytz
from django.contrib.auth.decorators import permission_required
@login_required
# Bug fix: permission_required('a', 'b') treats the SECOND positional
# argument as login_url, not as another permission — pass a sequence to
# actually require both permissions.
@permission_required(('hour_registration.add_hourregistration',
                      'hour_registration.change_hourregistration'))
def start_time_tracking(request, product_id):
    """Start a new open HourRegistration for the product.

    Returns JSON {'success': True, 'start': <localised start>} on creation,
    or delegates to existing_time_tracking() when a registration is already
    open (for any product).
    """
    product = Product.objects.get(id=product_id)
    if not HourRegistration.objects.filter(end=None).exists():
        entry = HourRegistration(product=product, start=timezone.now())
        entry.save()
        return JsonResponse({'success': True, 'start': format_time_to_local(entry.start)})
    return existing_time_tracking(request)
@login_required
@permission_required('hour_registration.change_hourregistration')
def end_time_tracking(request, product_id):
    """Close the open HourRegistration for the product, if one exists.

    Idempotent: reports success whether or not anything needed closing.
    """
    product = Product.objects.get(id=product_id)
    # .first() avoids the original count()-then-index pair of queries.
    entry = HourRegistration.objects.filter(product=product, end=None).first()
    if entry is not None and not entry.end:
        entry.end = timezone.now()
        entry.save()
    return JsonResponse({'success': True})
@login_required
@permission_required('hour_registration.change_hourregistration')
def add_description_to_hourregistration(request):
    """Dispatch description handling by HTTP verb (POST saves, GET reads)."""
    handlers = {
        'POST': add_description_to_hourregistration_post,
        'GET': get_description_to_hourregistration,
    }
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request)
def add_description_to_hourregistration_post(request):
    """Attach POST['description'] to the product's open HourRegistration."""
    product = Product.objects.get(id=request.POST['product_id'])
    entry = HourRegistration.objects.filter(product=product, end=None).first()
    if entry is None:
        return JsonResponse({'error': 'No open HR found'})
    entry.description = request.POST['description']
    entry.save()
    return JsonResponse({'success': True})
def get_description_to_hourregistration(request):
    """Return the description of the product's open HourRegistration."""
    product = Product.objects.get(id=request.GET['product_id'])
    entry = HourRegistration.objects.filter(product=product, end=None).first()
    if entry is not None:
        return JsonResponse({'description': entry.description})
    return JsonResponse({'error': 'No HR object found'})
@login_required
@permission_required('hour_registration.view_hourregistration')
def existing_time_tracking(request):
    """Report the currently open registration (any product), if any."""
    entry = HourRegistration.objects.filter(end=None).first()
    if entry is None:
        return JsonResponse({"existing": 'False'})
    product = Product.objects.get(id=entry.product_id)
    return JsonResponse({'pk': entry.product_id,
                         'start': format_time_to_local(entry.start),
                         'title': product.title})
@login_required
@permission_required('hour_registration.delete_hourregistration')
def delete_time_tracking(request):
    """Delete the HourRegistration identified by POST['time_id'].

    Returns a JSON error (instead of the original unhandled KeyError /
    HTTP 500) when the parameter is missing.
    """
    try:
        time_id = request.POST['time_id']
        entry = HourRegistration.objects.get(pk=time_id)
        entry.delete()
        return JsonResponse({'success': 'true'})
    except KeyError:
        return JsonResponse({'error': 'time_id is required'})
    except HourRegistration.DoesNotExist:
        return JsonResponse({'error': 'HR object not found'})
@login_required
@permission_required('hour_registration.change_hourregistration')
def set_end_time(request):
    """Set the end timestamp (POST endDate/endTime) of registration POST['pk']."""
    required = ('pk', 'endDate', 'endTime')
    if not all(field in request.POST for field in required):
        return JsonResponse({'success': 'false'})
    end_dt = format_date_and_time(request.POST['endDate'], request.POST['endTime'])
    entry = HourRegistration.objects.get(pk=request.POST['pk'])
    # Localise the naive parsed datetime to the shop's timezone.
    entry.end = pytz.timezone('Europe/Amsterdam').localize(end_dt)
    entry.save()
    return JsonResponse({'success': 'true'})
@login_required
@permission_required('hour_registration.add_hourregistration')
def create_new_hour_registration(request):
    """Create a completed HourRegistration from POSTed start/end date-times."""
    required = ('startDate', 'startTime', 'endDate', 'endTime', 'product_id')
    if not all(field in request.POST for field in required):
        return JsonResponse({'success': 'false'})
    description = request.POST.get('description', "")
    start_dt = format_date_and_time(request.POST['startDate'], request.POST['startTime'])
    end_dt = format_date_and_time(request.POST['endDate'], request.POST['endTime'])
    product = Product.objects.get(pk=request.POST['product_id'])
    entry = HourRegistration(start=start_dt, end=end_dt,
                             product=product, description=description)
    entry.save()
    return JsonResponse({'success': 'true'})
def format_time_to_local(time):
    # Convert an (aware) datetime to the active local timezone and render it
    # as 'DD-MM-YYYY HH:MM:SS'.
    return timezone.localtime(time).strftime('%d-%m-%Y %H:%M:%S')
def format_date_and_time(date, time):
    """Parse a 'DD-MM-YYYY' date and 'HH:MM' time into a naive datetime."""
    combined = '{} {}'.format(date, time)
    return datetime.strptime(combined, '%d-%m-%Y %H:%M')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Michael DeHaan <michael@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: say
version_added: "1.2"
short_description: Makes a computer to speak.
description:
- makes a computer speak! Amuse your friends, annoy your coworkers!
notes:
- In 2.5, this module has been renamed from C(osx_say) to M(say).
- If you like this module, you may also be interested in the osx_say callback plugin.
options:
msg:
description:
What to say
required: true
voice:
description:
What voice to use
required: false
requirements: [ say or espeak or espeak-ng ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
- say:
msg: '{{ inventory_hostname }} is all done'
voice: Zarvox
delegate_to: localhost
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
def say(module, executable, msg, voice):
    """Run the TTS binary, optionally selecting a voice; fail the task on error."""
    cmd = [executable, msg]
    if voice:
        cmd += ['-v', voice]
    module.run_command(cmd, check_rc=True)
def main():
    """Entry point: validate arguments, locate a TTS binary, and speak."""
    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            voice=dict(required=False),
        ),
        supports_check_mode=True
    )

    msg = module.params['msg']
    voice = module.params['voice']
    possibles = ('say', 'espeak', 'espeak-ng')

    if get_platform() != 'Darwin':
        # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
        voice = None

    # Pick the first candidate binary found on PATH.
    for possible in possibles:
        executable = module.get_bin_path(possible)
        if executable:
            break
    else:
        # for/else: loop exhausted without a break — nothing was found.
        module.fail_json(msg='Unable to find either %s' % ', '.join(possibles))

    if module.check_mode:
        # Check mode: report what would be said without speaking.
        module.exit_json(msg=msg, changed=False)

    say(module, executable, msg, voice)

    module.exit_json(msg=msg, changed=True)


if __name__ == '__main__':
    main()
|
from flask import request
from flask_restful import Resource
import json
from core.bo.clienteBo import ClienteBo
class Cliente(Resource):
    """Flask-RESTful resource for cliente records.

    All business logic is delegated to the ClienteBo facade.
    NOTE(review): every handler returns HTTP 201 (Created), including reads —
    200 would be conventional for GET; confirm clients before changing.
    """

    def __init__(self):
        # Business-object facade used by every verb handler.
        self.cliente = ClienteBo()

    def get(self, parameter=""):
        """Dispatch GET by the JSON-encoded `parameter` path segment:
        empty -> all records; 'id' -> by id; 'document' -> by document;
        'board' -> board view; anything else -> generic filter.
        """
        if parameter == "":
            return self.cliente.get_all(), 201
        else:
            parameter = json.loads(parameter)
            if parameter.get('id'):
                return self.cliente.get_by_id(parameter["id"]), 201
            elif parameter.get('document'):
                return self.cliente.get_document(parameter["document"], parameter["cliente"]), 201
            elif parameter.get('board'):
                return self.cliente.get_board(), 201
            else:
                return self.cliente.get_by_filter(parameter), 201

    def put(self, parameter):
        # Update the cliente identified by `parameter` with the JSON body.
        cliente = request.json
        return self.cliente.update(parameter, cliente), 201

    def post(self, parameter):
        # Upload the multipart file field 'arquivo' for the given cliente.
        file = request.files['arquivo']
        return self.cliente.upload(parameter, file), 201
# -*- coding: utf-8 -*-
from dCore import *
from dConstants import *
from dLog import *
from dThread import *
from dModules import *
class DamnVideoLoader(DamnThread):
    """Background thread that resolves URIs (module-handled sites, generic
    HTTP URLs, local files and directories) into video entries and posts
    progress events back to `parent`."""

    def __init__(self, parent, uris, thengo=False, feedback=True, allownonmodules=True):
        DamnThread.__init__(self)
        self.uris = []
        # Accept a single URI or any list/tuple of them; normalise to unicode.
        if type(uris) not in (type(()), type([])):
            uris = [uris]
        for i in uris:
            self.uris.append(DamnUnicode(i))
        self.parent = parent
        self.thengo = thengo              # start converting right after loading
        self.feedback = feedback          # post UI events while working
        self.done = False
        self.result = None                # last metadata dict produced, if any
        self.allownonmodules = allownonmodules
        Damnlog('DamnVideoLoader spawned with parameters: parent =',parent,'; thengo?',thengo,'; feedback?',feedback,'; allow non-modules?',allownonmodules)

    def go(self):
        if self.feedback:
            self.parent.toggleLoading(True)
        self.vidLoop(self.uris)
        self.done = True
        if self.feedback:
            self.parent.toggleLoading(False)
        else:
            # NOTE(review): self.done was just set True, so this loop never
            # exits when feedback is off — presumably meant `while not
            # self.done` (or belongs in the caller); confirm before touching.
            while self.done:
                time.sleep(.1)

    def postEvent(self, info):
        # Events are only delivered when feedback is enabled.
        if self.feedback:
            DV.postEvent(self.parent, (DV.evt_loading, info))

    def getVidName(self, uri):
        # Delegates title lookup to the parent window.
        return self.parent.getVidName(uri)

    def addValid(self, meta):
        # Record the originating URI, remember the result, and notify the UI.
        meta['original'] = self.originaluri
        self.result = meta
        self.postEvent({'meta':meta, 'go':self.thengo})

    def SetStatusText(self, status):
        self.postEvent({'status':status})

    def showDialog(self, title, content, icon):
        self.postEvent({'dialog':(title, content, icon)})

    def vidLoop(self, uris):
        """Resolve each URI: site modules first, then generic HTTP, then
        local files/directories (with optional recursion)."""
        Damnlog('Starting vidLoop with URIs:',uris)
        for uri in uris:
            Damnlog('vidLoop considering URI:',uri)
            self.originaluri = uri
            bymodule = False
            # Give every registered site module a chance to claim the URI.
            for module in DamnIterModules(False):
                Damnlog('Trying module',module['class'],'for URI',uri)
                mod = module['class'](uri)
                if mod.validURI():
                    Damnlog('Module has been chosen for URI',uri,':',mod)
                    mod.addVid(self)
                    bymodule = True
                    break
            if not bymodule:
                Damnlog('No module found for URI:',uri)
                if not self.allownonmodules:
                    Damnlog('DamnVideoLoader exitting because no module was found and non-modules are not allowed.')
                    self.result = None
                    return
                if REGEX_HTTP_GENERIC.match(uri):
                    Damnlog('HTTP regex still matches URI:',uri)
                    name = self.getVidName(uri)
                    if name == DV.l('Unknown title'):
                        # Fall back to the URL's file name component.
                        name = REGEX_HTTP_EXTRACT_FILENAME.sub('', uri)
                    self.addValid({'name':name, 'profile':DV.prefs.get('defaultwebprofile'), 'profilemodified':False, 'fromfile':name, 'dirname':REGEX_HTTP_EXTRACT_DIRNAME.sub('\\1/', uri), 'uri':uri, 'status':DV.l('Pending.'), 'icon':'generic'})
                else:
                    # It's a file or a directory
                    if os.path.isdir(uri):
                        Damnlog('URI',uri,'is a directory.')
                        if DV.prefs.get('DirRecursion') == 'True':
                            for i in os.listdir(uri):
                                self.vidLoop([uri + DV.sep + i]) # This is recursive; if i is a directory, this block will be executed for it too
                        else:
                            if len(uris) == 1: # Only one dir, so an alert here is tolerable
                                self.showDialog(DV.l('Recursion is disabled.'), DV.l('This is a directory, but recursion is disabled in the preferences. Please enable it if you want DamnVid to go through directories.'), wx.OK | wx.ICON_EXCLAMATION)
                            else:
                                self.SetStatusText(DV.l('Skipped ') + uri + DV.l(' (directory recursion disabled).'))
                    else:
                        Damnlog('URI',uri,'is a file.')
                        filename = os.path.basename(uri)
                        if uri in self.parent.videos:
                            # Duplicate entry: skip, and only alert for a
                            # single-file drop.
                            self.SetStatusText(DV.l('Skipped ') + filename + DV.l(' (already in list).'))
                            if len(uris) == 1: # There's only one file, so an alert here is tolerable
                                self.showDialog(DV.l('Duplicate found'), DV.l('This video is already in the list!'), wx.ICON_EXCLAMATION | wx.OK)
                        else:
                            self.addValid({'name':filename[0:filename.rfind('.')], 'profile':DV.prefs.get('defaultprofile'), 'profilemodified':False, 'fromfile':filename, 'uri':uri, 'dirname':os.path.dirname(uri), 'status':DV.l('Pending.'), 'icon':'damnvid'})

# Export the loader under the name the rest of the app uses.
DV.videoLoader = DamnVideoLoader
|
#!/usr/bin/env python
"""Vandermonde matrix example
Demonstrates matrix computations using the Vandermonde matrix.
* http://en.wikipedia.org/wiki/Vandermonde_matrix
"""
from sympy import Matrix, pprint, Rational, sqrt, symbols, Symbol, zeros
def symbol_gen(sym_str):
    """Symbol generator

    Yields Symbol("<sym_str>_<n>") with n counting up from 0, one per call.
    """
    counter = 0
    while True:
        yield Symbol("%s_%d" % (sym_str, counter))
        counter += 1
def comb_w_rep(n, k):
    """Combinations with repetition

    Returns the list of k combinations with repetition from n objects,
    each combination as a non-decreasing list of indices.
    """
    if k == 0:
        return [[]]
    results = [[first] for first in range(n)]
    for _ in range(k - 1):
        # Extend every prefix only with indices >= its last element so each
        # combination stays sorted (and is generated exactly once).
        results = [prefix + [m]
                   for prefix in results
                   for m in range(prefix[-1], n)]
    return results
def vandermonde(order, dim=1, syms='a b c d'):
    """Computes a Vandermonde matrix of given order and dimension.

    Define syms to give beginning strings for temporary variables.

    Returns the Matrix, the temporary variables, and the terms for the
    polynomials.
    """
    syms = syms.split()
    if len(syms) < dim:
        # Synthesize extra symbol stems by recycling the given ones with a
        # numeric suffix (integer division — this module is Python 2).
        new_syms = []
        for i in range(dim - len(syms)):
            new_syms.append(syms[i%len(syms)] + str(i/len(syms)))
        syms.extend(new_syms)
    # One row term per combination-with-repetition up to `order`.
    terms = []
    for i in range(order + 1):
        terms.extend(comb_w_rep(dim, i))
    rank = len(terms)
    V = zeros(rank)
    generators = [symbol_gen(syms[i]) for i in range(dim)]
    all_syms = []
    for i in range(rank):
        # Python 2 generator protocol (.next()): one fresh symbol per
        # dimension for this row.
        row_syms = [g.next() for g in generators]
        all_syms.append(row_syms)
        for j,term in enumerate(terms):
            # Entry (i, j) is the product of this row's symbols chosen by term.
            v_entry = 1
            for k in term:
                v_entry *= row_syms[k]
            V[i*rank + j] = v_entry
    return V, all_syms, terms
def gen_poly(points, order, syms):
    """Generates a polynomial using a Vandermonde system.

    `points` are (coord..., value) tuples; the last component is the
    dependent value being fitted.
    """
    num_pts = len(points)
    if num_pts == 0:
        raise ValueError("Must provide points")
    dim = len(points[0]) - 1
    if dim > len(syms):
        # NOTE(review): "at lease" is a typo for "at least" in this message.
        raise ValueError("Must provide at lease %d symbols for the polynomial" % dim)
    V, tmp_syms, terms = vandermonde(order, dim)
    if num_pts < V.shape[0]:
        raise ValueError(
            "Must provide %d points for order %d, dimension "\
            "%d polynomial, given %d points" % \
            (V.shape[0], order, dim, num_pts))
    elif num_pts > V.shape[0]:
        # Python 2 print statement (this module is Python 2 only).
        print "gen_poly given %d points but only requires %d, "\
            "continuing using the first %d points" % \
            (num_pts, V.shape[0], V.shape[0])
        num_pts = V.shape[0]

    # Substitute each point's coordinates for the placeholder symbols in the
    # corresponding Vandermonde row, then solve for the coefficients.
    subs_dict = {}
    for j in range(dim):
        for i in range(num_pts):
            subs_dict[tmp_syms[i][j]] = points[i][j]
    V_pts = V.subs(subs_dict)
    V_inv = V_pts.inv()
    coeffs = V_inv.multiply(Matrix([points[i][-1] for i in xrange(num_pts)]))

    # Assemble the polynomial: sum of coeff * product-of-symbols per term.
    f = 0
    for j,term in enumerate(terms):
        t = 1
        for k in term:
            t *= syms[k]
        f += coeffs[j]*t
    return f
def main():
    """Demo: build a Vandermonde matrix, verify its determinant identity, and
    fit 1D/2D/3D polynomials through sample points."""
    order = 2
    V, tmp_syms, _ = vandermonde(order)
    print "Vandermonde matrix of order 2 in 1 dimension"
    pprint(V)

    print '-'*79
    # NOTE(review): "determinate" is a typo for "determinant" in this output.
    print "Computing the determinate and comparing to \sum_{0<i<j<=3}(a_j - a_i)"

    # Product of pairwise differences — the classic Vandermonde determinant.
    det_sum = 1
    for j in range(order + 1):
        for i in range(j):
            det_sum *= (tmp_syms[j][0] - tmp_syms[i][0])
    print """
det(V) = %(det)s
\sum = %(sum)s
     = %(sum_expand)s
""" % { "det": V.det(),
        "sum": det_sum,
        "sum_expand": det_sum.expand(),
      }

    print '-'*79
    print "Polynomial fitting with a Vandermonde Matrix:"
    x,y,z = symbols('x,y,z')

    points = [(0,3), (1,2), (2,3)]
    print """
Quadratic function, represented by 3 points:
   points = %(pts)s
   f = %(f)s
""" % { "pts" : points,
        "f" : gen_poly(points, 2, [x]),
      }

    points = [(0, 1, 1), (1, 0, 0), (1, 1, 0), (Rational(1, 2), 0, 0),
              (0, Rational(1, 2), 0), (Rational(1, 2), Rational(1, 2), 0)]
    print """
2D Quadratic function, represented by 6 points:
   points = %(pts)s
   f = %(f)s
""" % { "pts" : points,
        "f" : gen_poly(points, 2, [x, y]),
      }

    points = [(0, 1, 1, 1), (1, 1, 0, 0), (1, 0, 1, 0), (1, 1, 1, 1)]
    print """
3D linear function, represented by 4 points:
   points = %(pts)s
   f = %(f)s
""" % { "pts" : points,
        "f" : gen_poly(points, 1, [x, y, z]),
      }
|
# -*- codin | g: utf-8 -*-
import pandas as pd
import numpy as np
from axiomatic.base import AxiomSystem
from axiomatic.elementary_conditions import MinMaxAxiom
# l, r, pmin, pmax
params = [1, 1, -0.8, 0.8]
axiom_list = [MinMaxAxiom(params)]
ts = pd.DataFrame(np.random.random((10, 2)))
print(ts)
print(MinMaxAxiom(params).run(ts, dict()))
now = AxiomSystem(ax | iom_list)
print(now.perform_marking(ts))
|
s about this component, please refer to the documentation at
https://home-assistant.io/components/proximity/
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.util.location import distance
# Components that must be set up before this one.
DEPENDENCIES = ['zone', 'device_tracker']
DOMAIN = 'proximity'

# Default tolerance (distance delta below which direction is 'stationary';
# units presumably match the distance calc — confirm)
DEFAULT_TOLERANCE = 1

# Default zone
DEFAULT_PROXIMITY_ZONE = 'home'

# Entity attributes
ATTR_DIST_FROM = 'dist_to_zone'
ATTR_DIR_OF_TRAVEL = 'dir_of_travel'
ATTR_NEAREST = 'nearest'

_LOGGER = logging.getLogger(__name__)
def setup(hass, config):  # pylint: disable=too-many-locals,too-many-statements
    """Get the zones and offsets from configuration.yaml."""
    ignored_zones = []
    if 'ignored_zones' in config[DOMAIN]:
        for variable in config[DOMAIN]['ignored_zones']:
            ignored_zones.append(variable)

    # Get the devices from configuration.yaml.
    if 'devices' not in config[DOMAIN]:
        _LOGGER.error('devices not found in config')
        return False

    proximity_devices = []
    for variable in config[DOMAIN]['devices']:
        proximity_devices.append(variable)

    # Get the direction of travel tolerance from configuration.yaml.
    tolerance = config[DOMAIN].get('tolerance', DEFAULT_TOLERANCE)

    # Get the zone to monitor proximity to from configuration.yaml.
    proximity_zone = config[DOMAIN].get('zone', DEFAULT_PROXIMITY_ZONE)
    entity_id = DOMAIN + '.' + proximity_zone
    proximity_zone = 'zone.' + proximity_zone

    # NOTE(review): if the configured zone doesn't exist, `state` is None and
    # the next line raises AttributeError — confirm upstream validation.
    state = hass.states.get(proximity_zone)
    zone_friendly_name = (state.name).lower()

    # Set the default values.
    dist_to_zone = 'not set'
    dir_of_travel = 'not set'
    nearest = 'not set'

    proximity = Proximity(hass, zone_friendly_name, dist_to_zone,
                          dir_of_travel, nearest, ignored_zones,
                          proximity_devices, tolerance, proximity_zone)
    proximity.entity_id = entity_id
    proximity.update_ha_state()

    # Main command to monitor proximity of devices.
    track_state_change(hass, proximity_devices,
                       proximity.check_proximity_state_change)

    return True
class Proximity(Entity): # pylint: disable=too-many-instance-attributes
"""Representation of a Proximity."""
# pylint: disable=too-many-arguments
    def __init__(self, hass, zone_friendly_name, dist_to, dir_of_travel,
                 nearest, ignored_zones, proximity_devices, tolerance,
                 proximity_zone):
        """Initialize the proximity.

        hass: Home Assistant instance.
        zone_friendly_name: lower-cased friendly name of the monitored zone
            (also used as this entity's display name).
        dist_to: initial distance state ('not set' at startup).
        dir_of_travel: initial direction of travel ('not set' at startup).
        nearest: initial nearest-device name ('not set' at startup).
        ignored_zones: zone names excluded from proximity calculations.
        proximity_devices: entity ids of the tracked devices.
        tolerance: direction-of-travel tolerance, in the units returned by
            homeassistant.util.location.distance (presumably metres -- confirm).
        proximity_zone: entity id of the zone being monitored ('zone.<name>').
        """
        self.hass = hass
        self.friendly_name = zone_friendly_name
        self.dist_to = dist_to
        self.dir_of_travel = dir_of_travel
        self.nearest = nearest
        self.ignored_zones = ignored_zones
        self.proximity_devices = proximity_devices
        self.tolerance = tolerance
        self.proximity_zone = proximity_zone
    @property
    def name(self):
        """Return the name of the entity (the monitored zone's friendly name)."""
        return self.friendly_name
    @property
    def state(self):
        """Return the state: distance to the zone in km, or 'not set'."""
        return self.dist_to
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity (kilometres)."""
        return "km"
    @property
    def state_attributes(self):
        """Return the state attributes: direction of travel and nearest device."""
        return {
            ATTR_DIR_OF_TRAVEL: self.dir_of_travel,
            ATTR_NEAREST: self.nearest,
        }
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
def check_proximity_state_change(self, entity, old_state, new_state):
"""Function to perform the proximity checking."""
entity_name = new_state.name
devices_to_calculate = False
devices_in_zone = ''
zone_state = self.hass.states.get(self.proximity_zone)
proximity_latitude = zone_state.attributes.get('latitude')
proximity_longitude = zone_state.attributes.get('longitude')
# Check for devices in the monitored zone.
for device in self.proximity_devices:
device_state = self.hass.states.get(device)
if device_state.state not in self.ignored_zones:
devices_to_calculate = True
# Check the location of all devices.
if (device_state.state).lower() == (self.friendly_name).lower():
device_friendly = device_state.name
if devices_in_zone != '':
devices_in_zone = devices_in_zone + ', '
devices_in_zone = devices_in_zone + device_friendly
# No-one to track so reset the entity.
if not devices_to_calculate:
self.dist_to = 'not set'
self.dir_of_travel = 'not set'
self.nearest = 'not set'
self.update_ha_state()
return
# At least one device is in the monitored zone so update the entity.
if devices_in_zone != '':
self.dist_to = 0
self.dir_of_travel = 'arrived'
self.nearest = devices_in_zone
self.update_ha_state()
return
# We can't check proximity because latitude and longitude don't exist.
if 'latitude' not in new_state.attributes:
return
# Collect distances to the zone for all devices.
distances_to_zone = {}
for device in self.proximity_devices:
# Ignore devices in an ignored zone.
device_state = self.hass.states.get(device)
if device_state.state in self.ignored_zones:
continue
# Ignore devices if proximity cannot be calculated.
if 'latitude' not in device_state.attributes:
continue
# Calculate the distance to the proximity zone.
dist_to_zone = distance(proximity_latitude,
proximity_longitude,
device_state.attributes['latitude'],
device_state.attributes['longitude'])
# Add the device and distance to a dictionary.
distances_to_zone[device] = round(dist_to_zone / 1000, 1)
# Loop through each of the distances collected and work out the
# closest.
closest_device = ''
dist_to_zone = 1000000
for device in distances_to_zone:
if distances_to_zone[device] < dist_to_zone:
closest_device = device
dist_to_zone = distances_to_zone[device]
# If the closest device is one of the other devices.
if closest_device != entity:
self.dist_to = round(distances_to_zone[closest_device])
self.dir_of_travel = 'unknown'
device_state = self.hass.states.get(closest_device)
self.nearest = device_state.name
se | lf.update_ha_state()
return
# Stop if we cannot calculate the direction of travel (i.e. we don't
# have a previous state and a current LAT and LONG).
if old_state is None or 'latitude' not in old_state.attributes:
self.dist_to = round(distances_to_zon | e[entity])
self.dir_of_travel = 'unknown'
self.nearest = entity_name
self.update_ha_state()
return
# Reset the variables
distance_travelled = 0
# Calculate the distance travelled.
old_distance = distance(proximity_latitude, proximity_longitude,
old_state.attributes['latitude'],
old_state.attributes['longitude'])
new_distance = distance(proximity_latitude, proximity_longitude,
new_state.attributes['latitude'],
new_state.attributes['longitude'])
distance_travelled = round(new_distance - old_distance, 1)
# Check for tolerance
if distance_travelled < self.tolerance * -1:
direction_of_travel = 'towards'
elif distance_travelled > self.tolerance:
direction_of_travel = 'away_from'
else:
|
from h | andlers.BaseHandlers import BaseHandler
class HomePageHandler(BaseHandler):
    """Serve the site's home page."""

    def get(self, *args, **kwargs):
        """Render the 'home.html' template on HTTP GET."""
        self.render('home.html')
|
# -*- coding: iso-8859-1 -*-
"""MoinMoin Desktop Edition (MMDE) - Configuration
ONLY to be used for MMDE - if you run a personal wiki on your notebook or PC.
This is NOT intended for internet or server or multiuser use due to relaxed security settings!
"""
import sys, os
from MoinMoin.config import multiconfig, url_prefix_static
class LocalConfig(multiconfig.DefaultConfig):
    """Relaxed, single-user site configuration for MoinMoin Desktop Edition."""
    # vvv DON'T TOUCH THIS EXCEPT IF YOU KNOW WHAT YOU DO vvv
    # Directory containing THIS wikiconfig:
    wikiconfig_dir = os.path.abspath(os.path.dirname(__file__))
    # We assume this structure for a simple "unpack and run" scenario:
    # wikiconfig.py
    # wiki/
    # data/
    # underlay/
    # If that's not true, feel free to just set instance_dir to the real path
    # where data/ and underlay/ is located:
    #instance_dir = '/where/ever/your/instance/is'
    instance_dir = os.path.join(wikiconfig_dir, 'wiki')
    # Where your own wiki pages are (make regular backups of this directory):
    data_dir = os.path.join(instance_dir, 'data', '') # path with trailing /
    # Where system and help pages are (you may exclude this from backup):
    data_underlay_dir = os.path.join(instance_dir, 'underlay', '') # path with trailing /
    DesktopEdition = True # give all local users full powers
    acl_rights_default = u"All:read,write,delete,revert,admin"
    surge_action_limits = None # no surge protection
    sitename = u'MoinMoin DesktopEdition'
    logo_string = u'<img src="%s/common/moinmoin.png" alt="MoinMoin Logo">' % url_prefix_static
    # ^^^ DON'T TOUCH THIS EXCEPT IF YOU KNOW WHAT YOU DO ^^^
    #page_front_page = u'FrontPage' # change to some better value
    # Add your configuration items here.
    # NOTE(review): despite its name, `secrets` below is a placeholder string
    # and must be replaced with a real random secret before shared use.
    secrets = 'This string is NOT a secret, please make up your own, long, random secret string!'
    # Set this to your default front page, default is a blank page.
    page_front_page = u"MyStartingPage"
# DEVELOPERS! Do not add your configuration items there,
# you could accidentally commit them! Instead, create a
# wikiconfig_local.py file containing this:
#
# from wikiconfig import LocalConfig
#
# class Config(LocalConfig):
# configuration_item_1 = 'value1'
#
# Allow per-developer overrides: if wikiconfig_local.py defines Config, use
# it; otherwise fall back to this file's LocalConfig.  (Python 2 `except`
# syntax -- this file targets MoinMoin on Python 2.)
try:
    from wikiconfig_local import Config
except ImportError, err:
    # Re-raise unless the failure is simply that wikiconfig_local is absent.
    if not str(err).endswith('wikiconfig_local'):
        raise
    Config = LocalConfig
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2013,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from bokehgui_swig import waterfall_sink_f_proc
from gnuradio import blocks, filter, gr, gr_unittest
class qa_waterfall_sink_f(gr_unittest.TestCase):
    """QA tests for the bokehgui waterfall sink (float input)."""

    def setUp(self):
        """Create a fresh top block for each test."""
        self.tb = gr.top_block()

    def tearDown(self):
        """Drop the top block so each test starts clean."""
        self.tb = None

    def test_001_t(self):
        """Feed a known signal and check two consecutive plot-data reads."""
        # 100 ones, 100 minus-ones, then mostly zeros with a single spike.
        original = (1,) * 100 + (-1,) * 100 + (0,) * 50 + (10,) + (0,) * 49
        # Expected dB rows; assumed to match the sink's FFT/log scaling --
        # confirm against waterfall_sink_f_proc documentation.
        # NOTE(review): expected_result[1] is never asserted below.
        expected_result = [(-200,) * 50 + (0,) + (-200,) * 49, (-20,) * 100]
        src = blocks.vector_source_f(original, False, 1, [])
        dst = waterfall_sink_f_proc(100, filter.firdes.WIN_RECTANGULAR, 0,
                                    15000, 'Test')
        self.tb.connect(src, dst)
        self.tb.run()
        # Two reads back to back should return the same first row.
        result_data = dst.get_plot_data()
        result_data1 = dst.get_plot_data()
        self.assertEqual(expected_result[0], tuple(result_data[0]))
        self.assertEqual(expected_result[0], tuple(result_data1[0]))
        self.tb.stop()
        self.tb.wait()
if __name__ == '__main__':
    # Run the QA suite and write results to an XML report.
    gr_unittest.run(qa_waterfall_sink_f, "qa_waterfall_sink_f.xml")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The Art of an Artificial Intelligence
http://art-of-ai.com
https://github.com/artofai
"""
__author__ = 'xevaquor'
__license__ = 'MIT'
import numpy as np
import util
class LayerBase(object):
    """Common state shared by every network layer.

    Attributes:
        size -- number of neurons in the layer (set by subclasses).
        W    -- weight matrix feeding this layer (empty by default).
        a    -- last activation computed for this layer.
        v    -- last pre-activation (induced local field) for this layer.
    """

    def __init__(self):
        # Start with no neurons, an empty weight matrix and no cached
        # activations; subclasses fill these in.
        self.size = None
        self.W = np.zeros((0, 0))
        self.a = None
        self.v = None

    def random_init(self):
        """Randomly initialise the layer's weights; subclasses must override."""
        raise NotImplementedError()
class SigmoidOutputLayer(LayerBase):
    """Output layer with logistic-sigmoid activations.

    Holds a (prev_layer_size + 1, size) weight matrix; the extra row is the
    bias weight for the ones-column prepended by NN.forward().
    """

    def __init__(self, neurons_count, previous_layer_count):
        """Create the layer.

        Parameters:
            neurons_count: number of output neurons.
            previous_layer_count: neuron count of the preceding layer
                (bias not included; one bias row is added here).
        """
        # Initialise the base attributes (a, v) so the layer is well-formed
        # even before the first forward pass (the original skipped this).
        LayerBase.__init__(self)
        self.size = neurons_count
        self.prev_layer_size = previous_layer_count
        self.W = np.zeros((self.prev_layer_size + 1, self.size))

    def random_init(self):
        """Draw all weights from a standard normal distribution."""
        self.W = np.random.normal(size=self.W.shape)

    def phi(self, v):
        """Logistic sigmoid activation."""
        return 1. / (1 + np.exp(-v))

    def phi_prime(self, v):
        """Derivative of the logistic sigmoid (original closed form kept)."""
        return np.exp(-v)/((1 + np.exp(-v))**2)
class SigmoidInputLayer(LayerBase):
    """Input layer: holds the raw feature matrix; it has no weights and
    applies no transformation of its own."""
    def __init__(self, input_size):
        # input_size: number of features per example.
        self.size = input_size
        # Empty weight matrix -- nothing feeds into the input layer.
        self.W = np.zeros((0,0))
    def random_init(self):
        # Nothing to initialise: the input layer has no weights.
        pass
class SigmoidHiddenLayer(LayerBase):
    """Hidden layer with logistic-sigmoid activations.

    Holds a (prev_layer_size + 1, size) weight matrix; the extra row is the
    bias weight for the ones-column prepended by NN.forward().
    """

    def __init__(self, layer_size, prev_layer_size):
        """Create the layer.

        Parameters:
            layer_size: number of neurons in this layer.
            prev_layer_size: neuron count of the preceding layer
                (bias not included; one bias row is added here).
        """
        # Initialise the base attributes (a, v) so the layer is well-formed
        # even before the first forward pass (the original skipped this).
        LayerBase.__init__(self)
        self.size = layer_size
        self.prev_layer_size = prev_layer_size
        self.W = np.zeros((self.prev_layer_size + 1, self.size))

    def phi(self, v):
        """Logistic sigmoid activation."""
        return 1. / (1 + np.exp(-v))

    def phi_prime(self, v):
        """Derivative of the logistic sigmoid (original closed form kept)."""
        return np.exp(-v)/((1 + np.exp(-v))**2)

    def random_init(self):
        """Draw all weights from a standard normal distribution."""
        self.W = np.random.normal(size=self.W.shape)
class NN(object):
    """Fully-connected feed-forward network with sigmoid activations.

    Layers are stored as [input, hidden..., output]; each non-input layer's
    weight matrix W has shape (prev_size + 1, size) -- the extra row is the
    bias weight.
    """
    def __init__(self, input_size, hidden_sizes, output_size):
        """Build the layer list from the given sizes (weights start at zero)."""
        self.layers = [SigmoidInputLayer(input_size)]
        for size in hidden_sizes:
            self.layers.append(SigmoidHiddenLayer(size, self.layers[-1].size))
        self.layers.append(SigmoidOutputLayer(output_size, self.layers[-1].size))
    def set_wages(self, wages):
        """Unpack a flat weight vector into the non-input layers' matrices."""
        shapes = list([l.W.shape for l in self.layers[1:]])
        packed = list(util.wrap_matrix(wages, shapes))
        assert len(packed) == len(self.layers) - 1
        for i, layer in enumerate(self.layers[1:]):
            layer.W = packed[i]
    def get_wages(self):
        """Flatten all weight matrices into one vector.

        NOTE(review): this includes the input layer's empty (0, 0) matrix,
        whereas set_wages() skips layers[0] -- asymmetric; confirm that
        util.unwrap_matrix tolerates the empty matrix.
        """
        all_wages = [layer.W for layer in self.layers]
        return util.unwrap_matrix(all_wages)
    def random_init(self):
        """Randomly re-initialise every layer's weights."""
        for layer in self.layers:
            layer.random_init()
    def forward(self, X):
        """Propagate X through the network; returns (and caches) y_hat.

        NOTE(review): each source layer's cached activation `a` gets a bias
        column prepended in place during the pass.
        """
        # hidden layer
        m, n = X.shape
        # examples, features
        assert n == self.layers[0].size
        self.layers[0].a = X
        for i in range(1, len(self.layers)):
            source_layer = self.layers[i-1]
            dest_layer = self.layers[i]
            # Prepend a column of ones so the bias row of W takes effect.
            bias = np.ones((source_layer.a.shape[0], 1))
            source_layer.a = np.hstack((bias, source_layer.a))
            dest_layer.v = np.dot(source_layer.a, dest_layer.W)
            dest_layer.a = dest_layer.phi(dest_layer.v)
        self.y_hat = self.layers[-1].a
        return self.y_hat
    def cost(self, X, y):
        """Half the summed squared error of the network on (X, y)."""
        self.y_hat = self.forward(X)
        J = 0.5*np.sum((y-self.y_hat)**2)
        return J
    def nabla_cost(self, X, y):
        """Gradient of the cost w.r.t. the output activations: y_hat - y."""
        self.forward(X)
        return (self.y_hat - y)
    def cost_prime(self, X, y):
        """Backpropagate and return the per-layer weight gradients.

        NOTE(review): the hard-coded indices [2] and [1] below assume a
        network with exactly one hidden layer (3 layers total); the demo at
        the bottom of this file builds NN(5, [2,3,4], 3), which this method
        cannot handle.
        """
        # output layer delta
        deltas = [None] * len(self.layers)
        differentials = [None] * len(self.layers)
        nabla_cost = self.nabla_cost(X, y)
        deltas[-1] = np.multiply(nabla_cost, self.layers[-1].phi_prime(self.layers[-1].v))
        differentials[2] = np.dot(self.layers[1].a.T, deltas[2])
        # Drop the bias row before propagating the delta backwards.
        truncatedW = self.layers[-1].W[1:, :]
        #truncatedW = self.layers[-1].W
        deltas[1] = np.multiply(np.dot(deltas[2], truncatedW.T), self.layers[1].phi_prime(self.layers[1].v))
        # NOTE(review): `biased` below is computed but never used (layers[0].a
        # already carries the bias column from forward()).
        bias = np.ones((X.shape[0], 1))
        biased = np.hstack((bias, X))
        differentials[1] = np.dot(self.layers[0].a.T, deltas[1])
        return differentials[1:]
if __name__ == '__main__':
    # Smoke test: build a network with three hidden layers and run a single
    # forward pass on a tiny hand-made dataset.
    dd = NN(5, [2,3,4], 3)
    dd.random_init()
    X = np.array([[1,2,3,4,5],
                  [10,20,30,40,50],
                  [8,6,4,2,4]],dtype=float)
    Y = np.array([[1,0,1],
                  [1,10,3],
                  [1,-4,4]], dtype=float)
    yyy = dd.forward(X)
    #print(yyy)
|
'''
@author: Team Alpha, <aa5186@nyu.edu>
Name: Customer Model
Purpose: This library is part of the | customer REST API for the ecommerce website
'''
from cust | omer import Customer
|
# this app has been deprecated but sticks around for | migrations | dependencies
|
"""
Flarf: Flask Request Filter
-------------
Configurable request filters
"""
from setuptools import setup
setup(
name='Flask-Flarf',
version='0.0.5',
url='https://github.com/thrisp/flarf',
license='MIT',
author='Thrisp/Hurrata',
author_email='blueblank@gmail.com',
description='Flask request filtering',
long_description=__doc__,
packages=['flask_flarf'],
zip_safe=False,
platforms='any',
install_requires=[
'Fla | sk>=0.9'
],
test_suite='nose.collector',
tests_require=[
'nose',
'blinker'],
classifiers=[
'Development Stat | us :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._bch_expansion import bch_expand
from ._channel_state import (amplitude_damping_channel, dephasing_channel,
depolarizing_channel)
from ._commutators import anticommutator, commutator, double_commutator
from ._grid import Grid
from ._lattice import (HubbardSquareLattice, SpinPairs, Spin)
from ._lcu_util import (lambda_norm,
preprocess_lcu_coe | fficients_for_reversible_sampling)
from ._operator_utils import (chemist_ordered, count_q | ubits,
eigenspectrum, fourier_transform,
freeze_orbitals, get_file_path,
hermitian_conjugated, inline_sum,
inverse_fourier_transform,
is_hermitian, is_identity,
normal_ordered, prune_unused_indices,
reorder, up_then_down,
load_operator, save_operator,
group_into_tensor_product_basis_sets)
from ._rdm_mapping_functions import (kronecker_delta,
map_two_pdm_to_two_hole_dm,
map_two_pdm_to_one_pdm,
map_one_pdm_to_one_hole_dm,
map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm,
map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm,
map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
from ._slater_determinants import (gaussian_state_preparation_circuit,
slater_determinant_preparation_circuit)
from ._special_operators import (majorana_operator, number_operator,
s_minus_operator, s_plus_operator,
s_squared_operator,
sx_operator, sy_operator, sz_operator)
from ._testing_utils import (haar_random_vector,
random_antisymmetric_matrix,
random_diagonal_coulomb_hamiltonian,
random_hermitian_matrix,
random_interaction_operator,
random_quadratic_hamiltonian,
random_qubit_operator,
random_unitary_matrix,
module_importable)
from ._trotter_error import error_bound, error_operator
from ._trotter_exp_to_qgates import (pauli_exp_to_qasm,
trotterize_exp_qubop_to_qasm,
trotter_operator_grouping)
from ._unitary_cc import (uccsd_convert_amplitude_format,
uccsd_generator,
uccsd_singlet_generator,
uccsd_singlet_get_packed_amplitudes,
uccsd_singlet_paramsize)
# Imports out of alphabetical order to avoid circular dependency.
from ._jellium_hf_state import hartree_fock_state_jellium
from ._low_rank import (get_chemist_two_body_coefficients,
low_rank_two_body_decomposition,
prepare_one_body_squared_evolution)
from ._low_depth_trotter_error import (
low_depth_second_order_trotter_error_bound,
low_depth_second_order_trotter_error_operator)
from ._sparse_tools import (boson_ladder_sparse,
boson_operator_sparse,
expectation,
expectation_computational_basis_state,
get_density_matrix,
get_gap,
get_ground_state,
get_linear_qubit_operator_diagonal,
inner_product,
jordan_wigner_sparse,
jw_configuration_state,
jw_hartree_fock_state,
jw_get_gaussian_state,
jw_get_ground_state_at_particle_number,
jw_number_restrict_operator,
jw_number_restrict_state,
jw_slater_determinant,
jw_sz_restrict_operator,
jw_sz_restrict_state,
qubit_operator_sparse,
sparse_eigenspectrum,
variance)
from ._davidson import Davidson, DavidsonOptions, QubitDavidson, SparseDavidson
from ._linear_qubit_operator import (
LinearQubitOperator,
LinearQubitOperatorOptions,
ParallelLinearQubitOperator,
generate_linear_qubit_operator,
)
from ._pubchem import geometry_from_pubchem
|
"""
Feature detection (Szeliski 4.1.1)
"""
import numpy as np
import scipy.signal as sig
import scipy.ndimage as ndi
from compvis.utils import get_patch
def sum_sq_diff(img_0, img_1, u, x, y, x_len, y_len):
    """Summed square difference between two image patches.

    Uses even (uniform) weighting across the patch.

    Parameters :
        img_0, img_1 : two images being compared
        u : displacement vector between patches
        x, y : coordinates of top-left corner of first patch
        x_len, y_len : dimensions of patch
    """
    reference = get_patch(img_0, x, y, x_len, y_len)
    displaced = get_patch(img_1, x + u[0], y + u[1], x_len, y_len)
    diff = displaced - reference
    return (diff**2).sum()
def autocorr(img, u, x, y, x_len, y_len):
    """
    Returns the auto-correlation function for an image patch with a
    displacement of u. Uses even weighting across the patch.

    (This function simply calls sum_sq_diff() with both images the same, and is
    left here for reference/convenience.)

    Parameters :
        img : image
        u : displacement vector between patches
        x, y : coordinates of top-left corner of patch
        x_len, y_len : dimensions of patch
    """
    return sum_sq_diff(img, img, u, x, y, x_len, y_len)
def autocorr_surface(img, u_x_range, u_y_range, x, y, x_len, y_len):
    """Auto-correlation surface for an image patch over a range of shifts.

    Parameters :
        img : image
        u_x_range, u_y_range : ranges of displacements (tuples)
        x, y : coordinates of top-left corner of patch
        x_len, y_len : dimensions of patch
    Returns :
        surface : auto-correlation values
        X, Y : grid mesh
    """
    # Build the displacement grid.
    X, Y = np.meshgrid(range(u_x_range[0], u_x_range[1]),
                       range(u_y_range[0], u_y_range[1]))
    # Evaluate the auto-correlation at every displacement on the grid.
    values = [autocorr(img, (u_x, u_y), x, y, x_len, y_len)
              for u_x, u_y in zip(np.ravel(X), np.ravel(Y))]
    surface = np.array(values).reshape(X.shape)
    return surface, X, Y
def harris(img, sigma_d=1, sigma_i=2, alpha=0.06, filter_type='gaussian'):
    """
    Returns the Harris interest scores for corner detection.

    Parameters :
        img : image (2D array)
        sigma_d : width of derivative Gaussian
        sigma_i : width of integration Gaussian
        alpha : parameter in Harris-Stephens (1988) score (unused by the
            harmonic-mean score returned below; kept for API compatibility)
        filter_type : 'gaussian' or 'sobel'
    Raises :
        ValueError : if filter_type is not a supported name.
    (Default values for sigma_d and sigma_i from Szeliski pp. 190)
    (Default value for alpha from Szeliski pp. 189)
    """
    #--- Gradients in x and y
    # String comparison must use '==': the original used 'is', which relies
    # on CPython string interning and is not guaranteed to match.
    if filter_type == 'gaussian':
        # Derivative of Gaussian
        I_x = ndi.gaussian_filter(img, sigma_d, (1, 0))#, mode='nearest')
        I_y = ndi.gaussian_filter(img, sigma_d, (0, 1))#, mode='nearest')
    # Sobel
    elif filter_type == 'sobel':
        I_x = ndi.sobel(img, 0)
        I_y = ndi.sobel(img, 1)
    else:
        # Fail loudly instead of the original's UnboundLocalError.
        raise ValueError("filter_type must be 'gaussian' or 'sobel', "
                         "got %r" % (filter_type,))
    #---
    # Outer products of the gradients
    I_xx = I_x**2
    I_yy = I_y**2
    I_xy = I_x * I_y
    # Convolve with Gaussian to get auto-correlation matrix
    A_xx = ndi.gaussian_filter(I_xx, sigma_i)
    A_yy = ndi.gaussian_filter(I_yy, sigma_i)
    A_xy = ndi.gaussian_filter(I_xy, sigma_i)
    # Harris scores
    A_det = A_xx * A_yy - A_xy**2
    A_tr = A_xx + A_yy
    # (Harris-Stephens 1988)
    #return A_det - alpha * A_tr**2
    # Harmonic mean (Brown-Szeliski-Winder 2005)
    return A_det / A_tr
def select_scores(scores, n_points, border=10):
    """
    Selects the best scores from a given map.

    Parameters :
        scores : 2D score array (left unmodified)
        n_points : number of points to select
        border : minimum distance from image boundaries
    Returns :
        (coords, values) : selected coordinates (ordered weakest to
        strongest) and their scores.
    """
    # Mask out points too close to the boundary; work on a copy so the
    # caller's array is not mutated (the original multiplied in place).
    mask = np.zeros(scores.shape)
    mask[border:-border, border:-border] = 1
    masked = scores * mask
    # Sort coordinates by response strength (ascending).
    coords_sorted_score = np.array(
        np.unravel_index(np.argsort(masked, axis=None), masked.shape)).T
    # Keep the n_points highest scores.
    best_coords = coords_sorted_score[-n_points:]
    best_scores = [masked[coord[0], coord[1]] for coord in best_coords]
    return np.array(best_coords), np.array(best_scores)
def select_scores_anms(scores, n_points, c_robust=0.9, border=10):
    """
    Selects the best scores from a given map, applying adaptive non-maximal
    suppression (favours points that dominate a large neighbourhood, which
    spreads the selection instead of forming dense clusters).

    Parameters :
        scores : 2D score array (left unmodified)
        n_points : number of points to select
        c_robust : robustifying parameter
        border : minimum distance from image boundaries
    """
    # Mask out points too close to the boundary; work on a copy so the
    # caller's array is not mutated (the original multiplied in place).
    mask = np.zeros(scores.shape)
    mask[border:-border, border:-border] = 1
    masked = scores * mask
    # ANMS suppression radius for every surviving point.
    supp_radii = get_suppression_radii(masked, c_robust)
    # Sort coordinates by suppression radius (ascending).
    coords_sorted_supp = np.array(
        np.unravel_index(np.argsort(supp_radii, axis=None), supp_radii.shape)).T
    # Keep the points with the largest suppression radii.
    best_coords = coords_sorted_supp[-n_points:]
    best_scores = [masked[coord[0], coord[1]] for coord in best_coords]
    return np.array(best_coords), np.array(best_scores)
def get_suppression_radii(scores, c_robust):
    """Compute the ANMS suppression radius for every non-zero score.

    A point's radius is the smallest window half-width at which a
    significantly stronger neighbour appears (score < c_robust * neighbour);
    the global maximum gets the largest radius of all.

    Parameters :
        scores : 2D score array (zeros are treated as masked out)
        c_robust : robustifying parameter
    """
    supp_radii = np.zeros(scores.shape)
    # Coordinate with highest score.
    coord_max = np.unravel_index(scores.argmax(), scores.shape)
    # Once r reaches this, the window covers the whole array, so growing it
    # further can never reveal a new neighbour.  The original had no such
    # bound and could loop forever (e.g. tied maxima, or when the only
    # stronger neighbour sat in the last row/column, which its off-by-one
    # clamp excluded from the window).
    r_limit = max(scores.shape)
    for i in range(scores.shape[0]):
        for j in range(scores.shape[1]):
            score = scores[i, j]
            if score == 0:
                continue
            # Skip the highest score (it gets the largest radius below).
            if (i, j) == coord_max:
                continue
            # Grow the window until a significantly stronger neighbour is
            # found or the window covers the whole image.
            r = 0
            while r < r_limit:
                r += 1
                # Clamp the candidate window to the image (end indices are
                # exclusive, so they clamp to shape, not shape-1).
                x0 = max(i - r, 0)
                x1 = min(i + r + 1, scores.shape[0])
                y0 = max(j - r, 0)
                y1 = min(j + r + 1, scores.shape[1])
                candidates = scores[x0:x1, y0:y1]
                # Stop once a significantly stronger neighbour is present.
                if np.count_nonzero(score < c_robust * candidates):
                    break
            supp_radii[i][j] = r
    # Set the highest score to have the largest suppression radius.
    supp_radii[coord_max] = supp_radii.max() + 1
    return supp_radii
|
"""
This | file implements the lowering for `dict()`
"""
from numba.targets.imputils import lower_builtin
@lower_builtin(dict)
def impl_dict(context, builder, sig, args):
    """Lower a ``dict()`` call by delegating the work to ``Dict.empty()``."""
    from numba.typed import Dict
    ret_type = sig.return_type
    key_type = ret_type.key_type
    value_type = ret_type.value_type

    def make_empty_dict():
        # Compiled below: builds the typed dict with the call's
        # key/value types captured from the signature.
        return Dict.empty(key_type, value_type)

    return context.compile_internal(builder, make_empty_dict, sig, args)
|
""" Copyright 2012, July 31
Written by Pattarapol (Cheer) Iamngamsup
E-mail: IAM.PATTARAPOL@GMAIL.COM
| Sum square difference
Problem 6
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 1 | 0)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of
the first ten natural numbers and the square of the sum is
3025 - 385 = 2640.
Find the difference between the sum of the squares of
the first one hundred natural numbers and the square of the sum.
"""
#################################################
# Importing libraries & modules
import datetime
#################################################
# Global variables
#################################################
# Functions
#################################################
# Classes
#################################################
# Main function
def main():
    """Project Euler problem 6: print the difference between the square of
    the sum and the sum of the squares of the first 100 natural numbers."""
    # Square of the sum via the arithmetic-series formula.  Use integer
    # (floor) division so the result stays an exact int on Python 3 -- the
    # original '/' produced a float and printed 'answer = 25164150.0'.
    squareOfSum = ( ( ( 1+100 ) * 100 ) // 2)**2
    # Sum of the squares 1^2 + 2^2 + ... + 100^2.
    sumOfSquare = 0
    for i in range( 1, 101 ):
        sumOfSquare += i*i
    print( 'answer = {0}'.format( squareOfSum - sumOfSquare ) )
#################################################
# Main execution
if __name__ == '__main__':
    # Run the solver and report wall-clock timing using UTC timestamps.
    # get starting date time
    startingDateTime = datetime.datetime.utcnow()
    print( 'startingDateTime = {0} UTC'.format( startingDateTime ) )
    # call main function
    main()
    # get ending date time
    endingdateTime = datetime.datetime.utcnow()
    print( 'endingdateTime = {0} UTC'.format( endingdateTime ) )
    # compute delta date time
    deltaDateTime = endingdateTime - startingDateTime
    print( 'deltaDateTime = {0}'.format( deltaDateTime ) )
|
'pycsw:PublicationDate']},
'apiso:OrganisationName': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OrganizationName']},
'apiso:HasSecurityConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_SecurityConstraints', 'dbcol': self.context.md_core_model['mappings']['pycsw:SecurityConstraints']},
'apiso:Language': {'xpath': 'gmd:language/gmd:LanguageCode|gmd:language/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Language']},
'apiso:ParentIdentifier': {'xpath': 'gmd:parentIdentifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ParentIden | tifier']},
'apiso:KeywordType': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:KeywordType']},
'apiso:TopicCategory': {'xpath': | 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:TopicCategory']},
'apiso:ResourceLanguage': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:code/gmd:MD_LanguageTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:ResourceLanguage']},
'apiso:GeographicDescriptionCode': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:GeographicDescriptionCode']},
'apiso:Denominator': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', 'dbcol': self.context.md_core_model['mappings']['pycsw:Denominator']},
'apiso:DistanceValue': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', 'dbcol': self.context.md_core_model['mappings']['pycsw:DistanceValue']},
'apiso:DistanceUOM': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance/@uom', 'dbcol': self.context.md_core_model['mappings']['pycsw:DistanceUOM']},
'apiso:TempExtent_begin': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', 'dbcol': self.context.md_core_model['mappings']['pycsw:TempExtent_begin']},
'apiso:TempExtent_end': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', 'dbcol': self.context.md_core_model['mappings']['pycsw:TempExtent_end']},
'apiso:AnyText': {'xpath': '//', 'dbcol': self.context.md_core_model['mappings']['pycsw:AnyText']},
'apiso:ServiceType': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:serviceType/gco:LocalName', 'dbcol': self.context.md_core_model['mappings']['pycsw:ServiceType']},
'apiso:ServiceTypeVersion': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:serviceTypeVersion/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ServiceTypeVersion']},
'apiso:Operation': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:containsOperations/srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Operation']},
'apiso:CouplingType': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:couplingType/srv:SV_CouplingType', 'dbcol': self.context.md_core_model['mappings']['pycsw:CouplingType']},
'apiso:OperatesOn': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:operatesOn/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OperatesOn']},
'apiso:OperatesOnIdentifier': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:coupledResource/srv:SV_CoupledResource/srv:identifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OperatesOnIdentifier']},
'apiso:OperatesOnName': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:coupledResource/srv:SV_CoupledResource/srv:operationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OperatesOnName']},
},
'AdditionalQueryables': {
'apiso:Degree': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', 'dbcol': self.context.md_core_model['mappings']['pycsw:Degree']},
'apiso:AccessConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:AccessConstraints']},
'apiso:OtherConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OtherConstraints']},
'apiso:Classification': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:Classification']},
'apiso:ConditionApplyingToAccessAndUse': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:useLimitation/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ConditionApplyingToAccessAndUse']},
'apiso:Lineage': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Lineage']},
'apiso:ResponsiblePartyRole': {'xpath': 'gmd:contact/gmd:CI_ResponsibleParty/gmd:role/gmd:CI_RoleCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:ResponsiblePartyRole']},
'apiso:SpecificationTitle': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:SpecificationTitle']},
'apiso:SpecificationDate': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:SpecificationDate']},
'apiso:SpecificationDateType': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:SpecificationDateType']},
'apiso:Creator': |
import sys
from core import loop
from util import jsonmanager, debug
def make_console_menu(name):
    """Build a ConsoleMenu from the JSON menu description called *name*.

    The description is read from _Resources/Data/MenuData/<name>.json and
    must provide a 'Title' string and an 'Items' list of dicts, each with
    'Text' (display label) and 'Action' (module-level function name).
    """
    path = '_Resources/Data/MenuData/' + name + '.json'
    data = jsonmanager.get_data(path)
    entries = [(entry['Text'], entry['Action']) for entry in data['Items']]
    return ConsoleMenu(data['Title'], entries)
class ConsoleMenuItem:
    """One selectable menu entry: a display text plus the name of a
    module-level function to call when the entry is chosen."""

    def __init__(self, text, action):
        self.text = text
        self.action = action

    def invoke(self):
        """Look up ``self.action`` on this module and call it.

        Logs and re-raises if no function of that name exists.
        """
        try:
            handler = getattr(sys.modules[__name__], self.action)
            handler()
        except AttributeError as error:
            debug.log('Something went wrong :(')
            debug.log(error.args)
            raise error
class ConsoleMenu:
    """A numbered text-mode menu built from (text, action) pairs."""

    def __init__(self, title, args):
        self.title = title
        self.menu_items = []
        for text, action in args:
            self.add_menu_item(text, action)

    def add_menu_item(self, text, action):
        """Append a new ConsoleMenuItem at the end of the menu."""
        self.menu_items.append(ConsoleMenuItem(text, action))

    def get_menu_item(self, index):
        """Return the item at *index* (IndexError if out of range)."""
        return self.menu_items[index]

    def display_menu_item(self, index):
        """Print one entry as '[i] - text'."""
        entry = self.get_menu_item(index)
        print('[' + str(index) + '] - ' + entry.text)

    def run(self):
        """Show every entry, read the user's numeric choice, invoke it."""
        for index, _ in enumerate(self.menu_items):
            self.display_menu_item(index)
        choice = input('Choose an option: ')
        self.get_menu_item(int(choice)).invoke()
def run_loop(game_loop):
    """Point *game_loop* at the starting scene and run it to completion."""
    starting_scene = 'pallet-town'
    game_loop.set_scene(starting_scene)
    game_loop.run()
def run_editor():
    # Launch the game with the editor loop (level editing mode).
    run_loop(loop.EditorLoop())
def run_game():
    # Launch the game with the normal gameplay loop.
    run_loop(loop.DefaultGameLoop())
|
#!/usr/bin/env python3
import idmaker
import utils
from tkinter import *
from PIL import Image, ImageTk
# Create the application root window: titled, with the lab icon, maximised.
top = Tk()
top.wm_title("Voice Research Laboratory")
top.iconbitmap('icons/favicon.ico')
top.state('zoomed')  # NOTE(review): 'zoomed' is platform-specific (Windows) -- confirm target OS
class MainWindow:
    """Main application window: a banner image plus a menu bar wiring up the
    lab's utilities (file renamer, Praat voice tools, image tools, help)."""
    def __init__(self, top):
        # top: the Tk root window this UI is built into.
        self.top = top
        self.banner()
        #self.makebuttons()
        self.mainmenu()
    def banner(self):
        """Show the lab logo across the top of the window."""
        self.image = Image.open("icons/logo.png")
        # Keep the PhotoImage on self: Tk keeps no reference of its own, so a
        # local would be garbage-collected and the image would disappear.
        self.photo = ImageTk.PhotoImage(self.image)
        self.bannerlabel = Label(self.top, image=self.photo)
        self.bannerlabel.pack()
    def mainmenu(self):
        """Build the full menu bar and attach it to the window."""
        self.menubar = Menu(self.top)
        # file menu
        self.filemenu = Menu(self.menubar, tearoff=0)
        self.filemenu.add_command(label='Renamer', command=self.renamerCallBack)
        self.filemenu.add_separator()
        self.filemenu.add_command(label="Exit", command=self.exitCallback)
        self.menubar.add_cascade(label="File", menu=self.filemenu)
        # experiment menu
        self.experimentmenu = Menu(self.menubar, tearoff=0)
        self.experimentmenu.add_command(label='ID Encryption', command=idmaker.makeid)
        self.menubar.add_cascade(label='Experiments', menu=self.experimentmenu)
        # stimuli menu (Praat-backed voice processing)
        self.stimulimenu = Menu(self.menubar, tearoff=0)
        self.stimulimenu.add_command(label='Manipulate Pitch', command=self.manipulatevoicescallback)
        self.stimulimenu.add_command(label='Measure Duration', command=self.measuredurationcallback)
        self.stimulimenu.add_command(label='Measure Pitch', command=self.measurepitchcallback)
        self.menubar.add_cascade(label='Voice Stimuli', menu=self.stimulimenu)
        # images menu
        self.imagesmenu = Menu(self.menubar, tearoff=0)
        self.imagesmenu.add_command(label='Flip horizontal', command=utils.Imagez.flip_horizontal)
        self.imagesmenu.add_command(label='Flip vertical', command=utils.Imagez.flip_vertical)
        self.imagesmenu.add_command(label='Invert colour', command=utils.Imagez.invert_colour)
        self.imagesmenu.add_command(label='Convert to .jpg', command=utils.Imagez.convert2jpg)
        self.menubar.add_cascade(label='Images', menu=self.imagesmenu)
        # help menu
        self.helpmenu = Menu(self.menubar, tearoff=0)
        self.helpmenu.add_command(label="Help", command=self.helpCallBack)
        self.menubar.add_cascade(label="Help", menu=self.helpmenu)
        # display the menu
        self.top.config(menu=self.menubar)
    def makebuttons(self):
        """Alternative big-button launcher UI (currently disabled in
        __init__ in favour of the menu bar)."""
        self.renamerbutton = Button(self.top, text="Renamer", font=("Arial", 32), command=self.renamerCallBack)
        self.renamerbutton.pack()
        self.emailbutton = Button(self.top, text='Email', font=("Arial", 32), command=self.emailCallBack)
        self.emailbutton.pack()
        self.manipulatevoicebutton = Button(self.top,
                                            text = 'Manipulate Voices',
                                            font=("Arial", 32),
                                            command=self.manipulatevoicescallback)
        self.manipulatevoicebutton.pack()
        self.helpbutton = Button(self.top, text='Help', font=("Arial", 32), command=self.helpCallBack)
        self.helpbutton.pack()
        self.exitbutton = Button(self.top, text='Exit', font=("Arial", 32), command=self.exitCallback)
        self.exitbutton.pack()
    def manipulatevoicessubmitcallback(self):
        """Read the sex radio selection and run the Praat F0 manipulation."""
        self.sex = self.var.get()
        # Radio values: 0 = female, 1 = male (set in manipulatevoicescallback).
        if self.sex == 0:
            utils.Praat.manipulateFo(sex='female')
            self.manipulatewindow.destroy()
        elif self.sex == 1:
            utils.Praat.manipulateFo(sex='male')
            self.manipulatewindow.destroy()
    def manipulatevoicescallback(self):
        """Open a dialog asking for the sex of the voices, then submit."""
        self.manipulatewindow = Toplevel()
        self.var = IntVar()
        self.radiolabel = Label(self.manipulatewindow, text='Choose sex of voices', font=('Arial', 32), justify=LEFT)
        self.radiolabel.pack(anchor=W)
        self.R1 = Radiobutton(self.manipulatewindow, text="female", font=('Arial', 32), variable=self.var, value=0)
        self.R1.pack(anchor=W)
        self.R2 = Radiobutton(self.manipulatewindow, text="male", font=('Arial', 32), variable=self.var, value=1)
        self.R2.pack(anchor=W)
        self.radiosubmitbutton = Button(self.manipulatewindow, text = 'Submit', font=('Arial', 32), command=self.manipulatevoicessubmitcallback)
        self.radiosubmitbutton.pack(anchor=W)
        self.instructlabel = Label(self.manipulatewindow,
                                   text='When you press submit, two windows will pop up.\n'
                                        'First select the directory where your unmanipulated .wav '
                                        'files are.\nThen select the directory you want to save '
                                        'the manipulated .wav files to.', font=('Arial', 18), justify=LEFT)
        self.instructlabel.pack(anchor=W)
    def measuredurationcallback(self):
        # Delegate straight to the Praat duration measurement helper.
        utils.Praat.measureDuration()
    def measurepitchcallback(self):
        # Delegate straight to the Praat F0 measurement helper.
        utils.Praat.measureF0()
    def exitCallback(self):
        # Tear down the root window, ending the mainloop.
        self.top.destroy()
    def getReplacement(self):
        """Read pattern/replacement entries and run the batch renamer."""
        self.pattern = self.patterns.get()
        self.replacement = self.replacements.get()
        utils.Filez.renamer(self.pattern, self.replacement)
        self.renamerwindow.destroy()
    def renamerCallBack(self):
        """Open the pattern/replacement dialog for the file renamer."""
        self.renamerwindow = Toplevel()
        self.entrytextLabel = Label(self.renamerwindow, text="Input search Pattern")
        self.entrytextLabel.pack()
        self.patterns = Entry(self.renamerwindow)
        self.patterns.pack()
        self.textLabel2 = Label(self.renamerwindow, text="Input replacement text")
        self.textLabel2.pack()
        self.replacements = Entry(self.renamerwindow)
        self.replacements.pack()
        self.submit = Button(self.renamerwindow, text='Submit', font=("Arial", 32), command=self.getReplacement)
        self.submit.pack()
    def getEmailtext(self):
        """Read the email body entry, send it, and close the dialog."""
        self.body = self.e1.get()
        utils.Emailz.send_email(self.body)
        self.emailwindow.destroy()
    def emailCallBack(self):
        """Open the single-field email composition dialog."""
        self.emailwindow = Toplevel()
        self.emailwindow.title('Emailer')
        self.emailwindow.focus_set()
        # NOTE(review): .pack() returns None, so self.emailLabel is None here.
        self.emailLabel = Label(self.emailwindow, text="Enter email text", font=("Arial", 32)).pack()
        self.e1 = Entry(self.emailwindow)
        self.e1.pack()
        self.submit = Button(self.emailwindow, text='Submit', font=("Arial", 32), command=self.getEmailtext)
        self.submit.pack()
    def closehelp(self):
        # Remove the inline help text and its close button again.
        self.helpLabel.destroy()
        self.helpclosebutton.destroy()
    def helpCallBack(self):
        """Show the help text inline in the main window."""
        #self.helpwindow = Toplevel()
        self.helpLabel = Label(self.top, text = utils.help_message, font=("Arial", 12))
        self.helpLabel.pack()
        self.helpclosebutton = Button(self.top, text='Close', font=("Arial", 18), command=self.closehelp)
        self.helpclosebutton.pack()
    def search4soundsCallback(self):
        # Placeholder: sound search UI not implemented yet.
        pass
    def search4soundsSubmitCallback(self):
        # Placeholder: sound search submission not implemented yet.
        pass
# Build the UI and hand control to Tk's event loop (blocks until close).
topwindow = MainWindow(top)
top.mainloop()
|
prefix = "tabulator-"
delete = self.__workbook_cache is None
source_bytes = self.__loader.load(source, mode="b", encoding=encoding)
target_bytes = NamedTemporaryFile(prefix=prefix, delete=delete)
shutil.copyfileobj(source_bytes, target_bytes)
source_bytes.close()
target_bytes.seek(0)
self.__bytes = target_bytes
if self.__workbook_cache is not None:
self.__workbook_cache[source] = target_bytes.name
atexit.register(os.remove, target_bytes.name)
# Local
else:
self.__bytes = self.__loader.load(source, mode="b", encoding=encoding)
# Get book
# To fill merged cells we can't use read-only because
# `sheet.merged_cell_ranges` is not available in this mode
self.__book = openpyxl.load_workbook(
self.__bytes, read_only=not self.__fill_merged_cells, data_only=True
)
# Get sheet
try:
if isinstance(self.__sheet_pointer, six.string_types):
self.__sheet = self.__book[self.__sheet_pointer]
else:
self.__sheet = self.__book.worksheets[self.__sheet_pointer - 1]
except (KeyError, IndexError):
message = 'Excel document "%s" doesn\'t have a sheet "%s"'
raise exceptions.SourceError(message % (source, self.__sheet_pointer))
self.__fragment = self.__sheet.title
self.__process_merged_cells()
# Reset parser
self.reset()
    def close(self):
        """Close the underlying byte stream (no-op if already closed)."""
        if not self.closed:
            self.__bytes.close()

    def reset(self):
        """Rewind the stream and restart the extended-rows iterator."""
        helpers.reset_stream(self.__bytes)
        self.__extended_rows = self.__iter_extended_rows()

    @property
    def encoding(self):
        # Encoding that was requested when the source was loaded.
        return self.__encoding

    @property
    def fragment(self):
        # Title of the worksheet currently being parsed.
        return self.__fragment

    @property
    def extended_rows(self):
        # Iterator of (row_number, None, cell_values) tuples.
        return self.__extended_rows
# Private
def __iter_extended_rows(self):
for row_number, row in enumerate(self.__sheet.iter_rows(), start=1):
yield (
row_number,
None,
extract_row_values(
row, self.__preserve_formatting, self.__adjust_floating_point_error,
),
)
def __process_merged_cells(self):
if self.__fill_merged_cells:
for merged_cell_range in list(self.__sheet.merged_cells.ranges):
merged_cell_range = str(merged_cell_range)
self.__sheet.unmerge_cells(merged_cell_range)
merged_rows = openpyxl.utils.rows_from_range(merged_cell_range)
coordinates = list(chain.from_iterable(merged_rows))
value = self.__sheet[coordinates[0]].value
for coordinate in coordinates:
cell = self.__sheet[coordinate]
cell.value = value
# Internal
# Excel custom-number-format tokens mapped to their strftime equivalents.
# NOTE(review): the "%-d"/"%-H"/"%-S"-style codes are glibc extensions and
# are not supported by Windows strftime -- confirm the target platforms.
EXCEL_CODES = {
    "yyyy": "%Y",
    "yy": "%y",
    "dddd": "%A",
    "ddd": "%a",
    "dd": "%d",
    "d": "%-d",
    # Different from excel as there is no J-D in strftime
    "mmmmmm": "%b",
    "mmmm": "%B",
    "mmm": "%b",
    "hh": "%H",
    "h": "%-H",
    "ss": "%S",
    "s": "%-S",
    # Possibly different from excel as there is no am/pm in strftime
    "am/pm": "%p",
    # Different from excel as there is no A/P or a/p in strftime
    "a/p": "%p",
}
# "m"/"mm" are ambiguous in Excel: minutes next to hours/seconds, months
# otherwise.  The converter picks a table based on surrounding codes.
EXCEL_MINUTE_CODES = {
    "mm": "%M",
    "m": "%-M",
}
EXCEL_MONTH_CODES = {
    "mm": "%m",
    "m": "%-m",
}
# Characters Excel passes through literally inside a format string.
EXCEL_MISC_CHARS = [
    "$",
    "+",
    "(",
    ":",
    "^",
    "'",
    "{",
    "<",
    "=",
    "-",
    "/",
    ")",
    "!",
    "&",
    "~",
    "}",
    ">",
    " ",
]
# Backslash escapes the next character; ';' separates Excel format sections.
EXCEL_ESCAPE_CHAR = "\\"
EXCEL_SECTION_DIVIDER = ";"
def convert_excel_date_format_string(excel_date):
"""
Created using documentation here:
https://support.office.com/en-us/article/review-guidelines-for-customizing-a-number-format-c0a1d1fa-d3f4-4018-96b7-9c9354dd99f5
"""
# The python date string that is being built
python_date = ""
# The excel code currently being parsed
excel_code = ""
prev_code = ""
# If the previous character was the escape character
char_escaped = False
# If we are in a quotation block (surrounded by "")
quotation_block = False
# Variables used for checking if a code should be a minute or a month
checking_minute_or_month = False
minute_or_month_buffer = ""
for c in excel_date:
ec = excel_code.lower()
# The previous character was an escape, the next character should be added normally
if char_escaped:
if checking_minute_or_month:
minute_or_month_buffer += c
else:
python_date += c
char_escaped = False
continue
# Inside a quotation block
if quotation_block:
if c == '"':
# Quotation block should now end
quotation_block = False
elif checking_minute_or_month:
minute_or_month_buffer += c
else:
python_date += c
continue
# The start of a quotation block
if c == '"':
quotation_block = True
continue
if c == EXCEL_SECTION_DIVIDER:
# We ignore excel sections for datetimes
break
is_escape_char = c == EXCEL_ESCAPE_CHAR
# The am/pm and a/p code add some complications, need to make sure we are not that code
is_misc_char = c in EXCEL_MISC_CHARS and (
c != "/" or (ec != "am" and ec != "a")
)
new_excel_code = False
# Handle a new code without a different characeter in between
if (
ec
and not is_escape_char
and not is_misc_char
# If the code does not start with c, we are in a new code
and not ec.startswith(c.lower())
# other than the case where we are building up
# am/pm (minus the case where it is fully built), we are in a new code
and (not ec.startswith("a") or ec == "am/pm")
):
new_excel_code = True
# Code is finished, check if it is a proper code
if (is_escape_char or is_misc_char or new_excel_code) and ec:
# Checking if the previous code should have been minute or month
if checking_minute_or_month:
if ec == "ss" or ec == "s":
# It should be a minute!
minute_or_month_buffer = (
EXCEL_MINUTE_CODES[prev_code] + minute_or_month_buffer
)
else:
# It should be a months!
minute_or_month_buffer = (
EXCEL_MONTH_CODES[prev_code] + minute_or_month_buffer
)
python_date += minute_or_month_buffer
checking_minute_or_month = False
minute_or_month_buffer = ""
if ec in EXCEL_CODES:
python_date += EXCEL_CODES[ec]
# Handle months/minutes differently
elif ec in EXCEL_MINUTE_CODES:
# If preceded by hours, we know this is referring to minutes
if prev_code == "h" or prev_code == "hh":
python_date += EXCEL_MINUTE_CODES[ec]
else:
# Have to check if the next code is ss or s
checking_minute_or_month = True
minute_or_month_buffer = ""
else:
# Have | to abandon this attempt to convert because the code is not recognized
return None
prev_code = ec
excel_code = ""
if is_escap | e_char:
char_escaped = True
elif is_misc_char:
# Add the misc char
if checking_minute_or_month:
minute_or_month_buffer += c
else:
python_date += c
else:
# Just add to the cod |
import torch
from deluca.lung.core import Controller, LungEnv
class PIDCorrection(Controller):
    """Wrap a base controller and add a PID-style correction computed from a
    lung simulator's predicted pressure.

    Args:
        base_controller: controller producing the baseline (u_in, u_out).
        sim: simulator whose ``pressure`` tracks the expected pressure.
        pid_K: (proportional, integral) gains for the correction term.
        decay: exponential-decay factor for the integral error term.
    """

    # NOTE: pid_K default was a mutable list ([0.0, 0.0]); a tuple avoids the
    # shared-mutable-default pitfall and is backward compatible because the
    # gains are only ever read by index, never mutated.
    def __init__(self, base_controller: Controller, sim: LungEnv, pid_K=(0.0, 0.0), decay=0.1, **kwargs):
        self.base_controller = base_controller
        self.sim = sim
        self.I = 0.0  # decayed running integral of the pressure error
        self.K = pid_K
        self.decay = decay
        self.reset()

    def reset(self):
        """Reset the wrapped controller, the simulator and the integral term."""
        self.base_controller.reset()
        self.sim.reset()
        self.I = 0.0

    def compute_action(self, state, t):
        """Return (u_in, u_out): the base action with u_in nudged by PID.

        err is (simulated pressure - observed state); presumably ``state`` is
        the measured pressure -- TODO confirm against Controller's contract.
        """
        u_in_base, u_out = self.base_controller(state, t)
        err = self.sim.pressure - state
        # Exponentially-decayed integral of the error.
        self.I = self.I * (1 - self.decay) + err * self.decay
        pid_correction = self.K[0] * err + self.K[1] * self.I
        # Clamp the corrected inspiratory command to the valve range [0, 100].
        u_in = torch.clamp(u_in_base + pid_correction, min=0.0, max=100.0)
        # Advance the simulator with the action we are about to apply.
        self.sim(u_in, u_out, t)
        return u_in, u_out
|
"""
Tests for split's copy_from_template method.
Currently it is only used for content libraries.
However for these tests, we make sure it also works when copying from course to course.
"""
import ddt
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.utils import MixedSplitTestCase
'''
TODO: Update
@ddt.ddt
class TestSplitCopyTemplate(MixedSplitTestCase):
"""
Test for split's copy_from_template method.
"""
@ddt.data(
LibraryFactory,
CourseFactory,
)
def test_copy_from_template(self, source_type):
"""
Test that the behavior of copy_from_template() matches its docstring
"""
source_container = source_type.create(modulestore=self.store) # Either a library or a course
course = CourseFactory.create(modulestore=self.store)
# Add a vertical with a capa child to the source library/course:
vertical_block = self.make_block("vertical", source_container)
html_library_display_name = "HTML Display Name"
html_block = self.make_block("html", vertical_block, display_name=html_library_display_name)
if source_type == LibraryFactory:
source_container = self.store.get_library(
source_container.location.library_key, remove_version=False, remove_branch=False
)
else:
source_container = self.store.get_course(
source_container.location.course_key, remove_version=False, remove_branch=False
)
# Inherit the vertical and the problem from the library into the course:
source_keys = [source_container.children[0]]
new_blocks = self.store.copy_from_template(source_keys, dest_key=course.location, user_id=self.user_id)
self.assertEqual(len(new_blocks), 1)
course = self.store.get_course(course.location.course_key) # Reload from modulestore
self.assertEqual(len(course.children), 1)
vertical_block_course = self.store.get_item(course.children[0])
self.assertEqual(new_blocks[0], vertical_block_course.location)
html_block_course = self.store.get_item(vertical_block_course.children[0])
self.assertEqual(html_block_course.display_name, html_library_display_name)
# Override the display_name:
new_display_name = "The Trouble with Tribbles"
html_block_course.display_name = new_display_name
self.store.update_item(html_block_course, self.user_id)
# Test that "Any previously existing children of `dest_usage`
# that haven't been replaced/updated by this copy_from_template operation will be deleted."
extra_block = self.make_block("html", vertical_block_course)
# Repeat the copy_from_template():
new_blocks2 = self.store.copy_from_template(source_keys, dest_key=course.location, user_id=self.user_id)
self.assertEqual(new_blocks, new_blocks2)
# Reload problem_block_course:
html_block_course = self.store.get_item(html_block_course.location)
self.assertEqual(html_block_course.display_name, new_display_name)
# Ensure that extra_block was deleted:
vertical_block_course = self.store.get_item(new_blocks2[0])
self.assertEqual(len(vertical_block_course.children), 1)
with self.assertRaises(ItemNotFoundError):
self.store.get_item(extra_block.location)
def test_copy_from_template_publish(self):
"""
Test that copy_from_template's "defaults" data is not lost
when blocks are published.
"""
# Create a library with an html:
source_library = LibraryFactory.create(modulestore=self.store)
display_name_expected = "CUSTOM Library Display Name"
self.make_block("html", source_library, display_name=display_name_expected)
# Reload source_library since we need its branch and version to use copy_from_template:
source_library = self.store.get_library(
source_library.location.library_key, remove_version=False, remove_branch=False
)
# And a course with a vertical:
course = CourseFactory.create(modulestore=self.store)
self.make_block("vertical", course)
html_key_in_course = self.store.copy_from_template(
source_library.children, dest_key=course.location, user_id=self.user_id
)[0]
# We do the following twice because different methods get used inside
# split modulestore on first vs. subsequent publish
for __ in range(2):
# Publish:
self.store.publish(html_key_in_course, self.user_id)
# Test that the defaults values are there.
problem_published = self.store.get_item(
html_key_in_course.for_branch(ModuleStoreEnum.BranchName.publ | ished)
)
self.assertEqual(problem_published.display_name, display_name_expected)
def test_copy_from_template_auto_publish(self):
"""
Make sure that copy_from_template works with things like 'chapter' that
are always auto-published.
"""
source_course = CourseFactory.create(modulestore | =self.store)
course = CourseFactory.create(modulestore=self.store)
# Populate the course:
about = self.make_block("about", source_course)
chapter = self.make_block("chapter", source_course)
sequential = self.make_block("sequential", chapter)
# And three blocks that are NOT auto-published:
vertical = self.make_block("vertical", sequential)
html = self.make_block("html", source_course)
# Reload source_course since we need its branch and version to use copy_from_template:
source_course = self.store.get_course(
source_course.location.course_key, remove_version=False, remove_branch=False
)
# Inherit the vertical and the html from the library into the course:
source_keys = [block.location for block in [about, chapter, html]]
block_keys = self.store.copy_from_template(source_keys, dest_key=course.location, user_id=self.user_id)
self.assertEqual(len(block_keys), len(source_keys))
# Build dict of the new blocks in 'course', keyed by category (which is a unique key in our case)
new_blocks = {}
block_keys = set(block_keys)
while block_keys:
key = block_keys.pop()
block = self.store.get_item(key)
new_blocks[block.category] = block
block_keys.update(set(getattr(block, "children", [])))
# Check that auto-publish blocks with no children are indeed published:
def published_version_exists(block):
""" Does a published version of block exist? """
try:
self.store.get_item(block.location.for_branch(ModuleStoreEnum.BranchName.published))
return True
except ItemNotFoundError:
return False
# Check that the auto-publish blocks have been published:
self.assertFalse(self.store.has_changes(new_blocks["about"]))
# We can't use has_changes because it includes descendants
self.assertTrue(published_version_exists(new_blocks["chapter"]))
self.assertTrue(published_version_exists(new_blocks["sequential"])) # Ditto
# Check that non-auto-publish blocks and blocks with non-auto-publish descendants show changes:
self.assertTrue(self.store.has_changes(new_blocks["html"]))
# Will have changes since a child block has changes.
self.assertTrue(self.store.has_changes(new_blocks["chapter"]))
# Verify that our published_version_exists works
self.assertFalse(published_version_exists(new_blocks["vertical"]))
'''
|
#import os
import sys
import time
import xmltodict
import pprint
pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
# Module-level switch enabling extra diagnostics in get_poll_dict.
testing = False
# def poll_condor(jonbr, bagnr):
def poll_condor(filename):
    """Parse a condor_history XML dump into a poll dict (Python 2 code).

    Retries up to 4 times, sleeping 2 s between attempts, because condor may
    still be writing the file and early reads can lack classad entries.
    Returns get_poll_dict's result, or {} when every try came up empty.
    """
    # filename = "hist-%d-%d.xml" % ( jobnr, bagnr )
    # command = "condor_history -constraint 'HtcJob == %d && HtcBag == %d' -xml > %s" % ( jobnr, bagnr, filename )
    # os.system( command )
    tries = 0
    poll_dict = {}
    while tries < 4:
        tries += 1
        _trystr = "Try %d (%s) :" % (tries, filename)
        # Re-read the whole file on every attempt.
        xml = open(filename).read()
        xmldict = xmltodict.parse(xml)
        print >> sys.stderr, "type(xmldict) = ", type(xmldict)
        # Drill down defensively: <classads>, then <c> entries, then <a> attrs.
        if not ( type(xmldict) == dict and xmldict.has_key('classads') ):
            print >> sys.stderr, _trystr, "No classads, wait a little until the first results come in"
            time.sleep(2)
            continue
        print >> sys.stderr, "type(xmldict['classads']) = ", type(xmldict['classads'])
        if not ( type(xmldict['classads']) == dict and xmldict['classads'].has_key('c') ) :
            print >> sys.stderr, _trystr, "No classads <c> entries, wait a little until the first results come in"
            time.sleep(2)
            continue
        print >> sys.stderr, "type(xmldict['classads']['c']) = ", type(xmldict['classads']['c'])
        if not ( type(xmldict['classads']['c']) == list and xmldict['classads']['c'][0].has_key('a') ) :
            print >> sys.stderr, _trystr, "No classads attributes, wait a little until the first results come in"
            time.sleep(2)
            continue
        poll_dict = get_poll_dict(xmldict)
        break
    # if poll_dict['CompletedTasks'] == poll_dict['TotalTask']:
    #pp.pprint(xmldict)
    return poll_dict
def get_poll_dict(xmldict):
    """Flatten parsed classads into {taskid: [attrdict, ...]} (Python 2 code).

    taskid is "HtcJob.HtcBag.HtcTask"; each attrdict keeps only a whitelisted
    subset of the classad attributes.
    """
    if testing:
        # NOTE(review): filename/jobnr/bagnr are not defined in this scope;
        # this line raises NameError whenever testing is True -- confirm intent.
        print >> sys.stderr, "selecting info from file %s, job %s, bag %s" % (filename, jobnr, bagnr)
    res_dict = {}
    # print >> sys.stderr, xml
    # print "----"
    # jobid = 0
    for c in xmldict['classads']['c']:
        tempdict = {}
        # pp.pprint(c)
        attrs=c['a']
        # pp.pprint(attrs)
        # Each attribute <a> carries its value under a type-specific key:
        # r=float, i=int, s=string, b=boolean, e=expression.
        for d in attrs:
            v = None
            k = d['@n'].encode('ascii', 'ignore') # get rid of unicode from xmltodict
            # handle float
            if d.has_key('r'):
                v=float( d['r'].encode('ascii', 'ignore') ) # get rid of unicode from xmltodict
            # handle int
            if d.has_key('i'):
                v=int( d['i'].encode('ascii', 'ignore') ) # get rid of unicode from xmltodict
            # handle string
            if d.has_key('s'):
                # pp.pprint(d)
                if d['s'] == None:
                    v = 'None'
                else:
                    v= d['s'].encode('ascii', 'ignore') # get rid of unicode from xmltodict
            # handle boolean
            if d.has_key('b'):
                # pp.pprint(d)
                v= 'True' if d['b']['@v'] == 't' else 'False'
            # handle expression
            if d.has_key('e'):
                v= d['e'].encode('ascii', 'ignore') # get rid of unicode from xmltodict
            if v != None:
                tempdict[k] = v
            else:
                print "unknown datatype in "
                pp.pprint(d)
        # Keep only the attributes we report on downstream.
        attrdict = {}
        for k in [ 'HtcJob', 'HtcBag', 'HtcTask',
                   'RemoteWallClockTime', 'Cmd',
                   'MATCH_EXP_MachineCloudMachineType' ]:
            if tempdict.has_key(k):
                attrdict[k] = tempdict[k]
        #print kl
        # cur_jobnr = "%(HtcJob)s" % tempdict
        # if not ( jobnr == None or jobnr == cur_jobnr):
        #     continue
        # cur_bagnr = "%(HtcBag)s" % tempdict
        # if not ( bagnr == None or bagnr == cur_bagnr):
        #     continue
        # tasknr = "%(HtcTask)s" % taskdict
        taskid = "%(HtcJob)s.%(HtcBag)s.%(HtcTask)s" % tempdict
        #jobid += 1
        # print "----"
        # A task may appear several times (retries); collect them per taskid.
        if res_dict.has_key(taskid):
            res_dict[taskid].append ( attrdict )
        else:
            res_dict[taskid] = [ attrdict ]
    if testing:
        print >> sys.stderr, "====== res_dict ======"
        pp.pprint(res_dict)
        print >> sys.stderr, "------ res_dict ------"
    return res_dict
"""
{ 'tasks':
{
taskid:
[
{
attr1: val1,
attrn: valn,
},
{
attr1: val1,
attrn: valn,
}
]
}
}
"""
def do_test(filename):
    """Parse one ClassAd XML file and report how many completed tasks it holds."""
    poll_dict = poll_condor(filename)
    completed_tasks = 0
    # Each value is a list of attribute dicts, one per completed task record.
    for _ in poll_dict.keys():
        completed_tasks += len(poll_dict[_])
    completed_task_sets = poll_dict.keys().__len__()
    print >> sys.stderr, "Found %d completed tasks in %d sets" % (completed_tasks, completed_task_sets)
    if False:
        pp.pprint(poll_dict)
if __name__ == "__main__":
    # Stand-alone test driver (Python 2).
    pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
    testing = True
    usage = "usage : %s ClassAd_XML_file [ jobnr [ bagnr ] ]" % sys.argv[0]
    argc = len(sys.argv)
    jobnr = None
    bagnr = None
    print "%d args" % argc
    if argc <= 1:
        print usage
        filename = "test3.xml"
    if argc >= 2:
        filename = sys.argv[1]
        print "file = %s" % filename
    if argc >= 3:
        jobnr = sys.argv[2]
        print "job = %s" % jobnr
    if argc >= 4:
        bagnr = sys.argv[3]
        print "bag = %s" % bagnr
    # NOTE(review): the parsed filename/jobnr/bagnr are ignored below --
    # the loop always runs over the four hard-coded test files.
    for _ in [ "test1.xml", "test2.xml", "test3.xml", "test4.xml" ] :
        do_test( _ )
|
import sys
import logging
logger = logg | ing.getLogger(__name__)
def configure_logging():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)12s %(levelname)7s - %(message)s')
handler.setFormatter(form | atter)
root.addHandler(handler)
|
from datetime import datetime
import random
import secrets
import string

from bson import ObjectId
class DuplicateUserException(Exception):
    """Raised when creating or updating a user whose email already exists."""

    # Fixed a typo in the default message: "exits" -> "exists".
    def __init__(self, message='User name/email already exists'):
        Exception.__init__(self, message)
class UserServiceException(Exception):
    """Domain-level error raised by UserService operations."""

    def __init__(self, message=None):
        Exception.__init__(self, message)

    @classmethod
    def cannot_delete_super_admin(cls):
        """Factory for the error raised when deleting a super-admin user."""
        return UserServiceException("Cannot delete super admin user!")
class UserService(object):
    """CRUD and authentication helpers backed by ``db.user_collection``.

    NOTE(review): relies on legacy PyMongo APIs (insert/save/remove and
    cursor.count) that were removed in PyMongo 4 -- presumably this targets
    PyMongo <= 3; confirm before upgrading the driver.
    """

    def __init__(self, db):
        self.db = db
        self.users = self.db.user_collection

    def generate_api_key(self):
        """Return a random 20-character alphanumeric API key.

        Uses the ``secrets`` CSPRNG instead of ``random.sample``: ``random``
        is not suitable for security tokens, and sampling without replacement
        also lowered entropy by never repeating a character.
        """
        alphabet = string.ascii_letters + string.digits
        return ''.join(secrets.choice(alphabet) for _ in range(20))

    def create(self, item):
        """Insert a new user dict and return the inserted id.

        Raises DuplicateUserException if the email is already taken.
        """
        if self.user_exists(item['email']):
            raise DuplicateUserException()
        item.pop('_id', None)  # let MongoDB assign the _id
        item['created_at'] = datetime.now()  # NOTE(review): naive local time
        item['status'] = True
        if 'api_key' not in item:
            item['api_key'] = self.generate_api_key()
        if 'roles' not in item or item['roles'] is None or len(item['roles']) == 0:
            item['roles'] = ['member']
        return self.users.insert(item)

    def get_by_email(self, email):
        """Return the first user with this email, or None."""
        return self.users.find_one({"email": email})

    def validate_user(self, username, password):
        """Return True if a user exists with this email/password pair.

        NOTE(review): compares the password exactly as stored -- apparently
        plain text; confirm hashing happens elsewhere before trusting this.
        """
        query = {'email': username, 'password': password}
        return self.users.find(query).count() > 0

    def search(self, email=None):
        """Return all users, optionally filtered by exact email."""
        query = {}
        if email is not None:
            query['email'] = email
        return [x for x in self.users.find(query)]

    def delete(self, id):
        """Delete a user by id; super-admin users cannot be deleted."""
        item = self.get_by_id(id)
        if item and 'roles' in item and item['roles'] is not None and 'super_admin' in item['roles']:
            raise UserServiceException.cannot_delete_super_admin()
        return self.users.remove({"_id": ObjectId(id)})

    def get_by_id(self, id):
        """Return the user with this id, or None."""
        return self.users.find_one({"_id": ObjectId(id)})

    def get_by_api_key(self, api_key):
        """Return the user owning this API key, or None."""
        return self.users.find_one({"api_key": api_key})

    def update(self, item):
        """Persist changes to an existing user and return it.

        Raises DuplicateUserException if the new email belongs to another user.
        """
        if item['_id'] is None:
            return item
        if self.user_exists(item['email'], str(item['_id'])):
            raise DuplicateUserException()
        item['updated_at'] = datetime.now()
        self.users.save(item)
        return item

    def user_exists(self, email, id=None):
        """Return True if some user other than *id* (when given) has this email."""
        query = {}
        if id is not None:
            query = {"_id": {"$ne": ObjectId(id)}}
        query['email'] = email
        return self.users.find(query).count() > 0
|
ight])
if not nodes:
nodes.append(node)
else:
pos = 0
while pos < len(nodes) and nodes[pos].weight > node.weight:
pos += 1
nodes.insert(pos, node)
top = nodes[0]
tree = Tree(top)
tree.reduce(15)
codes = tree.codes()
code_items = list(codes.items())
code_items.sort(key=lambda item:(len(item[1]), item[0]))
return [(car, len(value)) for car, value in code_items]
def normalized(codelengths):
    """Turn a (char, bitlength) list into canonical Huffman codes.

    *codelengths* must be non-empty and sorted by (length, char); returns a
    dict mapping each char to its code as a string of '0'/'1' characters.
    """
    first_char, length = codelengths[0]
    code_value = 0
    codes = {first_char: "0" * length}
    for char, nbits in codelengths[1:]:
        code_value += 1
        bits = bin(code_value)[2:].rjust(length, "0")
        if nbits > length:
            # Code length grows: extend with zeros on the right and resync
            # the integer value to the widened bit string.
            length = nbits
            bits = bits.ljust(length, "0")
            code_value = int(bits, 2)
        assert len(bits) == nbits
        codes[char] = bits
    return codes
class Tree:
    """A Huffman code tree of Node objects, with depth-limiting support."""
    def __init__(self, root):
        self.root = root
        # NOTE(review): nb_levels is never updated after init; length()
        # computes the depth with a local variable instead.
        self.nb_levels = 0
    def length(self):
        """Recompute every node's level and return the tree depth."""
        self.root.level = 0
        node = self.root  # NOTE(review): unused local
        nb_levels = 0
        def set_level(node):
            # Walk down assigning child levels; track the deepest one seen.
            nonlocal nb_levels
            for child in node.children:
                child.level = node.level + 1
                nb_levels = max(nb_levels, child.level)
                if not child.is_leaf:
                    set_level(child)
        set_level(self.root)
        return nb_levels
    def reduce_tree(self):
        """Change the tree to reduce the number of levels.
        Uses the algorithm described in
        http://compressions.sourceforge.net/Huffman.html#3
        """
        currentlen = self.length()
        deepest = self.nodes_at(currentlen)
        deepest_leaves = [node for node in deepest if node.is_leaf]
        rightmost_leaf = deepest_leaves[-1]
        sibling = rightmost_leaf.parent.children[0]
        # replace rightmost_leaf's parent by rightmost_leaf
        parent = rightmost_leaf.parent
        grand_parent = parent.parent
        rank = grand_parent.children.index(parent)
        children = grand_parent.children
        children[rank] = rightmost_leaf
        # Re-attach via add() so parents/levels/heights are fixed up.
        grand_parent.add(children)
        # find first upper level with leaves
        up_level = rightmost_leaf.level - 2
        while up_level > 0:
            nodes = self.nodes_at(up_level)
            leaf_nodes = [node for node in nodes if node.is_leaf]
            if leaf_nodes:
                leftmost_leaf = leaf_nodes[0]
                # replace by node with leaves = [sibling, leftmost_leaf]
                parent = leftmost_leaf.parent
                rank = parent.children.index(leftmost_leaf)
                new_node = Node()
                new_node.level = leftmost_leaf.level
                children = [sibling, leftmost_leaf]
                new_node.add(children)
                parent.children[rank] = new_node
                new_node.parent = parent
                break
            else:
                up_level -= 1
        if up_level == 0:
            # ResizeError is defined elsewhere in this module: the requested
            # depth reduction is impossible for this tree shape.
            raise ResizeError
    def nodes_at(self, level, top=None):
        """Return list of all the nodes below top at specified level."""
        res = []
        if top is None:
            top = self.root
        if top.level == level:
            res = [top]
        elif not top.is_leaf:
            for child in top.children:
                res += self.nodes_at(level, child)
        return res
    def reduce(self, maxlevels):
        """Reduce number of levels to maxlevels, if possible."""
        while self.length() > maxlevels:
            self.reduce_tree()
    def codes(self, node=None, code=''):
        """Returns a dictionary mapping leaf characters to the Huffman code
        of the node, as a string of 0's and 1's."""
        if node is None:
            self.dic = {}
            node = self.root
        if node.is_leaf:
            self.dic[node.char] = code
        else:
            # Child index encodes the bit: 0 = left, 1 = right.
            for i, child in enumerate(node.children):
                self.codes(child, code + str(i))
        return self.dic
class Node:
    """A Huffman-tree node: a leaf (char is not None) or an internal node."""

    def __init__(self, char=None, weight=0, level=0):
        self.char = char
        self.is_leaf = char is not None
        self.level = level
        self.weight = weight
        self.height = 0

    def add(self, children):
        """Attach a [left, right] pair, updating parents, levels and the
        heights of every ancestor up to the root."""
        self.children = children
        for child in children:
            child.parent = self
            child.level = self.level + 1
        self.height = max(self.height,
                          children[0].height + 1,
                          children[1].height + 1)
        # Propagate the possibly increased height upwards.
        node = self
        while hasattr(node, "parent"):
            parent = node.parent
            parent.height = max(parent.height, node.height + 1)
            node = parent

    def __repr__(self):
        return f'{chr(self.char)!r}' if self.is_leaf else f'{self.children}'
class Compresser:
    """Huffman-compress a bytes-like object.

    Canonical codes are derived from byte frequencies; code 256 is used as
    the end-of-stream marker when packing to bytes (presumably added by
    codelengths_from_frequencies -- TODO confirm).
    """

    def __init__(self, text):
        if not isinstance(text, (bytes, bytearray, memoryview)):
            raise TypeError("a bytes-like object is required, not '" +
                            type(text).__name__ + "'")
        self.text = text
        # Byte value -> occurrence count.
        freqs = {}
        for byte in self.text:
            freqs[byte] = freqs.get(byte, 0) + 1
        self.codelengths = codelengths_from_frequencies(freqs)
        self.codes = normalized(self.codelengths)
        self.max_codelength = max(len(code) for code in self.codes.values())

    def compressed_bytes(self):
        """Pack the bit string (plus the code-256 end marker) into a
        bytearray; the final partial byte is left-aligned with zero padding."""
        bits = self.compressed_str() + self.codes[256]
        out = bytearray()
        for start in range(0, len(bits), 8):
            chunk = bits[start:start + 8]
            value = int(chunk, 2)
            if len(chunk) < 8:
                value <<= 8 - len(chunk)  # left-align the trailing bits
            out.append(value)
        return out

    def compressed_str(self):
        """Return the compressed text as a string of '0'/'1' characters."""
        return ''.join(self.codes[byte] for byte in self.text)
class Decompresser:
    """Rebuild the Huffman tree from code lengths and decode compressed data.

    *compressed* is either a '0'/'1' bit string or a bytes-like object;
    *codelengths* is the mapping produced by the compresser side, where
    character 256 is the end-of-block marker.
    """

    def __init__(self, compressed, codelengths):
        self.compressed = compressed
        codes = normalized(codelengths)
        # Invert the table: bit string -> character code.
        self.codes = {value: key for key, value in codes.items()}
        self.root = Node()
        self.make_tree(self.root)

    def make_tree(self, node):
        """Recursively grow the decoding tree; leaves hold decoded characters."""
        if node is self.root:
            node.code = ''
        children = []
        for bit in '01':
            next_code = node.code + bit
            if next_code in self.codes:
                child = Node(char=self.codes[next_code])
            else:
                child = Node()
            child.code = next_code
            children.append(child)
        node.add(children)
        for child in children:
            if not child.is_leaf:
                self.make_tree(child)

    def decompress(self):
        """Decode and return the original data as bytes.

        Dispatches to :meth:`decompress_bytes` for bytes-like input;
        otherwise *compressed* is walked as a string of '0'/'1' chars.
        """
        source = self.compressed
        if isinstance(source, (bytes, bytearray)):
            return self.decompress_bytes()
        pos = 0
        node = self.root
        res = bytearray()
        while pos < len(source):
            code = int(source[pos])
            child = node.children[code]
            if child.is_leaf:
                if child.char == 256:
                    break  # end-of-block marker (defensive: str input normally lacks it)
                # BUG FIX: the original appended the Node object itself
                # (TypeError) instead of the decoded character.
                res.append(child.char)
                node = self.root
            else:
                node = child
            pos += 1
        return bytes(res)

    def decompress_bytes(self):
        """Decode a packed-bytes payload, stopping at the 256 marker.

        NOTE(review): the `break` only leaves the per-byte bit loop; this
        assumes the marker sits in the final byte (true for data produced
        by Compresser.compressed_bytes).
        """
        source = self.compressed
        pos = 0
        node = self.root
        res = bytearray()
        while pos < len(source):
            byte = source[pos]
            mask = 128
            while mask > 0:
                code = bool(byte & mask)
                child = node.children[code]
                if child.is_leaf:
                    if child.char == 256:
                        break  # end of block
                    res.append(child.char)
                    node = self.root
                else:
                    node = child
                mask >>= 1
            pos += 1
        return res
def compress(text, klass=bytes):
    """Compress *text* and return a dict with the code lengths and the data.

    :param text: bytes-like object to compress
    :param klass: output form — ``bytes`` for packed bytes,
        ``str`` for a '0'/'1' bit string
    :return: ``{"codelengths": ..., "data": ...}``
    """
    compr = Compresser(text)
    result = {"codelengths": compr.codelengths}
    if klass is bytes:
        result["data"] = compr.compressed_bytes()
    elif klass is str:
        result["data"] = compr.compressed_str()
    # BUG FIX: the original fell off the end and always returned None.
    return result
|
# -*- coding: utf-8 -*-
# Licence: GPL v.3 http://www.gnu.org/licenses/gpl.html
# This is an XBMC addon for demonstrating the capabilities
# and usage of PyXBMCt framework.
import os
import xbmc
import xbmcaddon
import pyxbmct
from lib import utils
import plugintools
from itertools import tee, islice, chain, izip
_addon = xbmcaddon.Addon()
_addon_path = _ | addon.getAddonInfo('path')
# Enable or disable Estuary-based design ex | plicitly
# pyxbmct.skin.estuary = True
def previous_and_next(some_iterable):
    """Yield (previous, current, next) triples over *some_iterable*;
    the ends are padded with None."""
    prevs, items, nexts = tee(some_iterable, 3)
    shifted_prevs = chain([None], prevs)
    shifted_nexts = chain(islice(nexts, 1, None), [None])
    return izip(shifted_prevs, items, shifted_nexts)
class categorySelectDialog(pyxbmct.AddonDialogWindow):
    """Dialog with one radio button per category; toggling a button
    persists the category's enabled/disabled state via plugintools."""

    def __init__(self, title='', categories=None):
        super(categorySelectDialog, self).__init__(title)
        self.categories = categories
        self.listOfRadioButtons = []
        self.radioMap = {}  # category name -> radio button control
        maxRows = len(categories)
        self.setGeometry(400, 600, maxRows + 1, 1)
        self.set_active_controls()
        self.set_navigation()
        # Connect a key action (Backspace) to close the window.
        self.connect(pyxbmct.ACTION_NAV_BACK, self.close)

    def set_active_controls(self):
        """Create one radio button per category plus a trailing Close button."""
        row = 0
        for category in self.categories:
            for catId in category:
                catName = category[catId]
                radiobutton = pyxbmct.RadioButton(catName)
                catSetting = plugintools.get_setting(catName)
                self.placeControl(radiobutton, row, 0)
                self.connect(radiobutton, self.radio_update)
                # Initial state mirrors the saved setting.
                radiobutton.setSelected(catSetting == True)
                self.listOfRadioButtons.append(radiobutton)
                self.radioMap[catName] = radiobutton
                row += 1
        # BUG FIX: removed a stray duplicate `from itertools import ...`
        # that sat inside the class body (already imported at module top).
        self.close_button = pyxbmct.Button('Close')
        self.placeControl(self.close_button, row, 0)
        self.connect(self.close_button, self.close)

    def set_navigation(self):
        """Chain up/down navigation through the buttons into Close."""
        for previous, item, nextItem in previous_and_next(self.listOfRadioButtons):
            if previous is not None:
                item.controlUp(previous)
            if nextItem is not None:
                item.controlDown(nextItem)
            else:
                # Last radio button wraps into the Close button.
                item.controlDown(self.close_button)
                self.close_button.controlUp(item)
        self.setFocus(self.listOfRadioButtons[0])

    def radio_update(self):
        """Persist the focused radio button's new state to the settings."""
        radioButton = self.getFocus()
        for catName, radioButtonItem in self.radioMap.iteritems():
            if radioButton == radioButtonItem:
                if radioButton.isSelected():
                    plugintools.set_setting(catName, 'True')
                else:
                    plugintools.set_setting(catName, 'False')
|
# Copyright (C) 2017 Daniel Wa | tkins <daniel@daniel-watkins.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use th | is file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
class Image(meta.Entity):
    """Entity with a single raw-bytes field."""
    # Raw binary payload; presumably encoded image data — TODO confirm
    # against meta.Bytes semantics.
    data = meta.Bytes()
| |
from __future__ import print_function,division
# duner. using numbers and sample.
"""
q +-----+ r +-----+
---->| C |---->| D |--> s
^ +-----+ +-+---+
| |
+-----------------+
C = stock of clean diapers
D = stock of dirty diapers
q = inflow of clean diapers
r = flow of clean diapers to dirty diapers
s = out-flow of dirty diapers
"""
class o:
    """Tiny record type: fields live in __dict__ and are reachable both
    as attributes and via [] indexing."""

    def __init__(self, **fields):
        self.has().update(fields)

    def has(self):
        """Return the underlying attribute dictionary."""
        return self.__dict__

    def copy(self):
        """Return an independent shallow copy of this record."""
        return o(**self.has().copy())

    def __getitem__(self, key):
        return self.has()[key]

    def __setitem__(self, key, value):
        self.has()[key] = value

    def __repr__(self):
        return 'o' + str(self.has())
def sim(state0,life=100,spy=False,dt=1):
    """Generator driving a stock-and-flow simulation.

    Yields ``(dt, t, state0, state1)`` once per time step.  The caller is
    expected to mutate *state1* (the next state) in place before resuming
    the generator; on resume *state1* becomes the current state, negative
    stocks are clamped to zero and, if *spy*, the state is printed.
    """
    t= 0
    while t < life:
        t += dt
        # Next state starts as a copy of the current one.
        state1 = state0.copy()
        yield dt, t,state0,state1
        # Resumed: the caller has filled in state1; advance.
        state0 = state1
        # Stocks cannot go negative.
        for key in state1.has().keys():
            if state1[key] < 0:
                state1[key] = 0
        if spy:
            print(t,state1)
def diapers():
    """Run the clean/dirty diaper stock-and-flow model for 60 days."""
    def saturday(day):
        # Day 6 of each week is delivery/laundry day.
        return int(day) % 7 == 6

    world = o(C=20, D=0, q=0, r=8, s=0)
    for dt, t, u, v in sim(world, life=60, spy=True, dt=0.5):
        weekend = saturday(t)
        v.C += dt * (u.q - u.r)      # clean stock: deliveries minus usage
        v.D += dt * (u.r - u.s)      # dirty stock: usage minus washing
        v.q = 70 if weekend else 0   # deliveries arrive on Saturdays
        v.s = u.D if weekend else 0  # wash everything on Saturdays
        if t == 27:  # special case (the day I forget)
            v.s = 0


diapers()
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Fou | ndation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob | is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, StringField, IntField, Field, empty
import lxml.etree as ET
import base64
import re
import urllib
__all__ = ['Recipe', 'ICapRecipe']
class Comment():
    """A user comment on a recipe: optional author, rating and text."""

    def __init__(self, author=None, rate=None, text=None):
        self.author = author
        self.rate = rate
        self.text = text

    def __str__(self):
        # Assemble only the parts that are present.
        fragments = []
        if self.author:
            fragments.append('author: %s, ' % self.author)
        if self.rate:
            fragments.append('note: %s, ' % self.rate)
        if self.text:
            fragments.append('comment: %s' % self.text)
        return u''.join(fragments)
class Recipe(CapBaseObject):
    """
    Recipe object.

    Field declarations below come from the weboob capability framework;
    values are filled by backend modules.
    """
    title = StringField('Title of the recipe')
    author = StringField('Author name of the recipe')
    thumbnail_url = StringField('Direct url to recipe thumbnail')
    picture_url = StringField('Direct url to recipe picture')
    short_description = StringField('Short description of a recipe')
    nb_person = Field('The recipe was made for this amount of persons', list)
    preparation_time = IntField('Preparation time of the recipe in minutes')
    cooking_time = IntField('Cooking time of the recipe in minutes')
    ingredients = Field('Ingredient list necessary for the recipe', list)
    instructions = StringField('Instruction step list of the recipe')
    comments = Field('User comments about the recipe', list)

    def __init__(self, id, title):
        CapBaseObject.__init__(self, id)
        self.title = title

    def toKrecipesXml(self, author=None):
        """
        Export recipe to KRecipes XML string.

        :param author: fallback author suffix; defaults to 'Cookboob'
        :return: the XML document as a unicode string
        """
        # Author is rendered as "<recipe author>@<exporter>".
        sauthor = u''
        if not empty(self.author):
            sauthor += '%s@' % self.author
        if author is None:
            sauthor += 'Cookboob'
        else:
            sauthor += author
        header = u'<?xml version="1.0" encoding="UTF-8" ?>\n'
        # Skeleton document; sub-elements are grafted onto it below.
        initial_xml = '''\
<krecipes version='2.0-beta2' lang='fr' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:noNamespaceSchemaLocation='krecipes.xsd'>
<krecipes-recipe id='1'>
</krecipes-recipe>
</krecipes>'''
        doc = ET.fromstring(initial_xml)
        recipe = doc.find('krecipes-recipe')
        desc = ET.SubElement(recipe, 'krecipes-description')
        title = ET.SubElement(desc, 'title')
        title.text = self.title
        authors = ET.SubElement(desc, 'author')
        authors.text = sauthor
        eyield = ET.SubElement(desc, 'yield')
        if not empty(self.nb_person):
            amount = ET.SubElement(eyield, 'amount')
            # Single value or a [min, max] range of persons.
            if len(self.nb_person) == 1:
                amount.text = '%s' % self.nb_person[0]
            else:
                mini = ET.SubElement(amount, 'min')
                mini.text = u'%s' % self.nb_person[0]
                maxi = ET.SubElement(amount, 'max')
                maxi.text = u'%s' % self.nb_person[1]
            etype = ET.SubElement(eyield, 'type')
            etype.text = 'persons'
        if not empty(self.preparation_time):
            preptime = ET.SubElement(desc, 'preparation-time')
            # NOTE(review): uses Python 2 integer division for HH:MM —
            # under Python 3 `/` would yield a float; confirm runtime.
            preptime.text = '%02d:%02d' % (self.preparation_time / 60, self.preparation_time % 60)
        if not empty(self.picture_url):
            # Fetches the picture over the network (Python 2 urllib API)
            # and embeds it base64-encoded in a CDATA section.
            data = urllib.urlopen(self.picture_url).read()
            datab64 = base64.encodestring(data)[:-1]
            pictures = ET.SubElement(desc, 'pictures')
            pic = ET.SubElement(pictures, 'pic', {'format': 'JPEG', 'id': '1'})
            pic.text = ET.CDATA(datab64)
        if not empty(self.ingredients):
            ings = ET.SubElement(recipe, 'krecipes-ingredients')
            # Leading digits of an ingredient line are treated as the amount.
            pat = re.compile('^[0-9]*')
            for i in self.ingredients:
                sname = u'%s' % i
                samount = ''
                sunit = ''
                first_nums = pat.match(i).group()
                if first_nums != '':
                    samount = first_nums
                    sname = i.lstrip('0123456789 ')
                ing = ET.SubElement(ings, 'ingredient')
                am = ET.SubElement(ing, 'amount')
                am.text = samount
                unit = ET.SubElement(ing, 'unit')
                unit.text = sunit
                name = ET.SubElement(ing, 'name')
                name.text = sname
        if not empty(self.instructions):
            instructions = ET.SubElement(recipe, 'krecipes-instructions')
            instructions.text = self.instructions
        if not empty(self.comments):
            ratings = ET.SubElement(recipe, 'krecipes-ratings')
            for c in self.comments:
                rating = ET.SubElement(ratings, 'rating')
                if c.author:
                    rater = ET.SubElement(rating, 'rater')
                    rater.text = c.author
                if c.text:
                    com = ET.SubElement(rating, 'comment')
                    com.text = c.text
                crits = ET.SubElement(rating, 'criterion')
                if c.rate:
                    # Rate like "4/5": only the numerator becomes the stars.
                    crit = ET.SubElement(crits, 'criteria')
                    critname = ET.SubElement(crit, 'name')
                    critname.text = 'Overall'
                    critstars = ET.SubElement(crit, 'stars')
                    critstars.text = c.rate.split('/')[0]
        return header + ET.tostring(doc, encoding='UTF-8', pretty_print=True).decode('utf-8')
class ICapRecipe(IBaseCap):
    """
    Capability interface for recipe providers; backends implement it.
    """

    def iter_recipes(self, pattern):
        """
        Search recipes and iterate on results.

        :param pattern: pattern to search
        :type pattern: str
        :rtype: iter[:class:`Recipe`]
        """
        raise NotImplementedError()

    def get_recipe(self, _id):
        """
        Get a recipe object from an ID.

        :param _id: ID of recipe
        :type _id: str
        :rtype: :class:`Recipe`
        """
        raise NotImplementedError()
|
"RESTRICT")
# Make available for S3Models
s3.role_required = role_required
s3.roles_permitted = roles_permitted
# =============================================================================
# Other reusable fields
# NOTE(review): T, S3ReusableField, s3_comments_widget, deployment_settings,
# response/s3 etc. come from the web2py model execution environment.
# -----------------------------------------------------------------------------
# Reusable comments field to include in other table definitions
s3_comments = S3ReusableField("comments", "text",
                              label = T("Comments"),
                              widget = s3_comments_widget,
                              comment = DIV(_class="tooltip",
                                            _title="%s|%s" % (T("Comments"),
                                                              T("Please use this field to record any additional information, including a history of the record if it is updated."))))
s3.comments = s3_comments
# -----------------------------------------------------------------------------
# Reusable currency field to include in other table definitions
#
# @ToDo: Move to a Finance module
#
currency_type_opts = deployment_settings.get_fin_currencies()
default_currency = deployment_settings.get_fin_currency_default()
currency_type = S3ReusableField("currency_type", "string",
                                length = 3,
                                #notnull=True,
                                requires = IS_IN_SET(currency_type_opts.keys(),
                                                     zero=None),
                                default = default_currency,
                                label = T("Currency"),
                                #represent = lambda opt: \
                                #    currency_type_opts.get(opt, UNKNOWN_OPT),
                                writable = deployment_settings.get_fin_currency_writable())
response.s3.currency_type = currency_type
# =============================================================================
# Lx
#
# These fields are populated onaccept from location_id
# - for many reads to fewer writes, this is faster than Virtual Fields
#
# Labels that vary by country are set by gis.update_table_hierarchy_labels()
#
address_L4 = S3ReusableField("L4",
                             #label=gis.get_location_hierarchy("L4"),
                             readable=False,
                             writable=False)
address_L3 = S3ReusableField("L3",
                             #label=gis.get_location_hierarchy("L3"),
                             readable=False,
                             writable=False)
address_L2 = S3ReusableField("L2",
                             #label=gis.get_location_hierarchy("L2"),
                             readable=False,
                             writable=False)
address_L1 = S3ReusableField("L1",
                             #label=gis.get_location_hierarchy("L1"),
                             readable=False,
                             writable=False)
address_L0 = S3ReusableField("L0",
                             # L0 Location Name never varies except with a Translation
                             label=T("Country"),
                             readable=False,
                             writable=False)
# -----------------------------------------------------------------------------
def lx_fields():
    """Return the tuple of reusable Lx location fields (L4 down to L0)."""
    return (address_L4(),
            address_L3(),
            address_L2(),
            address_L1(),
            address_L0())

s3.lx_fields = lx_fields
# -----------------------------------------------------------------------------
# Hide Lx fields in Create forms
# inc list_create (list_fields over-rides)
def lx_hide(table):
    """Mark every Lx location field on *table* unreadable
    (hides them in Create forms, inc. list_create)."""
    for level in ("L4", "L3", "L2", "L1", "L0"):
        getattr(table, level).readable = False

s3.lx_hide = lx_hide
# -----------------------------------------------------------------------------
def lx_onvalidation(form):
    """
    Write the Lx fields from the Location
    - used by pr_person, hrm_training, irs_ireport

    @ToDo: Allow the reverse operation.
    If these fields are populated then create/update the location

    Mutates form.vars in place (L0/L1/... names copied from the
    gis_location hierarchy).
    """
    vars = form.vars
    if "location_id" in vars and vars.location_id:
        table = s3db.gis_location
        query = (table.id == vars.location_id)
        location = db(query).select(table.name,
                                    table.level,
                                    table.parent,
                                    table.path,
                                    limitby=(0, 1)).first()
        if location:
            if location.level == "L0":
                # The location itself is a country.
                vars.L0 = location.name
            elif location.level == "L1":
                vars.L1 = location.name
                if location.parent:
                    # Parent of an L1 is the country.
                    query = (table.id == location.parent)
                    country = db(query).select(table.name,
                                               limitby=(0, 1)).first()
                    if country:
                        vars.L0 = country.name
            else:
                # Get Names of ancestors at each level
                vars = gis.get_parent_per_level(vars,
                                                vars.location_id,
                                                feature=location,
                                                ids=False,
                                                names=True)

s3.lx_onvalidation = lx_onvalidation
# -----------------------------------------------------------------------------
def lx_update(table, record_id):
    """
    Write the Lx fields from the Location
    - used by hrm_human_resource & pr_address

    @ToDo: Allow the reverse operation.
    If these fields are populated then create/update the location

    Unlike lx_onvalidation, this updates the stored record directly.
    """
    if "location_id" in table:
        ltable = s3db.gis_location
        # Join the record to its location.
        query = (table.id == record_id) & \
                (ltable.id == table.location_id)
        location = db(query).select(ltable.id,
                                    ltable.name,
                                    ltable.level,
                                    ltable.parent,
                                    ltable.path,
                                    limitby=(0, 1)).first()
        if location:
            vars = Storage()
            if location.level == "L0":
                vars.L0 = location.name
            elif location.level == "L1":
                vars.L1 = location.name
                if location.parent:
                    # Parent of an L1 is the country.
                    query = (ltable.id == location.parent)
                    country = db(query).select(ltable.name,
                                               limitby=(0, 1)).first()
                    if country:
                        vars.L0 = country.name
            else:
                # Get Names of ancestors at each level
                vars = gis.get_parent_per_level(vars,
                                                location.id,
                                                feature=location,
                                                ids=False,
                                                names=True)
            # Update record
            db(table.id == record_id).update(**vars)

s3.lx_update = lx_update
# =============================================================================
# Addresses
#
# These fields are populated onaccept from location_id
#
# @ToDo: Add Postcode to gis.update_table_hierarchy_labels()
#
address_building_name = S3ReusableField("building_name",
label=T("Building Name"),
readable=False,
writable=False)
address_address = S3ReusableField("address",
label=T("Address"),
readable=False,
writable=False)
address_postcode = S3ReusableField("postcode",
label=deployment_settings.get_ui_label_postcode(),
readab |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the documentation of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## No Commercial Usage
## This file contains pre-release code and may not be distributed.
## You may use this file in accordance with the terms and conditions
## contained in the Technology Preview License Agreement accompanying
## this package.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
##
##
##
##
##
##
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class CustomWidget(QWidget):
    """Widget that paints the Qt logo path with a gradient brush.

    With fake=True it first fills its rect white plus a red cross-hatch,
    making background (non-)propagation visible in the demo.
    """

    def __init__(self, parent, fake = False):
        QWidget.__init__(self, parent)
        gradient = QLinearGradient(QPointF(0, 0), QPointF(100.0, 100.0))
        baseColor = QColor(0xa6, 0xce, 0x39, 0x7f)  # translucent green
        gradient.setColorAt(0.0, baseColor.light(150))
        gradient.setColorAt(0.75, baseColor.light(75))
        self.brush = QBrush(gradient)
        self.fake = fake
        self.fakeBrush = QBrush(Qt.red, Qt.DiagCrossPattern)
        # Build the "Qt house" shape; OddEvenFill punches the two inner
        # rectangles out as holes.
        qtPath = QPainterPath()
        qtPath.setFillRule(Qt.OddEvenFill)
        qtPath.moveTo(-45.0, -20.0)
        qtPath.lineTo(0.0, -45.0)
        qtPath.lineTo(45.0, -20.0)
        qtPath.lineTo(45.0, 45.0)
        qtPath.lineTo(-45.0, 45.0)
        qtPath.lineTo(-45.0, -20.0)
        qtPath.closeSubpath()
        # First inner rectangle.
        qtPath.moveTo(15.0, 5.0)
        qtPath.lineTo(35.0, 5.0)
        qtPath.lineTo(35.0, 40.0)
        qtPath.lineTo(15.0, 40.0)
        qtPath.lineTo(15.0, 5.0)
        # Second inner rectangle.
        qtPath.moveTo(-35.0, -15.0)
        # NOTE(review): closeSubpath() directly after moveTo closes an
        # empty subpath; compared with the other subpaths it looks
        # misplaced — confirm against the original Qt example.
        qtPath.closeSubpath()
        qtPath.lineTo(-10.0, -15.0)
        qtPath.lineTo(-10.0, 10.0)
        qtPath.lineTo(-35.0, 10.0)
        qtPath.lineTo(-35.0, -15.0)
        qtPath.closeSubpath()
        self.path = qtPath

    def paintEvent(self, event):
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)
        if self.fake:
            # Simulate an unfilled background.
            painter.fillRect(event.rect(), QBrush(Qt.white))
            painter.fillRect(event.rect(), self.fakeBrush)
        painter.setBrush(self.brush)
        painter.translate(60, 60)
        painter.drawPath(self.path)
        painter.end()

    def sizeHint(self):
        return QSize(120, 120)

    def minimumSizeHint(self):
        return QSize(120, 120)
if __name__ == "__main__":
    # The demo emulates either Qt 4.0 or 4.1 background-propagation
    # behaviour; the version is the first command-line argument.
    try:
        qt = sys.argv[1]
    except IndexError:
        qt = "4.1"
    if qt != "4.0" and qt != "4.1":
        sys.stderr.write("Usage: %s [4.0|4.1]\n" % sys.argv[0])
        sys.exit(1)
    app = QApplication(sys.argv)
    exec_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
    label = QLabel()
    label.setPixmap(QPixmap(os.path.join(exec_dir, "background.png")))
    layout = QGridLayout()
    label.setLayout(layout)
    # Column 0: default widget behaviour.
    if qt == "4.0":
        layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
        caption = QLabel("Opaque (Default)", label)
        caption.setMargin(2)
        layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
    elif qt == "4.1":
        layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
        caption = QLabel("Contents Propagated (Default)", label)
        caption.setAutoFillBackground(True)
        caption.setMargin(2)
        layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
    # Column 1: explicit background fill / contents propagation.
    if qt == "4.0":
        contentsWidget = CustomWidget(label)
        contentsWidget.setAttribute(Qt.WA_ContentsPropagated, True)
        layout.addWidget(contentsWidget, 0, 1, Qt.AlignCenter)
        caption = QLabel("With WA_ContentsPropagated set", label)
        caption.setMargin(2)
        layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
    elif qt == "4.1":
        autoFillWidget = CustomWidget(label)
        autoFillWidget.setAutoFillBackground(True)
        layout.addWidget(autoFillWidget, 0, 1, Qt.AlignCenter)
        caption = QLabel("With autoFillBackground set", label)
        caption.setAutoFillBackground(True)
        caption.setMargin(2)
        layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
    # Column 2: opaque paint events (widget paints its own background).
    if qt == "4.0":
        noBackgroundWidget = CustomWidget(label, fake = True)
        noBackgroundWidget.setAttribute(Qt.WA_NoBackground, True)
        layout.addWidget(noBackgroundWidget, 0, 2, Qt.AlignCenter)
        caption = QLabel("With WA_NoBackground set", label)
        caption.setWordWrap(True)
        caption.setMargin(2)
        layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
    elif qt == "4.1":
        opaqueWidget = CustomWidget(label, fake = True)
        opaqueWidget.setAttribute(Qt.WA_OpaquePaintEvent, True)
        layout.addWidget(opaqueWidget, 0, 2, Qt.AlignCenter)
        caption = QLabel("With WA_OpaquePaintEvent set", label)
        caption.setAutoFillBackground(True)
        caption.setMargin(2)
        layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
    if qt == "4.0":
        label.setWindowTitle("Qt 4.0: Painting Custom Widgets")
    elif qt == "4.1":
        label.setWindowTitle("Qt 4.1: Painting Custom Widgets")
    label.resize(404, 160)
    label.show()
    sys.exit(app.exec_())
|
"""
Application for testing syncing algorithm
(c) 2013-2014 by Mega Limited, Wellsford, New Zealand
This file is part of the MEGA SDK - Client Access Engine.
Applications using the MEGA API must present a valid application key
and comply with the rules set forth in the Terms of Service.
The MEGA SDK is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@copyright Simplified (2-clause) BSD License.
You should have received a copy of the license along with this
program.
"""
import sys
import os
import time
import shutil
import unittest
import xmlrunner
import subproces | s
import re
from sync_test_app import SyncTestApp
from sync_test import SyncTest
import logging
import argparse
class SyncTestMegaCliApp(SyncTestApp):
    """
    operates with megacli application

    Drives sync tests against two local folders that an externally
    started megacli instance keeps in sync with a remote folder.
    """

    def __init__(self, local_mount_in, local_mount_out, delete_tmp_files=True, use_large_files=True, check_if_alive=True):
        """
        local_mount_in: local upsync folder
        local_mount_out: local downsync folder
        check_if_alive: when False, skip the megacli process check
        """
        self.work_dir = os.path.join(".", "work_dir")
        SyncTestApp.__init__(self, local_mount_in, local_mount_out, self.work_dir, delete_tmp_files, use_large_files)
        self.check_if_alive = check_if_alive

    def sync(self):
        # megacli syncs in the background; just give it time to settle.
        time.sleep(5)

    def start(self):
        # try to create work dir
        return True

    def finish(self):
        """Remove the working directory, logging (not raising) on failure."""
        try:
            shutil.rmtree(self.work_dir)
        # BUG FIX: `except OSError, e:` is Python-2-only syntax; the
        # `as` form works on Python 2.6+ and Python 3.
        except OSError as e:
            logging.error("Failed to remove dir: %s (%s)", self.work_dir, e)

    def is_alive(self):
        """
        return True if application instance is running
        """
        if not self.check_if_alive:
            return True
        # Scan the process table for a megacli process.
        s = subprocess.Popen(["ps", "axw"], stdout=subprocess.PIPE)
        for x in s.stdout:
            if re.search("megacli", x):
                return True
        return False

    def pause(self):
        """
        pause application
        """
        # TODO: implement this !
        raise NotImplementedError("Not Implemented !")

    def unpause(self):
        """
        unpause application
        """
        # TODO: implement this !
        raise NotImplementedError("Not Implemented !")
if __name__ == "__main__":
    # Each --testN flag enables one sync scenario; -a/-b are presets.
    parser = argparse.ArgumentParser()
    parser.add_argument("--test1", help="test_create_delete_files", action="store_true")
    parser.add_argument("--test2", help="test_create_rename_delete_files", action="store_true")
    parser.add_argument("--test3", help="test_create_delete_dirs", action="store_true")
    parser.add_argument("--test4", help="test_create_rename_delete_dirs", action="store_true")
    parser.add_argument("--test5", help="test_sync_files_write", action="store_true")
    parser.add_argument("--test6", help="test_local_operations", action="store_true")
    parser.add_argument("--test7", help="test_update_mtime", action="store_true")
    parser.add_argument("--test8", help="test_create_rename_delete_unicode_files_dirs", action="store_true")
    parser.add_argument("-a", "--all", help="run all tests", action="store_true")
    parser.add_argument("-b", "--basic", help="run basic, stable tests", action="store_true")
    parser.add_argument("-d", "--debug", help="use debug output", action="store_true")
    parser.add_argument("-l", "--large", help="use large files for testing", action="store_true")
    # NOTE: store_false — passing the flag DISABLES the behaviour.
    parser.add_argument("-n", "--nodelete", help="Do not delete work files", action="store_false")
    parser.add_argument("-c", "--check", help="Do not check if megacli is running (useful, if other application is used for testing)", action="store_false")
    parser.add_argument("upsync_dir", help="local upsync directory")
    parser.add_argument("downsync_dir", help="local downsync directory")
    args = parser.parse_args()
    if args.debug:
        lvl = logging.DEBUG
    else:
        lvl = logging.INFO
    if args.all:
        args.test1 = args.test2 = args.test3 = args.test4 = args.test5 = args.test6 = args.test7 = args.test8 = True
    if args.basic:
        args.test1 = args.test2 = args.test3 = args.test4 = True
    logging.StreamHandler(sys.stdout)
    logging.basicConfig(format='[%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=lvl)
    # Operator instructions: both megacli instances must already be syncing.
    logging.info("")
    logging.info("1) Start the first [megacli] and run the following command: sync " + args.upsync_dir + " [remote folder]")
    logging.info("2) Start the second [megacli] and run the following command: sync " + args.downsync_dir + " [remote folder]")
    logging.info("3) Wait for both folders get fully synced")
    logging.info("4) Run: python %s", sys.argv[0])
    logging.info("")
    time.sleep(5)
    with SyncTestMegaCliApp(args.upsync_dir, args.downsync_dir, args.nodelete, args.large, args.check) as app:
        suite = unittest.TestSuite()
        if args.test1:
            suite.addTest(SyncTest("test_create_delete_files", app))
        if args.test2:
            suite.addTest(SyncTest("test_create_rename_delete_files", app))
        if args.test3:
            suite.addTest(SyncTest("test_create_delete_dirs", app, ))
        if args.test4:
            suite.addTest(SyncTest("test_create_rename_delete_dirs", app))
        if args.test5:
            suite.addTest(SyncTest("test_sync_files_write", app))
        if args.test6:
            suite.addTest(SyncTest("test_local_operations", app))
        if args.test7:
            suite.addTest(SyncTest("test_update_mtime", app))
        if args.test8:
            suite.addTest(SyncTest("test_create_rename_delete_unicode_files_dirs", app))
        testRunner = xmlrunner.XMLTestRunner(output='test-reports')
        testRunner.run(suite)
|
from utils.functions.models import row | s_to_dict_list_lower, GradeQtd
def grade_estoque(
        cursor, ref=None, dep=None, data_ini=None, tipo_grade=None,
        modelo=None, referencia=None):
    """Build a stock grid (sizes x colors) and return
    (header, fields, data, style, total).

    tipo_grade selects how the axes are derived:
      't' (sizes): 'm' = with movement, 'e' = with stock, 'c' = as registered
      'c' (colors): 'm' = with movement, 'e' = with stock

    SECURITY NOTE: all filter values are interpolated straight into the
    SQL via f-strings; callers must pass trusted values (bind variables
    would be safer).
    """
    # Filter templates: '{}' is substituted later with the column name
    # appropriate to the chosen query.
    filtro_modelo = ''
    filtro_modelo_mask = ''
    if modelo is not None:
        # Strips an optional leading letter and trailing letters from the
        # reference, drops leading zeros, and compares with the model number.
        filtro_modelo_mask = f'''--
          AND
            TRIM(
              LEADING '0' FROM (
                REGEXP_REPLACE(
                  {{}},
                  '^[abAB]?([0-9]+)[a-zA-Z]*$',
                  '\\1'
                )
              )
            ) = '{modelo}'
        '''
    filtro_referencia = ''
    filtro_referencia_mask = ''
    if referencia is not None:
        filtro_referencia_mask = f'''--
          AND {{}} = '{referencia}'
        '''
    # Deposit filter: a tuple becomes an IN-list, a scalar an equality.
    teste_dep = ''
    if type(dep) is tuple:
        teste_dep = ",".join(map(str, dep))
        teste_dep = f" IN ({teste_dep})"
    else:
        teste_dep = f" = '{dep}'"
    filtro_data_ini = ''
    if data_ini is not None:
        filtro_data_ini = (
            "AND ee.DATA_MOVIMENTO >= "
            f"TO_DATE('{data_ini}', 'yyyy-mm-dd')"
        )
    if tipo_grade is None:
        tipo_grade = {
            't': 'c',  # sizes as registered ("como cadastrado")
            'c': 'e',  # colors with stock ("com estoque")
        }
    # Grid builder (columns = sizes, rows = colors, values = quantities).
    grade = GradeQtd(cursor)
    # sizes (columns)
    if tipo_grade['t'] == 'm':  # with movement
        filtro_ref = ''
        if ref is not None:
            filtro_ref = f"AND ee.GRUPO_ESTRUTURA = '{ref}'"
        if modelo is not None:
            filtro_modelo = filtro_modelo_mask.format('ee.GRUPO_ESTRUTURA')
        if referencia is not None:
            filtro_referencia = filtro_referencia_mask.format('ee.GRUPO_ESTRUTURA')
        sql = f'''
            SELECT DISTINCT
              ee.SUBGRUPO_ESTRUTURA TAMANHO
            , tam.ORDEM_TAMANHO SEQUENCIA_TAMANHO
            FROM ESTQ_300_ESTQ_310 ee -- open and closed stock movements
            LEFT JOIN BASI_220 tam
              ON tam.TAMANHO_REF = ee.SUBGRUPO_ESTRUTURA
            WHERE ee.NIVEL_ESTRUTURA = 1
              {filtro_ref} -- filtro_ref
              {filtro_modelo} -- filtro_modelo
              {filtro_referencia} -- filtro_referencia
              AND ee.CODIGO_DEPOSITO {teste_dep}
              {filtro_data_ini} -- filtro_data_ini
            ORDER BY
              2
        '''
    elif tipo_grade['t'] == 'e':  # with stock
        filtro_ref = ''
        if ref is not None:
            filtro_ref = f"AND e.CDITEM_GRUPO = '{ref}'"
        if modelo is not None:
            filtro_modelo = filtro_modelo_mask.format('e.CDITEM_GRUPO')
        if referencia is not None:
            filtro_referencia = filtro_referencia_mask.format('e.CDITEM_GRUPO')
        sql = f'''
            SELECT DISTINCT
              e.CDITEM_SUBGRUPO TAMANHO
            , tam.ORDEM_TAMANHO SEQUENCIA_TAMANHO
            FROM ESTQ_040 e
            LEFT JOIN BASI_220 tam
              ON tam.TAMANHO_REF = e.CDITEM_SUBGRUPO
            WHERE e.CDITEM_NIVEL99 = 1
              {filtro_ref} -- filtro_ref
              {filtro_modelo} -- filtro_modelo
              {filtro_referencia} -- filtro_referencia
              AND e.DEPOSITO {teste_dep}
              AND e.QTDE_ESTOQUE_ATU <> 0
            ORDER BY
              2
        '''
    elif tipo_grade['t'] == 'c':  # as registered
        filtro_ref = ''
        if ref is not None:
            filtro_ref = f"AND t.BASI030_REFERENC = '{ref}'"
        if modelo is not None:
            filtro_modelo = filtro_modelo_mask.format('t.BASI030_REFERENC')
        if referencia is not None:
            filtro_referencia = filtro_referencia_mask.format('t.BASI030_REFERENC')
        sql = f'''
            SELECT DISTINCT
              t.TAMANHO_REF TAMANHO
            , tam.ORDEM_TAMANHO SEQUENCIA_TAMANHO
            FROM basi_020 t
            LEFT JOIN BASI_220 tam
              ON tam.TAMANHO_REF = t.TAMANHO_REF
            WHERE t.BASI030_NIVEL030 = 1
              {filtro_ref} -- filtro_ref
              {filtro_modelo} -- filtro_modelo
              {filtro_referencia} -- filtro_referencia
            ORDER BY
              2
        '''
    grade.col(
        id='TAMANHO',
        name='Tamanho',
        total='Total',
        forca_total=True,
        sql=sql,
    )
    # colors (rows)
    if tipo_grade['c'] == 'm':  # with movement
        filtro_ref = ''
        if ref is not None:
            filtro_ref = f"AND ee.GRUPO_ESTRUTURA = '{ref}'"
        if modelo is not None:
            filtro_modelo = filtro_modelo_mask.format('ee.GRUPO_ESTRUTURA')
        if referencia is not None:
            filtro_referencia = filtro_referencia_mask.format('ee.GRUPO_ESTRUTURA')
        sql = f'''
            SELECT DISTINCT
              ee.ITEM_ESTRUTURA SORTIMENTO
            FROM ESTQ_300_ESTQ_310 ee -- open and closed stock movements
            WHERE ee.NIVEL_ESTRUTURA = 1
              {filtro_ref} -- filtro_ref
              {filtro_modelo} -- filtro_modelo
              {filtro_referencia} -- filtro_referencia
              AND ee.CODIGO_DEPOSITO {teste_dep}
              {filtro_data_ini} -- filtro_data_ini
            ORDER BY
              ee.ITEM_ESTRUTURA
        '''
    elif tipo_grade['c'] == 'e':  # with stock
        filtro_ref = ''
        if ref is not None:
            filtro_ref = f"AND e.CDITEM_GRUPO = '{ref}'"
        if modelo is not None:
            filtro_modelo = filtro_modelo_mask.format('e.CDITEM_GRUPO')
        if referencia is not None:
            filtro_referencia = filtro_referencia_mask.format('e.CDITEM_GRUPO')
        sql = f'''
            SELECT DISTINCT
              e.CDITEM_ITEM SORTIMENTO
            FROM ESTQ_040 e
            WHERE e.CDITEM_NIVEL99 = 1
              {filtro_ref} -- filtro_ref
              {filtro_modelo} -- filtro_modelo
              {filtro_referencia} -- filtro_referencia
              AND e.DEPOSITO {teste_dep}
              AND e.QTDE_ESTOQUE_ATU <> 0
            ORDER BY
              e.CDITEM_ITEM
        '''
    grade.row(
        id='SORTIMENTO',
        name='Cor',
        name_plural='Cores',
        total='Total',
        forca_total=True,
        sql=sql,
    )
    # cell values (quantities per size/color)
    filtro_ref = ''
    if ref is not None:
        filtro_ref = f"AND e.CDITEM_GRUPO = '{ref}'"
    if modelo is not None:
        filtro_modelo = filtro_modelo_mask.format('e.CDITEM_GRUPO')
    if referencia is not None:
        filtro_referencia = filtro_referencia_mask.format('e.CDITEM_GRUPO')
    sql = f'''
        SELECT
          e.CDITEM_SUBGRUPO TAMANHO
        , e.CDITEM_ITEM SORTIMENTO
        , SUM(e.QTDE_ESTOQUE_ATU) QUANTIDADE
        FROM ESTQ_040 e
        WHERE e.LOTE_ACOMP = 0
          AND e.CDITEM_NIVEL99 = 1
          {filtro_ref} -- filtro_ref
          {filtro_modelo} -- filtro_modelo
          {filtro_referencia} -- filtro_referencia
          AND e.DEPOSITO {teste_dep}
        GROUP BY
          e.CDITEM_SUBGRUPO
        , e.CDITEM_ITEM
        ORDER BY
          e.CDITEM_SUBGRUPO
        , e.CDITEM_ITEM
    '''
    grade.value(
        id='QUANTIDADE',
        sql=sql,
    )
    fields = grade.table_data['fields']
    data = grade.table_data['data']
    style = grade.table_data['style']
    result = (
        grade.table_data['header'],
        fields,
        data,
        style,
        grade.total,
    )
    return result
|
import py | test
import dask.array as da
from ..utils import assert_eq
xr = pytest.importorskip("xarray") |
def test_mean():
    """da.mean applied to an xarray DataArray must produce a dask Array."""
    reduced = da.mean(xr.DataArray([1, 2, 3.0]))
    assert isinstance(reduced, da.Array)
    assert_eq(reduced, reduced)
def test_asarray():
    """da.asarray must unwrap an xarray DataArray into a dask Array."""
    converted = da.asarray(xr.DataArray([1, 2, 3.0]))
    assert isinstance(converted, da.Array)
    assert_eq(converted, converted)
def test_asanyarray():
    """da.asanyarray must unwrap an xarray DataArray into a dask Array."""
    converted = da.asanyarray(xr.DataArray([1, 2, 3.0]))
    assert isinstance(converted, da.Array)
    assert_eq(converted, converted)
|
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
http://dlmf.nist.gov/8.20#ii
"""
from __future__ import division, print_function, absolute_import
import os
import warnings
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
    """Return the polynomials ``[A_0, ..., A_K]`` for the expansion.

    Implements the recurrence A_{k+1}(x) = (1 - 2kx)*A_k(x) + x(x+1)*A_k'(x)
    with A_0 = 1 (see the module docstring for the DLMF reference).
    """
    polys = [Poly(1, x)]
    for k in range(K):
        prev = polys[-1]
        polys.append(Poly(1 - 2*k*x, x)*prev + Poly(x*(x + 1))*prev.diff())
    return polys
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py. |
* Do not edit it manually!
*/
"""
def main():
    """Generate ``../cephes/expn.h`` containing the expansion polynomials.

    Writes the coefficient array for each A_k (k = 0..12), the pointer
    table ``A`` and the degree table ``Adegs``, building the file under a
    temporary ``.new`` name and renaming it into place at the end.
    """
    print(__doc__)
    fn = os.path.join('..', 'cephes', 'expn.h')

    K = 12
    A = generate_A(K)
    with open(fn + '.new', 'w') as f:
        f.write(WARNING)
        f.write("#define nA {}\n".format(len(A)))
        for k, Ak in enumerate(A):
            # Use a distinct name for the coefficient loop variable; the
            # original comprehension rebound ``x``, shadowing the
            # module-level sympy symbol ``x`` used to build the polynomials.
            tmp = ', '.join([str(coef.evalf(18)) for coef in Ak.coeffs()])
            f.write("double A{}[] = {{{}}};\n".format(k, tmp))
        tmp = ", ".join(["A{}".format(k) for k in range(K + 1)])
        f.write("double *A[] = {{{}}};\n".format(tmp))
        tmp = ", ".join([str(Ak.degree()) for Ak in A])
        f.write("int Adegs[] = {{{}}};\n".format(tmp))
    os.rename(fn + '.new', fn)


if __name__ == "__main__":
    main()
|
face': 'Logowanie do panelu administracyjnego',
'Models': 'Modele',
'Modules': 'Moduły',
'NO': 'NIE',
'New Record': 'Nowy rekord',
'New application wizard': 'New application wizard',
'New simple application': 'New simple application',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Original/Translation': 'Oryginał/tłumaczenie',
'PAM authenticated u | ser, cannot change password here': 'PAM authenticated user, cannot change password here',
'Peeking at file': 'Podgląd pliku',
'Plugin "%s" in application': 'Wtyczka "%s" w aplikacji',
'Plugins': 'Wtyczki',
'Powered by': 'Zasilane przez',
'Query:': 'Zapytanie:',
'Resolve Conflict file': 'Rozwiąż ko | nflikt plików',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wierszy wybranych',
'Saved file hash:': 'Suma kontrolna zapisanego pliku:',
'Searching:': 'Searching:',
'Static files': 'Pliki statyczne',
'Sure you want to delete this object?': 'Czy na pewno chcesz usunąć ten obiekt?',
'TM': 'TM',
'Testing application': 'Testowanie aplikacji',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'wartość\'". Takie coś jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'There are no controllers': 'Brak kontrolerów',
'There are no models': 'Brak modeli',
'There are no modules': 'Brak modułów',
'There are no plugins': 'There are no plugins',
'There are no static files': 'Brak plików statycznych',
'There are no translators, only default language is supported': 'Brak plików tłumaczeń, wspierany jest tylko domyślny język',
'There are no views': 'Brak widoków',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'This is the %(filename)s template': 'To jest szablon %(filename)s',
'Ticket': 'Bilet',
'To create a plugin, name a file/folder plugin_[name]': 'Aby utworzyć wtyczkę, nazwij plik/katalog plugin_[nazwa]',
'Translation strings for the application': 'Translation strings for the application',
'Unable to check for upgrades': 'Nie można sprawdzić aktualizacji',
'Unable to download': 'Nie można ściągnąć',
'Unable to download app': 'Nie można ściągnąć aplikacji',
'Unable to download app because:': 'Unable to download app because:',
'Unable to download because': 'Unable to download because',
'Update:': 'Uaktualnij:',
'Upload & install packed application': 'Upload & install packed application',
'Upload a package:': 'Upload a package:',
'Upload existing application': 'Wyślij istniejącą aplikację',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Użyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapytań.',
'Use an url:': 'Use an url:',
'Version': 'Wersja',
'Views': 'Widoki',
'Welcome to web2py': 'Witaj w web2py',
'YES': 'TAK',
'about': 'informacje',
'additional code for your application': 'dodatkowy kod Twojej aplikacji',
'admin disabled because no admin password': 'panel administracyjny wyłączony z powodu braku hasła administracyjnego',
'admin disabled because not supported on google app engine': 'panel administracyjny wyłączony z powodu braku wsparcia na google apps engine',
'admin disabled because unable to access password file': 'panel administracyjny wyłączony z powodu braku dostępu do pliku z hasłem',
'administrative interface': 'administrative interface',
'and rename it (required):': 'i nadaj jej nową nazwę (wymagane):',
'and rename it:': 'i nadaj mu nową nazwę:',
'appadmin': 'administracja aplikacji',
'appadmin is disabled because insecure channel': 'administracja aplikacji wyłączona z powodu braku bezpiecznego połączenia',
'application "%s" uninstalled': 'aplikacja "%s" została odinstalowana',
'application compiled': 'aplikacja została skompilowana',
'application is compiled and cannot be designed': 'aplikacja jest skompilowana i nie może być projektowana',
'arguments': 'arguments',
'back': 'wstecz',
'cache': 'cache',
'cache, errors and sessions cleaned': 'pamięć podręczna, bilety błędów oraz pliki sesji zostały wyczyszczone',
'cannot create file': 'nie można utworzyć pliku',
'cannot upload file "%(filename)s"': 'nie można wysłać pliku "%(filename)s"',
'change admin password': 'change admin password',
'check all': 'zaznacz wszystko',
'check for upgrades': 'check for upgrades',
'clean': 'oczyść',
'click here for online examples': 'kliknij aby przejść do interaktywnych przykładów',
'click here for the administrative interface': 'kliknij aby przejść do panelu administracyjnego',
'click to check for upgrades': 'kliknij aby sprawdzić aktualizacje',
'code': 'code',
'collapse/expand all': 'collapse/expand all',
'compile': 'skompiluj',
'compiled application removed': 'skompilowana aplikacja została usunięta',
'controllers': 'kontrolery',
'create': 'create',
'create file with filename:': 'utwórz plik o nazwie:',
'create new application:': 'utwórz nową aplikację:',
'created by': 'utworzone przez',
'crontab': 'crontab',
'currently running': 'currently running',
'currently saved or': 'aktualnie zapisany lub',
'data uploaded': 'dane wysłane',
'database': 'baza danych',
'database %s select': 'wybór z bazy danych %s',
'database administration': 'administracja bazy danych',
'db': 'baza danych',
'defines tables': 'zdefiniuj tabele',
'delete': 'usuń',
'delete all checked': 'usuń wszystkie zaznaczone',
'delete plugin': 'usuń wtyczkę',
'deploy': 'deploy',
'design': 'projektuj',
'direction: ltr': 'direction: ltr',
'done!': 'zrobione!',
'download layouts': 'download layouts',
'download plugins': 'download plugins',
'edit': 'edytuj',
'edit controller': 'edytuj kontroler',
'edit views:': 'edit views:',
'errors': 'błędy',
'export as csv file': 'eksportuj jako plik csv',
'exposes': 'eksponuje',
'extends': 'rozszerza',
'failed to reload module': 'nie udało się przeładować modułu',
'failed to reload module because:': 'failed to reload module because:',
'file "%(filename)s" created': 'plik "%(filename)s" został utworzony',
'file "%(filename)s" deleted': 'plik "%(filename)s" został usunięty',
'file "%(filename)s" uploaded': 'plik "%(filename)s" został wysłany',
'file "%(filename)s" was not deleted': 'plik "%(filename)s" nie został usunięty',
'file "%s" of %s restored': 'plik "%s" z %s został odtworzony',
'file changed on disk': 'plik na dysku został zmieniony',
'file does not exist': 'plik nie istnieje',
'file saved on %(time)s': 'plik zapisany o %(time)s',
'file saved on %s': 'plik zapisany o %s',
'files': 'files',
'filter': 'filter',
'help': 'pomoc',
'htmledit': 'edytuj HTML',
'includes': 'zawiera',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'install': 'install',
'internal error': 'wewnętrzny błąd',
'invalid password': 'błędne hasło',
'invalid request': 'błędne zapytanie',
'invalid ticket': 'błędny bilet',
'language file "%(filename)s" created/updated': 'plik tłumaczeń "%(filename)s" został utworzony/uaktualniony',
'languages': 'pliki tłumaczeń',
'languages updated': 'pliki tłumaczeń zostały uaktualnione',
'loading...': 'wczytywanie...',
'login': 'zaloguj',
'logout': 'wyloguj',
'merge': 'zespól',
'models': 'modele',
'modules': 'moduły',
'new application "%s" created': 'nowa aplikacja "%s" została utworzona',
'new plugin installed': 'nowa wtyczka została zainstalowana',
'new record inserted': 'nowy rekord został wstawiony',
'next 100 rows': 'następne 100 wierszy',
'no match': 'no match',
'or import from csv file': 'lub zaimportuj z pliku csv',
'or provide app url:': 'or provide app url:',
'or provide application url:': 'lub podaj url aplikacji:',
'overwrite installed app': 'overwrite ins |
@property
def monitors(self):
if self._values['monitors'] is None:
return None
if self._values['monitors'] == '/Common/bigip':
return '/Common/bigip'
monitors = [fq_name(self.partition, x) for x in self.monitors_list]
if self.availability_requirement_type == 'at_least':
monitors = ' '.join(monitors)
result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
elif self.availability_requirement_type == 'require':
monitors = ' '.join(monitors)
result = 'require {0} from {1} {{ {2} }}'.format(self.number_of_probes, self.number_of_probers, monitors)
else:
result = ' and '.join(monitors).strip()
return result
@property
def number_of_probes(self):
"""Returns the probes value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
This method parses out the first of the numeric values. This values represents
the "probes" value that can be updated in the module.
Returns:
int: The probes value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+(?P<probes>\d+)\s+from'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('probes')
@property
def number_of_probers(self):
"""Returns the probers value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
This method parses out the first of the numeric values. This values represents
the "probers" value that can be updated in the module.
Returns:
int: The probers value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+\d+\s+from\s+(?P<probers>\d+)\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('probers')
@property
def at_least(self):
"""Returns the 'at least' value from the monitor string.
The monitor string for a Require monitor looks like this.
min 1 of { /Common/gateway_icmp }
This method parses out the first of the numeric values. This values represents
the "at_least" value that can be updated in the module.
Returns:
int: The at_least value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'min\s+(?P<least>\d+)\s+of\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('least')
class ModuleParameters(Parameters):
def _get_limit_value(self, type):
if self._values['limits'] is None:
return None
if self._values['limits'][type] is None:
return None
return int(self._values['limits'][type])
def _get_limit_status(self, type):
if self._values['limits'] is None:
return None
if self._values['limits'][type] is None:
return None
if self._values['limits'][type]:
return 'enabled'
return 'disabled'
@property
def devices(self):
if self._values['devices'] is None:
return None
result = []
for device in self._values['devices']:
if not any(x for x in ['address', 'addresses'] if x in device):
raise F5ModuleError(
"The specified device list must contain an 'address' or 'addresses' key"
)
if 'address' in device:
translation = self._determine_translation(device)
name = device['address']
device_name = device['name']
result.append({
'name': name,
'deviceName': device_name,
'translation': translation
})
elif 'addresses' in device:
for address in device['addresses']:
translation = self._determine_translation(address)
name = address['address']
device_name = device['name']
result.append({
'name': name,
'deviceName': device_name,
'translation': translation
})
return result
@property
def enabled(self):
if self._values['state'] in ['present', 'enabled']:
return True
return False
@property
def datacenter(self):
if self._values['datacenter'] is None:
return None
return fq_name(self.partition, self._values['datacenter'])
def _determine_translation(self, device):
if 'translation' not in device:
return 'none'
return device['translation']
@property
| def state(self):
if self._values['state'] == 'enabled':
return 'present'
return self._values['state']
@property
def iquery_allow_path(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_path'] is None:
return None
| return self._values['iquery_options']['allow_path']
@property
def iquery_allow_service_check(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_service_check'] is None:
return None
return self._values['iquery_options']['allow_service_check']
@property
def iquery_allow_snmp(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_snmp'] is None:
return None
return self._values['iquery_options']['allow_snmp']
@property
def monitors_list(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def monitors(self):
if self._values['monitors'] is None:
return None
if is_empty_list(self._values['monitors']):
return '/Common/bigip'
monitors = [fq_name(self.partition, x) for x in self.monitors_list]
if self.availability_requirement_type == 'at_least':
if self.at_least > len(self.monitors_list):
raise F5ModuleError(
"The 'at_least' value must not exceed the number of 'monitors'."
)
monitors = ' '.join(monitors)
result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
elif self.availability_requirement_type == 'require':
monitors = ' '.join(monitors)
if self.number_of_probes > self.number_of_probers:
raise F5ModuleError(
"The 'number_of_probes' must not exceed the 'number_of_probers'."
)
result = 'require {0} from {1} {{ {2} }}'.format(self.number_of_probes, self.number_of_probers, monitors)
else:
result = ' and '.join(monitors).strip()
return result
def _get_availability_value(self, type):
if self._values['availability_requirements'] is None:
return None
if self._values['availability_requirements'][type] is None:
return None
return int(self._values['availability_requirements'][type])
@property
def availability_requirement_type(self):
if self._values['availability_requirements'] is None:
return None
ret |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentFromLayer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.parameters.ParameterBoolean import ParameterBoolean
from sextante.outputs.OutputVector import OutputVector
class ExtentFromLayer(GeoAlgorithm):
    """Create a polygon layer from bounding boxes.

    Writes either one rectangle covering the whole input layer's extent,
    or (when BY_FEATURE is set) one rectangle per input feature. Each
    output feature carries the extent's min/max coordinates, center,
    area, perimeter, height and width as attributes.
    """

    INPUT_LAYER = "INPUT_LAYER"
    BY_FEATURE = "BY_FEATURE"
    OUTPUT = "OUTPUT"

    def defineCharacteristics(self):
        self.name = "Polygon from layer extent"
        self.group = "Vector general tools"
        self.addParameter(ParameterVector(self.INPUT_LAYER, "Input layer", ParameterVector.VECTOR_TYPE_ANY))
        self.addParameter(ParameterBoolean(self.BY_FEATURE, "Calculate extent for each feature separately", False))
        self.addOutput(OutputVector(self.OUTPUT, "Output layer"))

    def processAlgorithm(self, progress):
        layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT_LAYER))
        byFeature = self.getParameterValue(self.BY_FEATURE)
        fields = [ QgsField("MINX", QVariant.Double),
                   QgsField("MINY", QVariant.Double),
                   QgsField("MAXX", QVariant.Double),
                   QgsField("MAXY", QVariant.Double),
                   QgsField("CNTX", QVariant.Double),
                   QgsField("CNTY", QVariant.Double),
                   QgsField("AREA", QVariant.Double),
                   QgsField("PERIM", QVariant.Double),
                   QgsField("HEIGHT", QVariant.Double),
                   QgsField("WIDTH", QVariant.Double)
                 ]
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
                        QGis.WKBPolygon, layer.crs())
        if byFeature:
            self.featureExtent(layer, writer, progress)
        else:
            self.layerExtent(layer, writer, progress)
        del writer

    def _extentFeature(self, rect):
        """Build one rectangle feature (geometry + attributes) for *rect*.

        Shared by layerExtent/featureExtent, which previously duplicated
        this logic verbatim.
        """
        minx = rect.xMinimum()
        miny = rect.yMinimum()
        maxx = rect.xMaximum()
        maxy = rect.yMaximum()
        height = rect.height()
        width = rect.width()
        # Closed ring traced counter-clockwise from the lower-left corner.
        ring = [QgsPoint(minx, miny),
                QgsPoint(minx, maxy),
                QgsPoint(maxx, maxy),
                QgsPoint(maxx, miny),
                QgsPoint(minx, miny)
               ]
        feat = QgsFeature()
        feat.setGeometry(QgsGeometry().fromPolygon([ring]))
        feat.setAttributes([QVariant(minx),
                            QVariant(miny),
                            QVariant(maxx),
                            QVariant(maxy),
                            QVariant(minx + (width / 2.0)),
                            QVariant(miny + (height / 2.0)),
                            QVariant(width * height),
                            QVariant((2 * width) + (2 * height)),
                            QVariant(height),
                            QVariant(width)
                           ])
        return feat

    def layerExtent(self, layer, writer, progress):
        """Write a single feature covering the whole layer extent."""
        writer.addFeature(self._extentFeature(layer.extent()))

    def featureExtent(self, layer, writer, progress):
        """Write one bounding-box feature per input feature, updating progress."""
        features = QGisLayers.features(layer)
        count = len(features)
        if count == 0:
            # BUG FIX: the original divided 100.0 by len(features) and
            # raised ZeroDivisionError on an empty layer.
            return
        total = 100.0 / float(count)
        for current, f in enumerate(features, start=1):
            writer.addFeature(self._extentFeature(f.geometry().boundingBox()))
            progress.setPercentage(int(current * total))
|
add_argument('--tf_value_cuttoff',
type=float,
default=0.0,
help='The cutoff z-score tf_value for which activity plots are generated (default: 0.0) ')
parser.add_argument('--version', help='Print version and exit.', action='version',
version='Version %s' % HAYSTACK_VERSION)
return parser
#@profile
def main(input_args=None):
print '\n[H A Y S T A C K P I P E L I N E]'
print('\n-SELECTION OF HOTSPOTS OF VARIABILITY AND ENRICHED MOTIFS-\n')
print 'Version %s\n' % HAYSTACK_VERSION
parser = get_args_pipeline()
args = parser.parse_args(input_args)
args_dict = vars(args)
for key, value in args_dict.items():
exec ('%s=%s' % (key, repr(value)))
if meme_motifs_filename:
check_file(meme_motifs_filename)
if motif_mapping_filename:
check_file(motif_mapping_filename)
if not os.path.exists(temp_directory):
error('The folder specified with --temp_directory: %s does not exist!' % temp_directory)
sys.exit(1)
if input_is_bigwig:
extension_to_check = '.bw'
info('Input is set BigWig (.bw)')
else:
extension_to_check = '.bam'
info('Input is set compressed SAM (.bam)')
if name:
directory_name = 'HAYSTACK_PIPELINE_RESULTS_on_%s' % name
else:
directory_name = 'HAYSTACK_PIPELINE_RESULTS'
if output_directory:
output_directory = os.path.join(output_directory, directory_name)
else:
output_directory = directory_name
# check folder or sample filename
USE_GENE_EXPRESSION = True
if not os.path.exists(samples_filename_or_bam_folder):
error("The file or folder %s doesn't exist. Exiting." %
samples_filename_or_bam_folder)
sys.exit(1)
if os.path.isfile(samples_filename_or_bam_folder):
| BAM_FOLDER = False
data_filenames = []
gene_expression_filenames = []
sample_na | mes = []
with open(samples_filename_or_bam_folder) as infile:
for line in infile:
if not line.strip():
continue
if line.startswith('#'): # skip optional header line or empty lines
info('Skipping header/comment line:%s' % line)
continue
fields = line.strip().split()
n_fields = len(fields)
if n_fields == 2:
USE_GENE_EXPRESSION = False
sample_names.append(fields[0])
data_filenames.append(fields[1])
elif n_fields == 3:
USE_GENE_EXPRESSION = USE_GENE_EXPRESSION and True
sample_names.append(fields[0])
data_filenames.append(fields[1])
gene_expression_filenames.append(fields[2])
else:
error('The samples file format is wrong!')
sys.exit(1)
else:
if os.path.exists(samples_filename_or_bam_folder):
BAM_FOLDER = True
USE_GENE_EXPRESSION = False
data_filenames = glob.glob(os.path.join(samples_filename_or_bam_folder, '*' + extension_to_check))
if not data_filenames:
error('No bam/bigwig files to analyze in %s. Exiting.' % samples_filename_or_bam_folder)
sys.exit(1)
sample_names = [os.path.basename(data_filename).replace(extension_to_check, '') for data_filename in
data_filenames]
else:
error("The file or folder %s doesn't exist. Exiting." % samples_filename_or_bam_folder)
sys.exit(1)
# check all the files before starting
info('Checking samples files location...')
for data_filename in data_filenames:
check_file(data_filename)
if USE_GENE_EXPRESSION:
for gene_expression_filename in gene_expression_filenames:
check_file(gene_expression_filename)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# copy back the file used
if not BAM_FOLDER:
shutil.copy2(samples_filename_or_bam_folder, output_directory)
# write hotspots conf files
sample_names_hotspots_filename = os.path.join(output_directory,
'sample_names_hotspots.txt')
with open(sample_names_hotspots_filename, 'w+') as outfile:
for sample_name, data_filename in zip(sample_names, data_filenames):
outfile.write('%s\t%s\n' % (sample_name, data_filename))
#CALL HAYSTACK HOTSPOTS
cmd_to_run='haystack_hotspots %s %s --output_directory %s --bin_size %d %s %s %s %s %s %s %s %s %s %s %s %s %s %s' % \
(sample_names_hotspots_filename, genome_name,output_directory,bin_size,
('--do_not_filter_bams' if do_not_filter_bams else ''),
('--depleted' if depleted else ''),
('--do_not_recompute' if do_not_recompute else ''),
('--keep_intermediate_files' if keep_intermediate_files else ''),
('--input_is_bigwig' if input_is_bigwig else ''),
('--disable_quantile_normalization' if disable_quantile_normalization else ''),
'--transformation %s' % transformation,
'--chrom_exclude "%s"' % chrom_exclude,
'--z_score_high %f' % z_score_high,
'--z_score_low %f' % z_score_low,
'--th_rpm %f' % th_rpm,
'--blacklist %s' % blacklist,
'--read_ext %d' % read_ext,
'--n_processes %d' % n_processes)
print(cmd_to_run)
sb.call(cmd_to_run ,shell=True)
# CALL HAYSTACK MOTIFS
motif_directory = os.path.join(output_directory,'HAYSTACK_MOTIFS')
for sample_name in sample_names:
specific_regions_filename = os.path.join(output_directory, 'HAYSTACK_HOTSPOTS', 'SPECIFIC_REGIONS',
'Regions_specific_for_%s*.bed' % sample_name)
bg_regions_filename = glob.glob(os.path.join(output_directory, 'HAYSTACK_HOTSPOTS', 'SPECIFIC_REGIONS',
'Background_for_%s*.bed' % sample_name))[0]
cmd_to_run = 'haystack_motifs %s %s --bed_bg_filename %s --output_directory %s --name %s' % (
specific_regions_filename, genome_name, bg_regions_filename, motif_directory, sample_name)
if meme_motifs_filename:
cmd_to_run += ' --meme_motifs_filename %s' % meme_motifs_filename
if n_processes:
cmd_to_run += ' --n_processes %d' % n_processes
if temp_directory:
cmd_to_run += ' --temp_directory %s' % temp_directory
print(cmd_to_run)
sb.call(cmd_to_run, shell=True)
if USE_GENE_EXPRESSION:
sample_names_tf_activity_filename = os.path.join(output_directory,
'sample_names_tf_activity.txt')
with open(sample_names_tf_activity_filename, 'w+') as outfile:
for sample_name, gene_expression_filename in zip(sample_names,
gene_expression_filenames):
outfile.write('%s\t%s\n' % (sample_name,
gene_expression_filename))
tf_activity_directory = os.path.join(output_directory,
'HAYSTACK_TFs_ACTIVITY_PLANES')
for sample_name in sample_names:
# write tf activity conf files
# CALL HAYSTACK TF ACTIVITY
motifs_output_folder = os.path.join(motif_directory,
'HAYSTACK_MOTIFS_on_%s' % sample_name)
if os.path.exists(motifs_output_folder):
cmd_to_run='haystack_tf_activity_plane %s %s %s --output_directory %s' %(motifs_output_folder,
sample_names_tf_activity |
"""Module for configuring the host environment"""
import os
import json
import sys
class HostConfig():
    """Sets up the required components on the host environment."""

    def __init__(self):
        """Start with no path or app list; fill in via get_path()/get_apps()."""
        self.path = None  # root directory, normalized to '/.../' form
        self.apps = None  # list of app dicts loaded from data/apps.json

    def get_path(self):
        """Prompt for the root path, normalize it and validate it exists.

        Ensures the value starts and ends with '/'; exits the process with
        status 1 when the path is missing or does not exist on disk.
        """
        self.path = input(
            'Enter the root directory, on this host, to store docker \
            configurations (ex: /home/user): ')
        if not self.path:
            print('Path is required. Re-execute script and specify a path.')
            sys.exit(1)
        if self.path[0] != '/':
            self.path = '/{}'.format(self.path)
        if self.path[-1] != '/':
            self.path = '{}/'.format(self.path)
        if not os.path.exists(self.path):
            print(
                'Specified path does not exist. Please create path and re-execute script.')
            sys.exit(1)
        return self.path

    def get_apps(self):
        """Load the list of apps from the data/apps.json file."""
        with open('data/apps.json', 'r') as file:
            content = json.load(file)
        self.apps = [app for app in content.get('apps')]

    def build(self):
        """Create one folder per app under self.path, skipping existing ones."""
        print('Building folders...')
        for app in self.apps:
            try:
                app_path = '{}{}'.format(self.path, app.get('name'))
                os.mkdir(app_path)
                print('Created {}'.format(app_path))
            except OSError:
                # BUG FIX: the original left the '{}' placeholder unfilled;
                # include the app name in the skip message.
                print('Folder for {} already exists. Skipping..'.format(app.get('name')))


if __name__ == '__main__':
    pass
|
'''
Broken (piecewise continuous) random field generation using rft1d.randn1d
|
Note:
When FWHM gets large (2FWHM>nNodes), the data should be padded
using the *pad* keyword.
'''
import numpy as np
from matplotlib import pyplot
import rft1d
# --- (0) parameters ---
np.random.seed(12345)
nResponses = 5
nNodes = 101
FWHM = 20.0

# Boolean mask of observed field nodes; two interior regions are switched
# off to yield piecewise-continuous ("broken") fields.
nodes = np.ones(nNodes, dtype=bool)
nodes[20:30] = False
nodes[60:80] = False

# --- (1) generate broken Gaussian 1D fields ---
y = rft1d.randn1d(nResponses, nodes, FWHM)

# --- (2) plot ---
pyplot.close('all')
pyplot.plot(y.T)
pyplot.plot([0, 100], [0, 0], 'k:')
pyplot.xlabel('Field position', size=16)
pyplot.ylabel('z', size=20)
pyplot.title('Broken (piecewise continuous) random fields', size=20)
pyplot.show()
| |
"""Uploads apk to rollout track with user fraction."""
import sys
import socket
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import subprocess
import xml.etree.ElementTree as ET
import os
from pathlib import Path
TRACK = 'beta'
USER_FRACTION = 1
APK_FILE = '../platforms/android/build/outputs/apk/release/android-release.apk'
CREDENTIALS_JSON = 'playstore-service-account.json'
def main(argv):
    """Upload the release APK to the Google Play rollout track.

    Steps: resolve the package name and APK path (environment variables
    override defaults), collect release notes, authenticate via a service
    account, upload the APK, assign it to TRACK with USER_FRACTION, then
    (for ``world.karrot`` only) refresh the store listing and images, and
    finally commit the edit.  *argv* is accepted but unused.
    """
    # Package name: PACKAGE_NAME env var wins; otherwise parse the `id`
    # attribute out of the Cordova config.xml.
    package_name = os.environ.get('PACKAGE_NAME')
    if package_name:
        print('using provided package name', package_name)
    else:
        # get package name from somewhere
        print('finding package name')
        package_name = ET.parse('../platforms/android/res/xml/config.xml').getroot().attrib['id']
        print('found package name', package_name)
        print()
    # APK path: APK_FILE env var overrides the module-level default.
    apk_file = os.environ.get('APK_FILE')
    if apk_file is None:
        print('using default apk file path', APK_FILE)
        apk_file = APK_FILE
    print('Retrieving release notes from CHANGELOG.md...')
    # External helper script emits the newest changelog entry on stdout.
    releaseText = subprocess.run('../../scripts/get_newest_release.js', stdout=subprocess.PIPE).stdout.decode()
    # Truncate long notes; presumably the Play console caps release notes
    # at 500 characters — TODO confirm the limit.
    if len(releaseText) > 500:
        releaseText = releaseText[:495] + '\n...'
    print()
    print(releaseText)
    print()
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        CREDENTIALS_JSON, scopes=['https://www.googleapis.com/auth/androidpublisher']
    )
    print('Found credentials, trying to connect...')
    # Large uploads can be slow; widen the global socket timeout to 15 min.
    socket.setdefaulttimeout(900)
    service = build('androidpublisher', 'v3', credentials=credentials)
    # All subsequent calls operate within this edit; nothing is live until
    # the commit at the bottom.
    edit_response = service.edits().insert(body={}, packageName=package_name).execute()
    edit_id = edit_response['id']
    print('Inserted edit with ID', edit_id)
    print('Uploading APK...')
    apk_response = service.edits().apks().upload(
        editId=edit_id, packageName=package_name, media_body=apk_file
    ).execute()
    print('Version code %d has been uploaded' % apk_response['versionCode'])
    # Attach the uploaded version code to the rollout track as an
    # in-progress staged release.
    track_response = service.edits().tracks().patch(
        editId=edit_id,
        track=TRACK,
        packageName=package_name,
        body={
            'releases': [{
                'releaseNotes': [{
                    'text': releaseText,
                    'language': 'en-US'
                }],
                'versionCodes': [apk_response['versionCode']],
                'userFraction': USER_FRACTION,
                'status': 'inProgress',
            }]
        }
    ).execute()
    print('Track %s is set with releases: %s' % (track_response['track'], str(track_response['releases'])))
    # Store-listing sync is hard-wired to the production package only.
    if package_name == 'world.karrot':
        assets = Path('../playstoreAssets')
        language = 'en-US'
        listing = assets / language / 'listing'
        with (listing / 'shortDescription.txt').open() as shortDescription, \
                (listing / 'fullDescription.txt').open() as fullDescription:
            service.edits().listings().update(
                editId=edit_id,
                packageName=package_name,
                language=language,
                body={
                    'title': 'Karrot',
                    'language': language,
                    'shortDescription': shortDescription.read(),
                    'fullDescription': fullDescription.read(),
                    'video': '',
                }
            ).execute()
        print('Listing of %s has been updated' % package_name)
        images_path = assets / language / 'images'
        # Image type names come from the androidpublisher API; files on
        # disk are matched to a type by filename prefix.
        imageTypes = (
            'featureGraphic',
            'icon',
            'phoneScreenshots',
            'promoGraphic',
            'sevenInchScreenshots',
            'tenInchScreenshots',
            'tvBanner',
            'tvScreenshots',
            'wearScreenshots',
        )
        images = [str(p) for p in images_path.iterdir()]
        # Hash local images so they can be diffed against the sha1s the
        # Play API reports for already-uploaded images.
        # NOTE(review): the comprehension target `sha1` shadows the outer
        # `sha1` output string — works, but worth renaming.
        sha1 = subprocess.run(['sha1sum', *images], stdout=subprocess.PIPE).stdout.decode()
        sha1_images = {sha1: path for (sha1, path) in [i.split() for i in sha1.splitlines()]}
        for imageType in imageTypes:
            # Local images belonging to this type (matched by filename prefix).
            our_images = {
                sha1: path
                for (sha1, path) in sha1_images.items() if path.split('/')[-1].startswith(imageType)
            }
            images_response = service.edits().images().list(
                editId=edit_id,
                packageName=package_name,
                language=language,
                imageType=imageType,
            ).execute()
            their_images = images_response.get('images') or []
            their_images = {i['sha1']: i['id'] for i in their_images}
            # Set difference on sha1s: upload what only exists locally,
            # delete what only exists remotely.
            to_upload = [our_images.get(k) for k in (our_images.keys() - their_images.keys())]
            to_delete = [their_images.get(k) for k in (their_images.keys() - our_images.keys())]
            for image_id in to_delete:
                service.edits().images().delete(
                    editId=edit_id,
                    packageName=package_name,
                    language=language,
                    imageType=imageType,
                    imageId=image_id,
                ).execute()
                print('Deleted', image_id)
            for path in to_upload:
                service.edits().images().upload(
                    editId=edit_id,
                    packageName=package_name,
                    language=language,
                    imageType=imageType,
                    media_body=path,
                ).execute()
                print('Uploaded', path)
    # Committing the edit makes every change above live atomically.
    commit_request = service.edits().commit(editId=edit_id, packageName=package_name).execute()
    print('Edit "%s" has been committed' % (commit_request['id']))
# Entry-point guard: importing this module must not trigger an upload.
if __name__ == '__main__':
    main(sys.argv)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.