# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.core.urlresolvers import reverse
from subtitles.models import (get_lineage, SubtitleLanguage,
SubtitleVersion)
class SubtitleVersionInline(admin.TabularInline):
def has_delete_permission(self, request, obj=None):
# subtitle versions should be immutable, don't allow deletion
return False
model = SubtitleVersion
fields = ['version_number']
max_num = 0
class SubtitleLanguageAdmin(admin.ModelAdmin):
list_display = ['video_title', 'language_code', 'version_count', 'tip',
'unofficial_signoffs',
'official_signoffs',
'pending_collaborators',
'expired_pending_collaborators',
'unexpired_pending_collaborators',
'is_forked']
list_filter = ['created', 'language_code']
inlines = [SubtitleVersionInline]
search_fields = ['video__title', 'video__video_id', 'language_code']
raw_id_fields = ['video']
def unofficial_signoffs(self, o):
return o.unofficial_signoff_count
unofficial_signoffs.admin_order_field = 'unofficial_signoff_count'
def official_signoffs(self, o):
return o.official_signoff_count
official_signoffs.admin_order_field = 'official_signoff_count'
def pending_collaborators(self, o):
return o.pending_signoff_count
pending_collaborators.short_description = 'pending'
pending_collaborators.admin_order_field = 'pending_signoff_count'
def expired_pending_collaborators(self, o):
return o.pending_signoff_expired_count
expired_pending_collaborators.short_description = 'expired pending'
expired_pending_collaborators.admin_order_field = 'pending_signoff_expired_count'
def unexpired_pending_collaborators(self, o):
return o.pending_signoff_unexpired_count
unexpired_pending_collaborators.short_description = 'unexpired pending'
unexpired_pending_collaborators.admin_order_field = 'pending_signoff_unexpired_count'
def video_title(self, sl):
return sl.video.title_display()
video_title.short_description = 'video'
def version_count(self, sl):
return sl.subtitleversion_set.full().count()
version_count.short_description = 'number of versions'
def tip(self, sl):
ver = sl.get_tip(full=True)
return ver.version_number if ver else None
tip.short_description = 'tip version'
class SubtitleVersionChangeList(ChangeList):
def get_query_set(self, request):
qs = super(SubtitleVersionChangeList, self).get_query_set(request)
# for some reason using select_related makes MySQL choose an
# absolutely insane way to perform the query. Use prefetch_related()
# instead to work around this.
return qs.prefetch_related('video', 'subtitle_language')
class SubtitleVersionAdmin(admin.ModelAdmin):
list_per_page = 20
list_display = ['video_title', 'id', 'language', 'version_num',
'visibility', 'visibility_override',
'subtitle_count', 'created']
list_select_related = False
raw_id_fields = ['video', 'subtitle_language', 'parents', 'author']
list_filter = ['created', 'visibility', 'visibility_override',
'language_code']
list_editable = ['visibility', 'visibility_override']
search_fields = ['video__video_id', 'video__title', 'title',
'language_code', 'description', 'note']
# Unfortunately Django uses .all() on related managers instead of
# .get_query_set(). We've disabled .all() on SubtitleVersion managers so we
# can't let Django do this. This means we can't edit parents in the admin,
# but you should never be doing that anyway.
exclude = ['parents', 'serialized_subtitles']
readonly_fields = ['parent_versions']
# don't allow deletion
actions = []
def get_changelist(self, request, **kwargs):
return SubtitleVersionChangeList
def has_delete_permission(self, request, obj=None):
# subtitle versions should be immutable, don't allow deletion
return False
def version_num(self, sv):
return '#' + str(sv.version_number)
version_num.short_description = 'version #'
def video_title(self, sv):
return sv.video.title
video_title.short_description = 'video'
def language(self, sv):
return sv.subtitle_language.get_language_code_display()
def parent_versions(self, sv):
links = []
for parent in sv.parents.full():
href = reverse('admin:subtitles_subtitleversion_change',
args=(parent.pk,))
links.append('<a href="%s">%s</a>' % (href, parent))
return ', '.join(links)
parent_versions.allow_tags = True
# Hack to generate lineages properly when modifying versions in the admin
# interface. Maybe we should just disallow this entirely once the version
# models are hooked up everywhere else?
def response_change(self, request, obj):
response = super(SubtitleVersionAdmin, self).response_change(request, obj)
obj.lineage = get_lineage(obj.parents.full())
obj.save()
return response
def response_add(self, request, obj, *args, **kwargs):
response = super(SubtitleVersionAdmin, self).response_add(request, obj)
obj.lineage = get_lineage(obj.parents.full())
obj.save()
return response
# -----------------------------------------------------------------------------
admin.site.register(SubtitleLanguage, SubtitleLanguageAdmin)
admin.site.register(SubtitleVersion, SubtitleVersionAdmin)
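# A minimal sketch (not Amara's actual code) of the manager pattern the
# comments above rely on: SubtitleVersion's related managers hide some rows by
# default, expose everything via .full(), and disable .all() so the admin
# cannot silently bypass the filter. FullVersionManager and is_public are
# illustrative assumptions, not Amara's real schema.
from django.db import models


class FullVersionManager(models.Manager):
    def full(self):
        # explicit, unfiltered access to every version
        return super(FullVersionManager, self).get_query_set()

    def get_query_set(self):
        # the default queryset hides non-public versions
        return super(FullVersionManager, self).get_query_set().filter(
            is_public=True)

    def all(self):
        # fail loudly instead of returning the filtered default
        raise AssertionError('.all() is disabled; use .full() instead')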
= 'none') and (not infos['ask'] or \
infos['ask'] == 'none') and not infos['name'] and \
not infos['groups']:
# remove this useless item, it won't be shown in roster
# anyway
self.conn.connection.getRoster().delItem(jid)
elif jid != our_jid: # don't add our jid
self.roster[j] = raw_roster[jid]
else:
# Roster comes from DB
self.received_from_server = False
self.version = gajim.config.get_per('accounts', self.conn.name,
'roster_version')
self.roster = gajim.logger.get_roster(gajim.get_jid_from_account(
self.conn.name))
return True
class RosterSetReceivedEvent(nec.NetworkIncomingEvent):
name = 'roster-set-received'
base_network_events = []
def generate(self):
self.version = self.stanza.getTagAttr('query', 'ver')
self.items = {}
for item in self.stanza.getTag('query').getChildren():
try:
jid = helpers.parse_jid(item.getAttr('jid'))
except helpers.InvalidFormat:
log.warning('Invalid JID: %s, ignoring it' % item.getAttr('jid'))
continue
name = item.getAttr('name')
sub = item.getAttr('subscription')
ask = item.getAttr('ask')
groups = []
for group in item.getTags('group'):
groups.append(group.getData())
self.items[jid] = {'name': name, 'sub': sub, 'ask': ask,
'groups': groups}
if self.conn.connection and self.conn.connected > 1:
reply = nbxmpp.Iq(typ='result', attrs={'id': self.stanza.getID()},
to=self.stanza.getFrom(), frm=self.stanza.getTo(), xmlns=None)
self.conn.connection.send(reply)
return True
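# For reference, a representative roster-push stanza of the shape generate()
# parses above (RFC 6121 roster management; the JID, name and group below are
# invented examples):
SAMPLE_ROSTER_SET = """
<iq type='set' id='push1'>
  <query xmlns='jabber:iq:roster' ver='ver7'>
    <item jid='nurse@example.com' name='Nurse' subscription='both'>
      <group>Servants</group>
    </item>
  </query>
</iq>
"""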
class RosterInfoEvent(nec.NetworkIncomingEvent):
name = 'roster-info'
base_network_events = []
class MucOwnerReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
name = 'muc-owner-received'
base_network_events = []
def generate(self):
self.get_jid_resource()
qp = self.stanza.getQueryPayload()
self.form_node = None
for q in qp:
if q.getNamespace() == nbxmpp.NS_DATA:
self.form_node = q
self.dataform = dataforms.ExtendForm(node=self.form_node)
return True
class MucAdminReceivedEvent(nec.NetworkIncomingEvent, HelperEvent):
name = 'muc-admin-received'
base_network_events = []
def generate(self):
self.get_jid_resource()
items = self.stanza.getTag('query',
namespace=nbxmpp.NS_MUC_ADMIN).getTags('item')
self.users_dict = {}
for item in items:
if item.has_attr('jid') and item.has_attr('affiliation'):
try:
jid = helpers.parse_jid(item.getAttr('jid'))
except helpers.InvalidFormat:
log.warning('Invalid JID: %s, ignoring it' % \
item.getAttr('jid'))
continue
affiliation = item.getAttr('affiliation')
self.users_dict[jid] = {'affiliation': affiliation}
if item.has_attr('nick'):
self.users_dict[jid]['nick'] = item.getAttr('nick')
if item.has_attr('role'):
self.users_dict[jid]['role'] = item.getAttr('role')
reason = item.getTagData('reason')
if reason:
self.users_dict[jid]['reason'] = reason
return True
class PrivateStorageReceivedEvent(nec.NetworkIncomingEvent):
name = 'private-storage-received'
base_network_events = []
def generate(self):
query = self.stanza.getTag('query')
self.storage_node = query.getTag('storage')
if self.storage_node:
self.namespace = self.storage_node.getNamespace()
return True
class BookmarksHelper:
def parse_bookmarks(self):
        self.bookmarks = []
confs = self.storage_node.getTags('conference')
for conf in confs:
autojoin_val = conf.getAttr('autojoin')
if autojoin_val is None: # not there (it's optional)
                autojoin_val = False
minimize_val = conf.getAttr('minimize')
if minimize_val is None: # not there (it's optional)
minimize_val = False
print_status = conf.getTagData('print_status')
if not print_status:
print_status = conf.getTagData('show_status')
try:
jid = helpers.parse_jid(conf.getAttr('jid'))
except helpers.InvalidFormat:
log.warning('Invalid JID: %s, ignoring it' % conf.getAttr('jid'))
continue
bm = {'name': conf.getAttr('name'),
'jid': jid,
'autojoin': autojoin_val,
'minimize': minimize_val,
'password': conf.getTagData('password'),
'nick': conf.getTagData('nick'),
'print_status': print_status}
bm_jids = [b['jid'] for b in self.bookmarks]
if bm['jid'] not in bm_jids:
self.bookmarks.append(bm)
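# For reference, a representative <storage/> payload of the shape
# parse_bookmarks() walks above (XEP-0048 bookmarks; values invented;
# <print_status/> is the gajim-specific tag read above):
SAMPLE_BOOKMARKS_STORAGE = """
<storage xmlns='storage:bookmarks'>
  <conference jid='council@muc.example.org' name='Council' autojoin='true'>
    <nick>JC</nick>
    <password>cauldronburn</password>
    <print_status>all</print_status>
  </conference>
</storage>
"""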
class PrivateStorageBookmarksReceivedEvent(nec.NetworkIncomingEvent,
BookmarksHelper):
name = 'private-storage-bookmarks-received'
base_network_events = ['private-storage-received']
def generate(self):
self.conn = self.base_event.conn
self.storage_node = self.base_event.storage_node
if self.base_event.namespace != nbxmpp.NS_BOOKMARKS:
return
self.parse_bookmarks()
return True
class BookmarksReceivedEvent(nec.NetworkIncomingEvent):
name = 'bookmarks-received'
base_network_events = ['private-storage-bookmarks-received',
'pubsub-bookmarks-received']
def generate(self):
self.conn = self.base_event.conn
self.bookmarks = self.base_event.bookmarks
return True
class PrivateStorageRosternotesReceivedEvent(nec.NetworkIncomingEvent):
name = 'private-storage-rosternotes-received'
base_network_events = ['private-storage-received']
def generate(self):
self.conn = self.base_event.conn
if self.base_event.namespace != nbxmpp.NS_ROSTERNOTES:
return
notes = self.base_event.storage_node.getTags('note')
self.annotations = {}
for note in notes:
try:
jid = helpers.parse_jid(note.getAttr('jid'))
except helpers.InvalidFormat:
log.warning('Invalid JID: %s, ignoring it' % note.getAttr('jid'))
continue
annotation = note.getData()
self.annotations[jid] = annotation
if self.annotations:
return True
class RosternotesReceivedEvent(nec.NetworkIncomingEvent):
name = 'rosternotes-received'
base_network_events = ['private-storage-rosternotes-received']
def generate(self):
self.conn = self.base_event.conn
self.annotations = self.base_event.annotations
return True
class PubsubReceivedEvent(nec.NetworkIncomingEvent):
name = 'pubsub-received'
base_network_events = []
def generate(self):
self.pubsub_node = self.stanza.getTag('pubsub')
if not self.pubsub_node:
return
self.items_node = self.pubsub_node.getTag('items')
if not self.items_node:
return
self.item_node = self.items_node.getTag('item')
if not self.item_node:
return
children = self.item_node.getChildren()
if not children:
return
self.node = children[0]
return True
class PubsubBookmarksReceivedEvent(nec.NetworkIncomingEvent, BookmarksHelper):
name = 'pubsub-bookmarks-received'
base_network_events = ['pubsub-received']
def generate(self):
self.conn = self.base_event.conn
self.storage_node = self.base_event.node
        ns = self.storage_node.getNamespace()
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
import os
import re
from stat import *
class Xen(Plugin, RedHatPlugin):
"""Xen virtualization
"""
plugin_name = 'xen'
profiles = ('virt',)
def determine_xen_host(self):
if os.access("/proc/acpi/dsdt", os.R_OK):
result = self.call_ext_prog("grep -qi xen /proc/acpi/dsdt")
if result['status'] == 0:
return "hvm"
if os.access("/proc/xen/capabilities", os.R_OK):
result = self.call_ext_prog(
"grep -q control_d /proc/xen/capabilities")
if result['status'] == 0:
return "dom0"
else:
return "domU"
return "baremetal"
def check_enabled(self):
return (self.determine_xen_host() == "baremetal")
def is_running_xenstored(self):
xs_pid = self.call_ext_prog("pidof xenstored")['output']
xs_pidnum = re.split('\n$', xs_pid)[0]
return xs_pidnum.isdigit()
def dom_collect_proc(self):
self.add_copy_spec([
"/proc/xen/balloon",
"/proc/xen/capabilities",
"/proc/xen/xsd_kva",
"/proc/xen/xsd_port"])
# determine if CPU has PAE support
self.add_cmd_output("grep pae /proc/cpuinfo")
# determine if CPU has Intel-VT or AMD-V support
self.add_cmd_output("egrep -e 'vmx|svm' /proc/cpuinfo")
def setup(self):
host_type = self.determine_xen_host()
        if host_type == "domU":
# we should collect /proc/xen and /sys/hypervisor
self.dom_collect_proc()
# determine if hardware virtualization support is enabled
# in BIOS: /sys/hypervisor/properties/capabilities
            self.add_copy_spec("/sys/hypervisor")
elif host_type == "hvm":
# what do we collect here???
pass
elif host_type == "dom0":
# default of dom0, collect lots of system information
self.add_copy_spec([
"/var/log/xen",
"/etc/xen",
"/sys/hypervisor/version",
"/sys/hypervisor/compilation",
"/sys/hypervisor/properties",
"/sys/hypervisor/type"])
self.add_cmd_output([
"xm dmesg",
"xm info",
"xm list",
"xm list --long",
"brctl show"
])
self.dom_collect_proc()
if self.is_running_xenstored():
self.add_copy_spec("/sys/hypervisor/uuid")
self.add_cmd_output("xenstore-ls")
else:
                # we need the raw tdb file instead, since we cannot get
                # xenstore-ls output.
self.add_copy_spec("/var/lib/xenstored/tdb")
# FIXME: we *might* want to collect things in /sys/bus/xen*,
# /sys/class/xen*, /sys/devices/xen*, /sys/modules/blk*,
# /sys/modules/net*, but I've never heard of them actually being
# useful, so I'll leave it out for now
else:
# for bare-metal, we don't have to do anything special
return # USEFUL
self.add_custom_text("Xen hostType: "+host_type)
# vim: set et ts=4 sw=4 :
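# A stripped-down sketch of the same sos Plugin hooks the Xen plugin uses
# above (check_enabled, add_copy_spec, add_cmd_output). The plugin name,
# paths and command are invented placeholders, not a real collector.
class ExampleVirt(Plugin, RedHatPlugin):
    """Illustrative skeleton only."""
    plugin_name = 'examplevirt'
    profiles = ('virt',)

    def check_enabled(self):
        # only run when the (hypothetical) marker file exists
        return os.access("/proc/examplevirt", os.R_OK)

    def setup(self):
        self.add_copy_spec([
            "/proc/examplevirt",
            "/etc/examplevirt"])
        self.add_cmd_output("examplevirt-ctl list")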
# -*- coding: utf-8 -*-
## Copyright © 2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from __future__ import division,absolute_import
from rainman.models import Model
from rainman.models.site import Site
from django.db import models as m
from django.db.models import Q
# Tables for environmental effects.
# Note that table names are different for Hysterical Raisins.
class EnvGroup(Model):
class Meta(Model.Meta):
unique_together = (("site", "name"),)
db_table="rainman_paramgroup"
def __unicode__(self):
return self.name
name = m.CharField(max_length=200)
comment = m.CharField(max_length=200,blank=True)
site = m.ForeignKey(Site,related_name="envgroups")
factor = m.FloatField(default=1.0, help_text="Base Factor")
rain = m.BooleanField(default=True,help_text="stop when it's raining?")
def __init__(self,*a,**k):
        super(EnvGroup,self).__init__(*a,**k)
self.env_cache = {}
def list_valves(self):
return u"¦".join((d.name for d in self.valves.all()))
def refresh(self):
super(EnvGroup,self).refresh()
self.env_cache = {}
def env_factor_one(self, tws, h):
p=4 # power factor, favoring nearest-neighbor
qtemp,qwind,qsun = tws
if qtemp and h.temp is None: return None
if qwind and h.wind is None: return None
if qsun and h.sun is None: return None
q=Q()
        q &= Q(temp__isnull=not qtemp)
q &= Q(wind__isnull=not qwind)
q &= Q(sun__isnull=not qsun)
sum_f = 0
sum_w = 0
try:
ec = self.env_cache[tws]
except KeyError:
self.env_cache[tws] = ec = list(self.items.filter(q))
for ef in ec:
d=0
if qtemp:
d += (h.temp-ef.temp)**2
if qwind:
d += (h.wind-ef.wind)**2
if qsun:
d += (h.sun-ef.sun)**2
d = d**(p*0.5)
if d < 0.001: # close enough
return ef.factor
sum_f += ef.factor/d
sum_w += 1/d
if not sum_w:
return None
return sum_f / sum_w
def env_factor(self, h, logger=None):
"""Calculate a weighted factor for history entry @h, based on the given environmental parameters"""
ql=(
(6,(True,True,True)),
(4,(False,True,True)),
(4,(True,False,True)),
(4,(True,True,False)),
(1,(True,False,False)),
(1,(False,True,False)),
(1,(False,False,True)),
)
sum_f = 1 # if there are no data, return 1
sum_w = 1
n = 1
for weight,tws in ql:
f = self.env_factor_one(tws,h)
if f is not None:
if logger:
logger("Simple factor %s%s%s: %f" % ("T" if tws[0] else "-", "W" if tws[1] else "-", "S" if tws[2] else "-", f))
sum_f *= f**weight
sum_w += weight
n += 1
return sum_f ** (n/sum_w)
@property
def schedules(self):
from rainman.models.schedule import Schedule
return Schedule.objects.filter(valve__envgroup=self)
class EnvItem(Model):
class Meta(Model.Meta):
db_table="rainman_environmenteffect"
def __unicode__(self):
return u"@%s %s¦%s¦%s" % (self.group.name,self.temp,self.wind,self.sun)
group = m.ForeignKey(EnvGroup,db_column="param_group_id",related_name="items")
factor = m.FloatField(default=1.0, help_text="Factor to use at this data point")
# these are single- or multi-dimensional data points for finding a reasonable factor
temp = m.FloatField(blank=True,null=True, help_text="average temperature (°C)")
wind = m.FloatField(blank=True,null=True, help_text="wind speed (m/s or whatever)")
sun = m.FloatField(blank=True,null=True, help_text="how much sunshine was there (0-1)") # measured value
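# env_factor_one() above is inverse-distance weighting: each data point
# contributes factor/d with weight 1/d, where d = (sum of squared component
# deltas) ** (p/2) and p = 4 favours the nearest neighbour. A self-contained
# one-dimensional illustration with made-up numbers:
def idw_factor(points, probe, p=4):
    """points: iterable of (value, factor); probe: value to interpolate for."""
    sum_f = sum_w = 0.0
    for value, factor in points:
        d = ((probe - value) ** 2) ** (p * 0.5)
        if d < 0.001:  # effectively sitting on a data point
            return factor
        sum_f += factor / d
        sum_w += 1 / d
    return sum_f / sum_w if sum_w else None

# Halfway between two points the result lands halfway between their factors:
# idw_factor([(10.0, 0.8), (30.0, 1.4)], 20.0) -> 1.1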
import csv
from dateutil.parser import parse
from adoptarbol.tree.models import Tree
def load(filename):
with open(filename, encoding='utf-8') as f:
reader = csv.reader(f)
header = next(reader)
def pos_for(field):
return header.index(field)
def float_or_none(string):
try:
return(float(string))
except ValueError:
return None
for row in reader:
# codigo = str(row[pos_for('codigo')]),
print('Procesando ', row)
tree = {'code': row[pos_for('codigo')],
                'common_name': row[pos_for('especie')],
'scientific_name': row[pos_for('cientifico')],
'family': row[pos_for('familia')],
'coord_utm_e': float_or_none(row[pos_for('utm_x')].replace(',', '.')),
'coord_utm_n': float_or_none(row[pos_for('utm_y')].replace(',', '.')),
                'coord_utm_zone_letter': row[pos_for('utm_zone')],
'coord_utm_zone_n': row[pos_for('utm_south')],
'coord_lat': float_or_none(row[pos_for('lat')].replace(',', '.')),
'coord_lon': float_or_none(row[pos_for('long')].replace(',', '.')),
'photo': row[pos_for('fotos')],
'diameter': row[pos_for('dia')],
'height': row[pos_for('alt')],
'circ': row[pos_for('circ')],
'base_area': float_or_none(row[pos_for('areabasal')].replace(',', '.')),
'size_class': row[pos_for('clasetamano')],
'quality': float_or_none(row[pos_for('calidad')].replace(',', '.')),
'relevance': row[pos_for('relevancia')],
'notes': row[pos_for('notas')],
'phenology': row[pos_for('fenologia')],
'observation': row[pos_for('obs')],
'surveyed_on': parse(row[pos_for('fechahora')]),
}
t = Tree(**tree)
t.save()
"""
if __name__ == '__main__':
app = create_app(CONFIG)
manager = Manager(app)
with app.app_context():
load()
"""
from .endpoint import Endpoint
from .exceptions import MissingRequiredFieldError
from .fileuploads_endpoint import Fileuploads
from .. import RequestFactory, DatasourceItem, PaginationItem, ConnectionItem
import os
import logging
import copy
import cgi
from contextlib import closing
# The maximum size of a file that can be published in a single request is 64MB
FILESIZE_LIMIT = 1024 * 1024 * 64 # 64MB
ALLOWED_FILE_EXTENSIONS = ['tds', 'tdsx', 'tde']
logger = logging.getLogger('tableau.endpoint.datasources')
class Datasources(Endpoint):
@property
def baseurl(self):
return "{0}/sites/{1}/datasources".format(self.parent_srv.baseurl, self.parent_srv.site_id)
# Get all datasources
def get(self, req_options=None):
logger.info('Querying all datasources on site')
url = self.baseurl
server_response = self.get_request(url, req_options)
pagination_item = PaginationItem.from_response(server_response.content)
all_datasource_items = DatasourceItem.from_response(server_response.content)
return all_datasource_items, pagination_item
# Get 1 datasource by id
def get_by_id(self, datasource_id):
if not datasource_id:
error = "Datasource ID undefined."
raise ValueError(error)
logger.info('Querying single datasource (ID: {0})'.format(datasource_id))
url = "{0}/{1}".format(self.baseurl, datasource_id)
server_response = self.get_request(url)
return DatasourceItem.from_response(server_response.content)[0]
# Populate datasource item's connections
def populate_connections(self, datasource_item):
if not datasource_item.id:
error = 'Datasource item missing ID. Datasource must be retrieved from server first.'
raise MissingRequiredFieldError(error)
url = '{0}/{1}/connections'.format(self.baseurl, datasource_item.id)
server_response = self.get_request(url)
datasource_item._set_connections(ConnectionItem.from_response(server_response.content))
logger.info('Populated connections for datasource (ID: {0})'.format(datasource_item.id))
# Delete 1 datasource by id
def delete(self, datasource_id):
if not datasource_id:
error = "Datasource ID undefined."
            raise ValueError(error)
url = "{0}/{1}".format(self.baseurl, datasource_id)
self.delete_request(url)
        logger.info('Deleted single datasource (ID: {0})'.format(datasource_id))
# Download 1 datasource by id
def download(self, datasource_id, filepath=None):
if not datasource_id:
error = "Datasource ID undefined."
raise ValueError(error)
url = "{0}/{1}/content".format(self.baseurl, datasource_id)
with closing(self.get_request(url, parameters={'stream': True})) as server_response:
_, params = cgi.parse_header(server_response.headers['Content-Disposition'])
filename = os.path.basename(params['filename'])
if filepath is None:
filepath = filename
elif os.path.isdir(filepath):
filepath = os.path.join(filepath, filename)
with open(filepath, 'wb') as f:
for chunk in server_response.iter_content(1024): # 1KB
f.write(chunk)
logger.info('Downloaded datasource to {0} (ID: {1})'.format(filepath, datasource_id))
return os.path.abspath(filepath)
# Update datasource
def update(self, datasource_item):
if not datasource_item.id:
error = 'Datasource item missing ID. Datasource must be retrieved from server first.'
raise MissingRequiredFieldError(error)
url = "{0}/{1}".format(self.baseurl, datasource_item.id)
update_req = RequestFactory.Datasource.update_req(datasource_item)
server_response = self.put_request(url, update_req)
logger.info('Updated datasource item (ID: {0})'.format(datasource_item.id))
updated_datasource = copy.copy(datasource_item)
return updated_datasource._parse_common_tags(server_response.content)
# Publish datasource
def publish(self, datasource_item, file_path, mode, connection_credentials=None):
if not os.path.isfile(file_path):
error = "File path does not lead to an existing file."
raise IOError(error)
if not mode or not hasattr(self.parent_srv.PublishMode, mode):
error = 'Invalid mode defined.'
raise ValueError(error)
filename = os.path.basename(file_path)
file_extension = os.path.splitext(filename)[1][1:]
# If name is not defined, grab the name from the file to publish
if not datasource_item.name:
datasource_item.name = os.path.splitext(filename)[0]
if file_extension not in ALLOWED_FILE_EXTENSIONS:
error = "Only {} files can be published as datasources.".format(', '.join(ALLOWED_FILE_EXTENSIONS))
raise ValueError(error)
# Construct the url with the defined mode
url = "{0}?datasourceType={1}".format(self.baseurl, file_extension)
if mode == self.parent_srv.PublishMode.Overwrite or mode == self.parent_srv.PublishMode.Append:
url += '&{0}=true'.format(mode.lower())
# Determine if chunking is required (64MB is the limit for single upload method)
if os.path.getsize(file_path) >= FILESIZE_LIMIT:
logger.info('Publishing {0} to server with chunking method (datasource over 64MB)'.format(filename))
upload_session_id = Fileuploads.upload_chunks(self.parent_srv, file_path)
url = "{0}&uploadSessionId={1}".format(url, upload_session_id)
xml_request, content_type = RequestFactory.Datasource.publish_req_chunked(datasource_item,
connection_credentials)
else:
logger.info('Publishing {0} to server'.format(filename))
with open(file_path, 'rb') as f:
file_contents = f.read()
xml_request, content_type = RequestFactory.Datasource.publish_req(datasource_item,
filename,
file_contents,
connection_credentials)
server_response = self.post_request(url, xml_request, content_type)
new_datasource = DatasourceItem.from_response(server_response.content)[0]
logger.info('Published {0} (ID: {1})'.format(filename, new_datasource.id))
return new_datasource
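# Hedged usage sketch for the endpoint above, assuming the package's public
# surface (tableauserverclient imported as TSC, with Server, TableauAuth and
# DatasourceItem); the URL, credentials, project id and file path are all
# placeholders.
import tableauserverclient as TSC

server = TSC.Server('https://tableau.example.com')
with server.auth.sign_in(TSC.TableauAuth('user', 'password')):
    item = TSC.DatasourceItem(project_id='abc123')
    # files over FILESIZE_LIMIT (64MB) are transparently chunk-uploaded
    item = server.datasources.publish(item, 'sales.tdsx',
                                      mode=TSC.Server.PublishMode.CreateNew)
    print(item.id)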
################
# Main
###################################################################################################
def main(argv=None):
''' Runs the program. There are three ways to pass arguments
1) environment variables TFB_*
2) configuration file benchmark.cfg
3) command line flags
In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
but command line flags have the final say
'''
    # Do argv default this way, as doing it in the function declaration sets it at compile time
if argv is None:
argv = sys.argv
# Enable unbuffered output so messages will appear in the proper order with subprocess output.
sys.stdout=Unbuffered(sys.stdout)
# Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
sys.path.append('.')
# 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
sys.path.append('toolset/setup/linux')
# Update environment for shell scripts
fwroot = setup_util.get_fwroot()
if not fwroot:
fwroot = os.getcwd()
setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
print "FWROOT is %s"%setup_util.get_fwroot()
    conf_parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
args, remaining_argv = conf_parser.parse_known_args()
try:
with open (args.conf_file):
config = ConfigParser.SafeConfigParser()
config.read([os.getcwd() + '/' + args.conf_file])
defaults = dict(config.items("Defaults"))
# Convert strings into proper python types
for k,v in defaults.iteritems():
try:
defaults[k] = literal_eval(v)
except Exception:
pass
except IOError:
if args.conf_file != 'benchmark.cfg':
print 'Configuration file not found!'
defaults = { "client-host":"localhost"}
##########################################################
# Set up default values
##########################################################
serverHost = os.environ.get('TFB_SERVER_HOST')
clientHost = os.environ.get('TFB_CLIENT_HOST')
clientUser = os.environ.get('TFB_CLIENT_USER')
clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
runnerUser = os.environ.get('TFB_RUNNER_USER')
databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
maxThreads = 8
try:
maxThreads = multiprocessing.cpu_count()
except Exception:
pass
##########################################################
# Set up argument parser
##########################################################
parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
parents=[conf_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
0:1:5 creates [0, 1, 2, 3, 4]
''')
# SSH options
parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
parser.add_argument('-r', '--runner-user', default=runnerUser, help='The user to run each test as.')
parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
help='The key to use for SSH to the client instance.')
parser.add_argument('-d', '--database-host', default=databaHost,
help='The database server. If not provided, defaults to the value of --client-host.')
parser.add_argument('--database-user', default=databaUser,
help='The username to use for SSH to the database instance. If not provided, defaults to the value of --client-user.')
parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
help='The key to use for SSH to the database instance. If not provided, defaults to the value of --client-identity-file.')
parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
# Install options
parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
help='Runs installation script(s) before continuing on to execute the tests.')
parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified',
                        help='''Affects where software is installed: with unified, all server software is installed into a single directory.
                        With pertest, each test gets its own installs directory, but installation takes longer.''')
parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit')
parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
# Test options
parser.add_argument('--test', nargs='+', help='names of tests to run')
parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application/framework server (the one running ' +
                        'this binary).')
parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
# Benchmark options
parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency value (type int-sequence)', action=StoreSeqAction)
parser.add_argument('--query-levels', default=[1, 5,10,15,20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction)
parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
    parser.add_argument('--duration', default=
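# The "(type int-sequence)" arguments above accept '5', '1,3,6' or
# 'start:step:end'. StoreSeqAction is defined elsewhere in this toolset; its
# parsing amounts to something like this sketch:
def parse_int_sequence(text):
    # '5' -> [5]; '1,3,6' -> [1, 3, 6]; '1:3:15' -> [1, 4, 7, 10, 13]
    if ':' in text:
        start, step, end = [int(p) for p in text.split(':')]
        return list(range(start, end, step))
    return [int(p) for p in text.split(',')]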
from Cython.Compiler.ModuleNode import ModuleNode
from Cython.Compiler.Symtab import ModuleScope
from Cython.TestUtils import TransformTest
from Cython.Compiler.Visitor import MethodDispatcherTransform
from Cython.Compiler.ParseTreeTransforms import (
NormalizeTree, AnalyseDeclarationsTransform,
AnalyseExpressionsTransform, InterpretCompilerDirectives)
class TestMethodDispatcherTransform(TransformTest):
    _tree = None
    def _build_tree(self):
if self._tree is None:
context = None
def fake_module(node):
scope = ModuleScope('test', None, None)
return ModuleNode(node.pos, doc=None, body=node,
scope=scope, full_module_name='test',
directive_comments={})
pipeline = [
fake_module,
NormalizeTree(context),
InterpretCompilerDirectives(context, {}),
AnalyseDeclarationsTransform(context),
AnalyseExpressionsTransform(context),
]
self._tree = self.run_pipeline(pipeline, u"""
cdef bytes s = b'asdfg'
cdef dict d = {1:2}
x = s * 3
d.get('test')
""")
return self._tree
def test_builtin_method(self):
calls = [0]
class Test(MethodDispatcherTransform):
def _handle_simple_method_dict_get(self, node, func, args, unbound):
calls[0] += 1
return node
tree = self._build_tree()
Test(None)(tree)
self.assertEqual(1, calls[0])
def test_binop_method(self):
calls = {'bytes': 0, 'object': 0}
class Test(MethodDispatcherTransform):
def _handle_simple_method_bytes___mul__(self, node, func, args, unbound):
calls['bytes'] += 1
return node
def _handle_simple_method_object___mul__(self, node, func, args, unbound):
calls['object'] += 1
return node
tree = self._build_tree()
Test(None)(tree)
self.assertEqual(1, calls['bytes'])
self.assertEqual(0, calls['object'])
"""
Copyright (C) 2016 ECHO Wizard : Modded by TeamGREEN
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcgui
import urllib
import time
from urllib import FancyURLopener
import sys
class MyOpener(FancyURLopener):
version = "WhosTheDaddy?"
myopener = MyOpener()
urlretrieve = MyOpener().retrieve
urlopen = MyOpener().open
AddonTitle = "[COLOR green]OptimusGREEN Tools[/COLOR]"
dialog = xbmcgui.Dialog()
def download(url, dest, dp=None):
if not dp:
dp = xbmcgui.DialogProgress()
    # dp.create("[COLOR gold]Download In Progress[/COLOR]", ' ', ' ', ' ')
# dp.update(0)
start_time=time.time()
urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))
def auto(url, dest, dp = None):
dp = xbmcgui.DialogProgress()
    start_time = time.time()
urlretrieve(url, dest, lambda nb, bs, fs: _pbhookauto(nb, bs, fs, dp, start_time))
def _pbhookauto(numblocks, blocksize, filesize, dp, start_time):
    # silent hook for auto(): accepts the arguments passed above, shows nothing
    pass
def _pbhook(numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0:
eta = (filesize - numblocks * blocksize) / kbps_speed
else:
eta = 0
kbps_speed = kbps_speed / 1024
mbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '[COLOR green]%.02f MB[/COLOR] of [COLOR white][B]%.02f MB[/B][/COLOR]' % (currently_downloaded, total)
e = '[COLOR white][B]Speed: [/B][/COLOR][COLOR green]%.02f Mb/s ' % mbps_speed + '[/COLOR]'
e += '[COLOR white][B]ETA: [/B][/COLOR][COLOR green]%02d:%02d' % divmod(eta, 60) + '[/COLOR]'
# dp.update(percent, "",mbs, e)
except:
percent = 100
# dp.update(percent)
# if dp.iscanceled():
# dialog.ok(AddonTitle, 'The download was cancelled.')
# dp.close()
quit()
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.odoo-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class AccountConfigSettings(models.TransientModel):
_inherit = 'account.config.settings'
due_cost_service_id = fields.Many2one(
related='company_id.due_cost_service_id',
help='Default Service for RiBa Due Cost (collection fees) on invoice',
domain=[('type', '=', 'service')])
def default_get(self, cr, uid, fields, context=None):
res = super(AccountConfigSettings, self).default_get(
cr, uid, fields, context)
if res:
user = self.pool['res.users'].browse(cr, uid, uid, context)
res['due_cost_service_id'] = user.company_id.due_cost_service_id.id
return res
class ResCompany(models.Model):
_inherit = 'res.company'
due_cost_service_id = fields.Many2one('product.product')
from notifications_utils.clients.antivirus.antivirus_client import (
AntivirusClient,
)
from notifications_utils.clients.redis.redis_client import RedisClient
from notifications_utils.clients.zendesk.zendesk_client import ZendeskClient
antivirus_client = AntivirusClient()
zendesk_client = ZendeskClient()
redis_client = RedisClient()
masterSection.ms('Replicating each segment during replay',
on_masters(lambda m: (
((m.master.replicationTicks - m.master.logSyncTicks) /
m.clockFrequency) /
math.ceil((m.master.replicationBytes - m.master.logSyncBytes) /
m.segmentSize) /
m.master.replicas)))
masterSection.ms('Replicating each segment during log sync',
on_masters(lambda m: (
(m.master.logSyncTicks / m.clockFrequency) /
math.ceil(m.master.logSyncBytes / m.segmentSize) /
m.master.replicas)))
masterSection.ms('RPC latency replicating each segment',
on_masters(lambda m: (
(m.master.backupCloseTicks + m.master.logSyncCloseTicks) /
m.clockFrequency /
(m.master.backupCloseCount + m.master.logSyncCloseCount))))
masterSection.ms('RPC latency replicating each segment during replay',
on_masters(lambda m: m.master.backupCloseTicks / m.clockFrequency /
m.master.backupCloseCount))
masterSection.ms('RPC latency replicating each segment during log sync',
on_masters(lambda m: m.master.logSyncCloseTicks / m.clockFrequency /
m.master.logSyncCloseCount))
master_ticks('Replication',
'master.replicationTicks')
master_ticks('Client RPCs Active',
'transport.clientRpcsActiveTicks')
masterSection.ms('Average GRD completion time',
on_masters(lambda m: (m.master.segmentReadTicks /
m.master.segmentReadCount /
m.clockFrequency)))
backupSection = report.add(Section('Backup Time'))
def backup_ticks(label, field):
"""This is a shortcut for adding to the backupSection a recorded number
of ticks that are a fraction of the total recovery.
@type label: string
@param label: the key for the line
@type field: string
@param field: the field within a backup's metrics that collected ticks
"""
backupSection.ms(label,
on_backups(lambda b: eval('b.' + field) /
b.clockFrequency),
total=recoveryTime)
backup_ticks('RPC service time',
'backup.serviceTicks')
backup_ticks('startReadingData RPC',
'rpc.backupStartReadingDataTicks')
backup_ticks('write RPC',
'rpc.backupWriteTicks')
backup_ticks('Open segment memset',
'backup.writeClearTicks')
backup_ticks('Write copy',
'backup.writeCopyTicks')
backupSection.ms('Other write RPC',
on_backups(lambda b: (b.rpc.backupWriteTicks -
b.backup.writeClearTicks -
b.backup.writeCopyTicks) /
b.clockFrequency),
total=recoveryTime)
backup_ticks('getRecoveryData RPC',
'rpc.backupGetRecoveryDataTicks')
backupSection.ms('Other',
on_backups(lambda b: (b.backup.serviceTicks -
b.rpc.backupStartReadingDataTicks -
b.rpc.backupWriteTicks -
b.rpc.backupGetRecoveryDataTicks) /
b.clockFrequency),
total=recoveryTime)
backup_ticks('Transmitting in transport',
'transport.transmit.ticks')
backup_ticks('Filtering segments',
'backup.filterTicks')
backup_ticks('Reading segments',
'backup.readingDataTicks')
backup_ticks('Using disk',
'backup.storageReadTicks')
backupSection.line('getRecoveryData completions',
on_backups(lambda b: b.backup.readCompletionCount))
backupSection.line('getRecoveryData retry fraction',
on_backups(lambda b: (b.rpc.backupGetRecoveryDataCount -
b.backup.readCompletionCount) /
b.rpc.backupGetRecoveryDataCount))
efficiencySection = report.add(Section('Efficiency'))
efficiencySection.line('recoverSegment CPU',
(sum([m.master.recoverSegmentTicks / m.clockFrequency
for m in masters]) * 1000 /
sum([m.master.segmentReadCount
for m in masters])),
unit='ms avg')
efficiencySection.line('Writing a segment',
(sum([b.rpc.backupWriteTicks / b.clockFrequency
for b in backups]) * 1000 /
# Divide count by 2 since each segment does two writes:
# one to open the segment and one to write the data.
sum([b.rpc.backupWriteCount / 2
for b in backups])),
unit='ms avg')
efficiencySection.line('Filtering a segment',
sum([b.backup.filterTicks / b.clockFrequency * 1000
for b in backups]) /
sum([b.backup.storageReadCount
for b in backups]),
unit='ms avg')
efficiencySection.line('Memory bandwidth (backup copies)',
on_backups(lambda b: (
(b.backup.writeCopyBytes / 2**30) /
(b.backup.writeCopyTicks / b.clockFrequency))),
unit='GB/s',
summaryFns=[AVG, MIN])
networkSection = report.add(Section('Network Utilization'))
networkSection.line('Aggregate',
(sum([host.transport.transmit.byteCount
for host in [coord] + masters + backups]) *
8 / 2**30 / recoveryTime),
unit='Gb/s',
summaryFns=[AVG, FRAC(data.totalNodes*25)])
networkSection.line('Master in',
on_masters(lambda m: (m.transport.receive.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Master out',
on_masters(lambda m: (m.transport.transmit.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Master out during replication',
on_masters(lambda m: (m.master.replicationBytes * 8 / 2**30) /
(m.master.replicationTicks / m.clockFrequency)),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Master out during log sync',
on_masters(lambda m: (m.master.logSyncBytes * 8 / 2**30) /
(m.master.logSyncTicks / m.clockFrequency)),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Backup in',
on_backups(lambda b: (b.transport.receive.byteCount * 8 / 2**30) /
recoveryTime),
unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
networkSection.line('Backup out',
on_backups(lambda b: (b.transport.transmit.byteCount * 8 / 2**30) /
recoveryTime),
                    unit='Gb/s',
summaryFns=[AVG, MIN, SUM])
diskSection = report.add(Section('Disk Utilization'))
diskSection.line('Effective bandwidth',
on_backups(lambda b: (b.backup.storageReadBytes +
b.backup.storageWriteBytes) /
2**20 / recoveryTime),
                    unit='MB/s',
summaryFns=[AVG, MIN, SUM])
def active_bandwidth(b):
totalBytes = b.backup.storageReadBytes + b.backup.storageWriteBytes
totalTicks = b.backup.storageReadTicks + b.backup.storageWriteTicks
return ((totalBytes / 2**20) /
(totalTicks / b.clockFrequency))
diskSection.line('Active bandwidth',
on_backups(active_bandwidth),
unit='MB/s',
summaryFns=[AVG, MIN, SUM])
diskSection.line('Active bandwidth reading',
on_backups(lambda b: (b.backup.storageReadBytes / 2**20) /
(b.backup.storageReadTicks / b.clockFrequency)),
unit='MB/s',
summaryFns=[AVG, MIN, SUM])
diskSection.line('Active bandwidth writing',
on_backups(lambda b: (b.backup.storageWriteBytes / 2**20) /
(b.backup.storageWriteTicks / b.clockFrequency)),
unit='MB/s',
                    summaryFns=[AVG, MIN, SUM])
"""
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
from abc import ABCMeta, abstractmethod
from django.contrib.auth.models import User, Group
from xmodule.modulestore import Location
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError
from xmodule.modulestore.django import loc_mapper
from xmodule.modulestore.locator import CourseLocator, Locator
class CourseContextRequired(Exception):
"""
Raised when a course_context is required to determine permissions
"""
pass
class AccessRole(object):
"""
Object representing a role with particular access to a resource
"""
__metaclass__ = ABCMeta
@abstractmethod
def has_user(self, user): # pylint: disable=unused-argument
"""
Return whether the supplied django user has access to this role.
"""
return False
@abstractmethod
def add_users(self, *users):
"""
Add the role to the supplied django users.
"""
pass
@abstractmethod
def remove_users(self, *users):
"""
Remove the role from the supplied django users.
"""
pass
@abstractmethod
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
return User.objects.none()
class GlobalStaff(AccessRole):
"""
The global staff role
"""
def has_user(self, user):
return user.is_staff
def add_users(self, *users):
for user in users:
user.is_staff = True
user.save()
def remove_users(self, *users):
for user in users:
user.is_staff = False
user.save()
def users_with_role(self):
raise Exception("This operation is un-indexed, and shouldn't be used")
class GroupBasedRole(AccessRole):
"""
A role based on membership to any of a set of groups.
"""
def __init__(self, group_names):
"""
Create a GroupBasedRole from a list of group names
The first element of `group_names` will be the preferred group
to use when adding a user to this Role.
If a user is a member of any of the groups in the list, then
        they will be considered a member of the Role
"""
self._group_names = [name.lower() for name in group_names]
def has_user(self, user):
"""
Return whether the supplied django user has access to this role.
"""
# pylint: disable=protected-access
if not user.is_authenticated():
return False
if not hasattr(user, '_groups'):
user._groups = set(name.lower() for name in user.groups.values_list('name', flat=True))
return len(user._groups.intersection(self._group_names)) > 0
def add_users(self, *users):
"""
Add the supplied django users to this role.
"""
group, _ = Group.objects.get_or_create(name=self._group_names[0])
group.user_set.add(*users)
        for user in users:
if hasattr(user, '_groups'):
del user._groups
def remove_users(self, *users):
"""
Remove the supplied django users from this role.
"""
group, _ = Group.objects.get_or_create(name=self._group_names[0])
        group.user_set.remove(*users)
for user in users:
if hasattr(user, '_groups'):
del user._groups
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
return User.objects.filter(groups__name__in=self._group_names)
class CourseRole(GroupBasedRole):
"""
A named role in a particular course
"""
def __init__(self, role, location, course_context=None):
"""
Location may be either a Location, a string, dict, or tuple which Location will accept
in its constructor, or a CourseLocator. Handle all these giving some preference to
the preferred naming.
"""
# TODO: figure out how to make the group name generation lazy so it doesn't force the
# loc mapping?
location = Locator.to_locator_or_location(location)
# direct copy from auth.authz.get_all_course_role_groupnames will refactor to one impl asap
groupnames = []
# pylint: disable=no-member
if isinstance(location, Location):
try:
groupnames.append('{0}_{1}'.format(role, location.course_id))
except InvalidLocationError: # will occur on old locations where location is not of category course
if course_context is None:
raise CourseContextRequired()
else:
groupnames.append('{0}_{1}'.format(role, course_context))
try:
locator = loc_mapper().translate_location(location.course_id, location, False, False)
groupnames.append('{0}_{1}'.format(role, locator.package_id))
except (InvalidLocationError, ItemNotFoundError):
# if it's never been mapped, the auth won't be via the Locator syntax
pass
# least preferred legacy role_course format
groupnames.append('{0}_{1}'.format(role, location.course))
elif isinstance(location, CourseLocator):
groupnames.append('{0}_{1}'.format(role, location.package_id))
# handle old Location syntax
old_location = loc_mapper().translate_locator_to_location(location, get_course=True)
if old_location:
# the slashified version of the course_id (myu/mycourse/myrun)
groupnames.append('{0}_{1}'.format(role, old_location.course_id))
# add the least desirable but sometimes occurring format.
groupnames.append('{0}_{1}'.format(role, old_location.course))
super(CourseRole, self).__init__(groupnames)
class OrgRole(GroupBasedRole):
"""
A named role in a particular org
"""
def __init__(self, role, location):
# pylint: disable=no-member
location = Location(location)
super(OrgRole, self).__init__(['{}_{}'.format(role, location.org)])
class CourseStaffRole(CourseRole):
"""A Staff member of a course"""
def __init__(self, *args, **kwargs):
super(CourseStaffRole, self).__init__('staff', *args, **kwargs)
class CourseInstructorRole(CourseRole):
"""A course Instructor"""
def __init__(self, *args, **kwargs):
super(CourseInstructorRole, self).__init__('instructor', *args, **kwargs)
class CourseBetaTesterRole(CourseRole):
"""A course Beta Tester"""
def __init__(self, *args, **kwargs):
super(CourseBetaTesterRole, self).__init__('beta_testers', *args, **kwargs)
class OrgStaffRole(OrgRole):
"""An organization staff member"""
def __init__(self, *args, **kwargs):
super(OrgStaffRole, self).__init__('staff', *args, **kwargs)
class OrgInstructorRole(OrgRole):
"""An organization instructor"""
def __init__(self, *args, **kwargs):
super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)
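# End-to-end, the role classes above compose like this hedged sketch; the
# helper name and course location value are placeholders:
def grant_course_staff(user, course_location):
    """Give `user` the staff role for `course_location` (any form CourseRole accepts)."""
    role = CourseStaffRole(course_location)
    if not role.has_user(user):   # membership check uses the cached user._groups set
        role.add_users(user)      # adds to the preferred (first) group
    return role.users_with_role()  # QuerySet of all staff for the course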
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
import unittest
from model import *
from example_data import expenses, payments, participations, persons, events
kasse = Gruppenkasse.create_new()
kasse.fill_with(expenses, payments, participations)
class TestGruppenkasse(unittest.TestCase):
def setUp(self):
...
    def test_persons(self):
person_names = list(map(lambda p: p.name, kasse.persons))
for name in person_names:
self.assertTrue(name in persons, msg=name)
def test_events(self):
print(kasse.person_dict)
event_names = list(map(lambda p: p.name, kasse.events))
for name in event_names:
self.assertTrue(name in events, msg=name)
for name in events:
self.assertTrue(name in event_names, msg=name)
def test_event(self):
for event in kasse.events:
...#print(event)
def test_person(self):
for person in kasse.persons:
print(person, "\t{:5.2f}".format(person.balance / 100))
def test_payments(self):
print(kasse.payments)
if __name__ == '__main__':
unittest.main()
import mock
import pytest
from rest_framework import exceptions

from addons.wiki.models import WikiPage
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from api.base.settings.defaults import API_BASE
from api_tests.wikis.views.test_wiki_detail import WikiCRUDTestCase
from framework.auth.core import Auth
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
)
from osf.utils.permissions import WRITE, READ
from tests.base import fake
@pytest.fixture()
def user():
return AuthUserFactory()
def create_wiki_payload(name):
return {
'data': {
'type': 'wikis',
'attributes': {
'name': name
}
}
}
@pytest.mark.django_db
class TestNodeWikiList:
@pytest.fixture()
def add_project_wiki_page(self):
def add_page(node, user):
with mock.patch('osf.models.AbstractNode.update_search'):
wiki_page = WikiFactory(node=node, user=user)
WikiVersionFactory(wiki_page=wiki_page)
return wiki_page
return add_page
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def public_wiki(self, add_project_wiki_page, user, public_project):
return add_project_wiki_page(public_project, user)
@pytest.fixture()
def public_url(self, public_project, public_wiki):
return '/{}nodes/{}/wikis/'.format(API_BASE, public_project._id)
@pytest.fixture()
def private_project(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def private_wiki(self, add_project_wiki_page, user, private_project):
return add_project_wiki_page(private_project, user)
@pytest.fixture()
def private_url(self, private_project, private_wiki):
return '/{}nodes/{}/wikis/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_registration(self, user, public_project, public_wiki):
public_registration = RegistrationFactory(
project=public_project, user=user, is_public=True)
return public_registration
@pytest.fixture()
def public_registration_url(self, public_registration):
return '/{}registrations/{}/wikis/'.format(
API_BASE, public_registration._id)
@pytest.fixture()
def private_registration(self, user, private_project, private_wiki):
private_registration = RegistrationFactory(
project=private_project, user=user)
return private_registration
@pytest.fixture()
def private_registration_url(self, private_registration):
return '/{}registrations/{}/wikis/'.format(
API_BASE, private_registration._id)
def test_return_wikis(
self, app, user, non_contrib, private_registration, private_project,
public_wiki, private_wiki, public_url, private_url,
private_registration_url):
# test_return_public_node_wikis_logged_out_user
res = app.get(public_url)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert public_wiki._id in wiki_ids
# test_return_public_node_wikis_logged_in_non_contributor
res = app.get(public_url, auth=non_contrib.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert public_wiki._id in wiki_ids
# test_return_public_node_wikis_logged_in_contributor
res = app.get(public_url, auth=user.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert public_wiki._id in wiki_ids
# test_return_private_node_wikis_logged_out_user
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_return_private_node_wikis_logged_in_osf_group_member
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
private_project.add_osf_group(group, READ)
res = app.get(private_url, auth=group_mem.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert private_wiki._id in wiki_ids
# test_return_private_node_wikis_logged_in_non_contributor
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_return_private_node_wikis_logged_in_contributor
res = app.get(private_url, auth=user.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert private_wiki._id in wiki_ids
# test_return_registration_wikis_logged_out_user
res = app.get(private_registration_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_return_registration_wikis_logged_in_non_contributor
res = app.get(
private_registration_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_return_registration_wikis_logged_in_contributor
res = app.get(private_registration_url, auth=user.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert WikiPage.objects.get_for_node(private_registration, 'home')._id in wiki_ids
def test_wikis_not_returned_for_withdrawn_registration(
self, app, user, private_registration, private_registration_url):
private_registration.is_public = True
withdrawal = private_registration.retract_registration(
user=user, save=True)
token = list(withdrawal.approval_state.values())[0]['approval_token']
# TODO: Remove mocking when StoredFileNode is implemented
with mock.patch('osf.models.AbstractNode.update_search'):
withdrawal.approve_retraction(user, token)
withdrawal.save()
res = app.get(
            private_registration_url,
auth=user.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_do_not_return_disabled_wiki(self, app, user, public_url, public_project):
public_project.delete_addon('wiki', auth=Auth(user))
res = app.get(public_url, expect_errors=True)
assert res.status_code == 404
    def test_relationship_links(
self, app, user, public_project, private_project,
public_registration, private_registration,
public_url, private_url, public_registration_url,
private_registration_url):
# test_public_node_wikis_relationship_links
res = app.get(public_url)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, public_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, public_project._id)
assert expected_nodes_relationship_url in res.json['data'][
0]['relationships']['node']['links']['related']['href']
assert expected_comments_relationship_url in res.json['data'][
0]['relationships']['comments']['links']['related']['href']
# test_private_node_wikis_relationship_links
res = app.get(private_url, auth=user.auth)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, private_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, private_project._id)
assert expected_nodes_relationship_url in res.json['data'][
            0]['relationships']['node']['links']['related']['href']
        assert expected_comments_relationship_url in res.json['data'][
            0]['relationships']['comments']['links']['related']['href']
from __future__ import division
# Read two integers from STDIN
a = int(raw_input())
b = int(raw_input())
# Print integer division, a//b
print(a // b)
# Print float division, a/b
print(a / b)
# Print divmod of a and b
print(divmod(a, b))
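# Worked example: for a = 17 and b = 3 the three prints yield
#   5         (integer division)
#   5.666...  (true division, enabled by the __future__ import)
#   (5, 2)    (divmod: quotient and remainder)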
# The specific variables and commands will be defined later; here are
# examples of what the commands look like in bytes.
# This is an example of a command to toggle the verbose setting.
# Command is write (0x57), variable is 13 (0x0d)
# and value is 0. The footer is 0x10 0x03.
# 0x02 0x57 0x0d 0x00 0x10 0x03
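# A minimal illustrative sketch (not part of the driver below) of how a write
# frame could be assembled from the layout just described; the helper name and
# the DLE-doubling of payload bytes are assumptions, not Primare documentation.
def _example_build_write_frame(variable, value):
    """Return STX + write + variable + value + DLE ETX, escaping 0x10."""
    payload = chr(variable) + chr(value)
    payload = payload.replace('\x10', '\x10\x10')  # double any DLE byte
    return '\x02\x57' + payload + '\x10\x03'
# e.g. _example_build_write_frame(13, 0) == '\x02\x57\x0d\x00\x10\x03'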
POS_STX = slice(0, 1)
POS_DLE_ETX = slice(-2, None)
POS_CMD_VAR = slice(2, 3)
POS_REPLY_VAR = slice(1, 2)
POS_REPLY_DATA = slice(2, -2)
BYTE_STX = '\x02'
BYTE_WRITE = '\x57'
BYTE_READ = '\x52'
BYTE_DLE_ETX = '\x10\x03'
INDEX_CMD = 0
INDEX_VARIABLE = 1
INDEX_REPLY = 2
INDEX_WAIT = 3
PRIMARE_CMD = {
'power_toggle': ['W', '0100', '01', True],
'power_set': ['W', '81YY', '01YY', False],
'input_set': ['W', '82YY', '02YY', True],
'input_next': ['W', '0201', '02', True],
'input_prev': ['W', '02FF', '02', True],
'volume_set': ['W', '83YY', '03YY', True],
'volume_get': ['W', '0300', '03', True],
'volume_up': ['W', '0301', '03', True],
'volume_down': ['W', '03FF', '03', True],
'balance_adjust': ['W', '04YY', '04', True],
'balance_set': ['W', '84YY', '04YY', True],
'mute_toggle': ['W', '0900', '09', True],
'mute_set': ['W', '89YY', '09YY', True],
'dim_cycle': ['W', '0A00', '0A', True],
'dim_set': ['W', '8AYY', '0AYY', True],
'verbose_toggle': ['W', '0D00', '0D', True],
'verbose_set': ['W', '8DYY', '0DYY', True],
'menu_toggle': ['W', '0E01', '0E', True],
'menu_set': ['W', '8EYY', '0EYY', True],
'remote_cmd': ['W', '0FYY', 'YY', True],
'ir_input_toggle': ['W', '1200', '12', True],
'ir_input_set': ['W', '92YY', '12YY', True],
'recall_factory_settings': ['R', '1300', '', False],
'inputname_current_get': ['R', '1400', '14YY', True],
'inputname_specific_get': ['R', '94YY', '94YY', True],
'manufacturer_get': ['R', '1500', '15', True],
'modelname_get': ['R', '1600', '16', True],
'swversion_get': ['R', '1700', '17', True]
}
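# The 'YY' placeholder in PRIMARE_CMD entries is presumably substituted with
# the argument as a two-digit hex string before the frame is built, e.g. for
# volume_set with level 42: '83YY' -> '83' + '%02X' % 42 == '832A'.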
PRIMARE_REPLY = {
'01': 'power',
'02': 'input',
'03': 'volume',
'04': 'balance',
'09': 'mute',
'0a': 'dim',
'0d': 'verbose',
'0e': 'menu',
'12': 'ir_input',
'13': 'recall_factory_settings',
'14': 'inputname',
'15': 'manufacturer',
'16': 'modelname',
'17': 'swversion'
}
# TODO:
# FIXME: Better reply handling than table?
# * Better error handling
# After suspend/resume, if volume up/down fails (or similar),
# try turning amp on
#
# LATER
# * v2: Implement as module(?), not class, for multiple writers/subscribers
# (singleton)
# Seems like a factory would be better, so 'import primare_serial' then
# primare_serial.initComs() which then creates the single Serial object.
# * v2: Add notification callback mechanism to notify users of changes on
# amp (dials or other SW)
# http://bit.ly/WGRn0g
# Better idea: websocket
# http://forums.lantronix.com/showthread.php?p=3131
# * ...
class PrimareController():
"""This class provides methods for controlling a Primare amplifier."""
# Number of volume levels the amplifier supports.
# Primare amplifiers have 79 levels
VOLUME_LEVELS = 79
def __init__(self, source=None, volume=None, writer=None):
"""Initialization."""
self._bytes_read = bytearray()
self._write_cb = writer
self._boot_print = True
self._manufacturer = ''
self._modelname = ''
self._swversion = ''
self._inputname = ''
self._source = source
# Volume in range 0..VOLUME_LEVELS. :class:`None` before calibration.
if volume:
self.volume_set(volume)
        # Set up logging so that it is available
logging.basicConfig(level=logging.DEBUG)
# Private methods
def _set_device_to_known_state(self):
logger.debug('_set_device_to_known_state')
self.verbose_set(True)
self.power_on()
time.sleep(1)
if self._source is not None:
self.input_set(self._source)
self.mute_set(False)
def _print_device_info(self):
self.manufacturer_get()
self.modelname_get()
self.swversion_get()
        # We always get inputname last; receiving it marks the end of our initialization
self.inputname_current_get()
def _primare_reader(self, rawdata):
r"""Take raw data and finds the EOL sequence \x10\x03."""
eol = BYTE_DLE_ETX
leneol = len(eol)
for index, c in enumerate(rawdata):
self._bytes_read += c
# TODO: Need to do conversion of \x10\x10 before looking for EOL!
# Doing it after is actually wrong, move code up here from
# _decode_raw_data
if self._bytes_read[-leneol:] == eol:
logger.debug('_primare_reader - decoded: %s',
binascii.hexlify(self._bytes_read))
variable_char, decoded_data = self._decode_raw_data(
self._bytes_read)
# We found a data sequence, extract remaining data and start
# again
rawdata = rawdata[index + 1:]
self._bytes_read = bytearray()
self._parse_and_store(variable_char, decoded_data)
else:
# logger.debug('_primare_reader - not-eol: %s',
# binascii.hexlify(self._bytes_read[-leneol:]))
pass
def _decode_raw_data(self, rawdata):
r"""Decode raw data from the serial port.
Replace any '\x10\x10' sequences with '\x10'.
Returns the variable char and the data received between the STX and
DLE+ETX markers
"""
variable_char = ''
data = ''
# logger.debug('Read: "%s"', binascii.hexlify(rawdata))
byte_string = struct.unpack('c' * len(rawdata), rawdata)
variable_char = binascii.hexlify(''.join(byte_string[POS_REPLY_VAR]))
byte_string = byte_string[POS_REPLY_DATA]
# We need to replace double DLE (0x10) with single DLE
for byte_pairs in zip(byte_string[0:None:2],
byte_string[1:None:2]):
# Convert binary tuple to str to ascii
str_pairs = binascii.hexlify(''.join(byte_pairs))
if str_pairs == '1010':
data += '10'
else:
data += str_pairs
        # Very often we have an odd number of bytes, which is not handled by
        # the zip above; manually append that last byte
if len(byte_string) % 2 != 0:
data += binascii.hexlify(byte_string[-1])
logger.debug('Read(%s) = %s (%s)', PRIMARE_REPLY[variable_char], data,
binascii.hexlify(rawdata))
return variable_char, data
def _parse_and_store(self, variable_char, data):
if variable_char in ['01', '14', '15', '16', '17']:
if variable_char in ['14', '15', '16', '17']:
logger.debug('_parse_and_store - index: "%s" - %s',
variable_char,
                             binascii.unhexlify(data))
if variable_char == '01':
self._power_state = int(data, 16)
elif variable_char == '14':
                self._inputname = data
if self._boot_print is True:
self._boot_print = False
logger.info("""Connected to:
Manufacturer: %s
Model: %s
SW Version: %s
Current input: %s """,
binascii.unhexlify(self._manufacturer),
binascii.unhexlify(self._modelname),
binascii.unhexlify(self._swversion),
binascii.unhexlify(self._inputname))
elif variable_char == '15':
self._manufacturer = data
elif variable_char == '16':
self._modelname = data
elif variable_char == '17':
self._swversion = data
    def _send_command(self, v
from monitor import monitor_qlen
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os
parser = ArgumentParser(description="CWND/Queue Monitor")
parser.add_argument('--exp', '-e',
dest="exp",
action="store",
help="Name of the Experiment",
required=True)
# Expt parameters
args = parser.parse_args()
def start_tcpprobe():
"Install tcp_pobe module and dump to file"
os.system("(rmmod tcp_pr | obe >/dev/null 2>&1); modprobe tcp_probe full=1;")
print "Monitoring TCP CWND ... will save it to ./%s_tcpprobe.txt " % args.exp
Popen("cat /proc/net/tcpprobe > ./%s_tcpprobe.txt" %
args.exp, shell=True)
def qmon():
monitor = Process(target=monitor_qlen,args=('s0-eth2', 0.01, '%s_sw0-qlen.txt' % args.exp ))
monitor.start()
print "Monitoring Queue Occupancy ... will save it to %s_sw0-qlen.txt " % args.exp
raw_input('Press Enter key to stop the monitor--> ')
monitor.terminate()
if __name__ == '__main__':
start_tcpprobe()
qmon()
Popen("killall -9 cat", shell=True).wait()
# -*- coding: utf-8 -*
from pymeasure.instruments.pyvisa_instrument import PyVisaInstrument
from pymeasure.case import ChannelRead
from pymeasure.instruments.oxford import OxfordInstrument
import time
class _QxfordILMChannel(ChannelRead):
def __init__(self, instrument):
ChannelRead.__init__(self)
self._instrument = instrument
self.unit = 'percent'
self._config += ['fast']
@ChannelRead._readmethod
def read(self):
while True:
helium = self._instrument.query('R')
helium = helium[2:]
if len(helium) == 4:
break
return [float(helium)/10]
@property
def fast(self):
while True:
status = self._instrument.query('X')
            status = status[5]
if status == '4' or status == 'C':
return False
elif status == '2' or status == '3' or status == 'A' :
return True
else:
time.sleep(1)
                pass
@fast.setter
def fast(self, boolean):
if boolean:
self._instrument.write('T1')
else:
self._instrument.write('S1')
class QxfordILM(PyVisaInstrument):
def __init__(self, address, name='', reset=True, defaults=True, isobus=6, **pyvisa):
super().__init__(address, name, **pyvisa)
self._isobus = isobus
self._instrument = OxfordInstrument(self._instrument, isobus = self._isobus)
self._instrument.timeout = 200
self._instrument.read_termination = '\r'
self._instrument.write_termination = '\r'
self._instrument.write('C3')
# Channels
self.__setitem__('helium', _QxfordILMChannel(self._instrument))
if defaults is True:
self.defaults()
#@property
#def status(self):
# return self._instrument.ask('X')
def defaults(self):
pass
#
# The Python Imaging Library.
# $Id: MicImagePlugin.py,v 1.2 2007/06/17 14:12:15 robertoconnor Exp $
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import string
import Image, TiffImagePlugin
from OleFileIO import *
#
# --------------------------------------------------------------------
def _accept(prefix):
return prefix[:8] == MAGIC
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
format = "MIC"
format_description = "Microsoft Image Composer"
def _open(self):
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = OleFileIO(self.fp)
except IOError:
raise SyntaxError, "not an MIC file; invalid OLE file"
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = []
for file in self.ole.listdir():
if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
self.images.append(file)
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
raise SyntaxError, "not an MIC file; no image entries"
        self.__fp = self.fp
self.frame = 0
if len(self.images) > 1:
self.category = Image.CONTAINER
self.seek(0)
def seek(self, frame):
try:
filename = self.images[frame]
except IndexError:
raise EOFError, "no such frame"
self.fp = self.ole.openstream(filename)
        TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
def tell(self):
return self.frame
#
# --------------------------------------------------------------------
Image.register_open("MIC", MicImageFile, _accept)
Image.register_extension("MIC", ".mic")
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made.
"""
# <<< imports
# @generated
from cdpsm.iec61970.wires.switch import Switch
from google.appengine.ext import db
# >>> imports
class Disconnector(Switch):
""" A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made.
"""
# <<< disconnector.attributes
# @generated
# >>> disconnector.attributes
# <<< disconnector.references
# @generated
# >>> disconnector.references
# <<< disconnector.operations
# @generated
# >>> disconnector.operations
# EOF -------------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
import os
from appconf import AppConf
from django.conf import settings # noqa
class SessionRedisConf(AppConf):
HOST = '127.0.0.1'
PORT = 6379
DB = 0
PREFIX = 'django_sessions'
PASSWORD = None
UNIX_DOMAIN_SOCKET_PATH = None
URL = None
CONNECTION_POOL = None
JSON_ENCODING = 'latin-1'
ENV_URLS = (
'REDISCLOUD_URL',
        'REDISTOGO_URL',
'OPENREDIS_URL',
'REDISGREEN_URL',
'MYREDIS_URL',
)
def configure(self):
if self.configured_data['URL'] is None:
for url in self.configured_data['ENV_URLS']:
redis_env_url = os.environ.get(url)
if redis_env_url:
self.configured_data['URL'] = redis_env_url
break
return self.configured_data
class Meta:
prefix = 'session_redis'
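# django-appconf derives setting names from the Meta prefix, so the defaults
# above correspond to settings such as SESSION_REDIS_HOST or SESSION_REDIS_URL;
# e.g. SESSION_REDIS_URL = 'redis://127.0.0.1:6379/0' in settings.py would
# short-circuit the ENV_URLS lookup in configure().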
"""
Compute class
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#TODO: Take care of the looks of these plots
class Compute(object):
"""
Abstract compute class. It will never be used, but is parent of all
the different computes.
"""
def __init__(self):
"""
Constructor. Not clear what to do here
"""
self.value = 0
self.idx = 0
self.header = []
def compute(self, system):
"""
Compute routine
"""
pass
def tally(self, value):
"""
        Tally the new compute with the previous ones. Not all of the
        computes have the same structure, so the "average" is not
        standard. By default we do the usual running average.
"""
self.idx += 1
        self.value *= (self.idx - 1) / float(self.idx)
        self.value += value / float(self.idx)
def zero(self):
"""
Zero out current tallies.
"""
self.value = 0
self.idx = 0
def log(self, filename):
"""
Logging routine. By default we just write self.value to filename,
with self.header
"""
np.savetxt(filename, self.value, header='; '.join(self.header))
def plot(self, filename):
"""
Plotting routine. By default we plot every column [1:] as a
function of column 0, setting labels and axis names with
self.header and save it to filename.
"""
fig, axis = pl.subplots()
for i, vec in enumerate(self.value.T[1:]):
            axis.plot(self.value[:, 0], vec, label=self.header[i + 1])
axis.set_xlabel(self.header[0])
fig.savefig('{0}.pdf'.format(filename))
pl.close()
from django.core.files.storage import default_storage
from django.forms import widgets
from django.urls import reverse
from repanier.const import EMPTY_STRING
from repanier.picture.const import SIZE_M
from repanier.tools import get_repanier_template_name
class RepanierPictureWidget(widgets.TextInput):
template_name = get_repanier_template_name("widgets/picture.html")
def __init__(self, *args, **kwargs):
self.upload_to = kwargs.pop("upload_to", "pictures")
self.size = kwargs.pop("size", SIZE_M)
self.bootstrap = kwargs.pop("bootstrap", False)
super().__init__(*args, **kwargs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context["upload_url"] = reverse(
"repanier:ajax_picture", args=(self.upload_to, self.size)
)
if value:
context["repanier_file_path"] = file_path = str | (value)
context["repanier_display_picture"] = "inline"
context["repanier_display_upload"] = "none"
context["repanier_file_url"] = default_storage.url(file_path)
else:
context["repanier_file_path"] = EMPTY_STRING
context["repanier_display_picture"] = "none"
context["repanier_display_upload"] = "inline"
context["repanier_file_url"] = EMPTY_STRING
context["repanier_height"] = context["repanier_width"] = | self.size
context["bootstrap"] = self.bootstrap
return context
class Media:
js = ("admin/js/jquery.init.js",)
"""This will perform basic enrichment on a given IP."""
import csv
import json
import mmap
import os
import socket
import urllib
import dns.resolver
import dns.reversename
from geoip import geolite2
from IPy import IP
from joblib import Parallel, delayed
from netaddr import AddrFormatError, IPSet
torcsv = 'Tor_ip_list_ALL.csv'
sfile = 'http://torstatus.blutmagie.de/ip_list_all.php/Tor_ip_list_ALL.csv'
SUBNET = 0
INPUTDICT = {}
SECTOR_CSV = 'sector.csv'
OUTFILE = 'IPLookup-output.csv'
CSVCOLS = '"ip-address","asn","as-name","isp","abuse-1","abuse-2","abuse-3","domain","reverse-dns","type","country","lat","long","tor-node"'
def identify(var):
result = ""
with open(SECTOR_CSV) as f:
root = csv.reader(f)
for i in root:
if i[0] in var:
result = i[1]
return result
def lookup(value):
"""Perform a dns request on the given value."""
try:
answers = dns.resolver.query(value, 'TXT')
for rdata in answers:
for txt_string in rdata.strings:
value = txt_string.replace(" | ", "|")
value = value.replace(" |", "|").split("|")
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
value = []
return value
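# For reference (inferred from how origin[] is indexed in mainlookup below,
# not from shadowserver documentation): the origin TXT reply is a
# pipe-separated record roughly of the form
#   "asn | prefix | as-name | country | domain | descr"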
def flookup(value, fname, sfile):
"""Look up a value in a file."""
try:
fhandle = open(fname)
except IOError:
sourceFile = urllib.URLopener()
sourceFile.retrieve(
sfile,
fname)
fhandle = open(fname)
search = mmap.mmap(fhandle.fileno(), 0, access=mmap.ACCESS_READ)
if search.find(value) != -1:
return 'true'
else:
return 'false'
def iprange(sample, sub):
"""Identify if the given ip address is in the previous range."""
    if sub != 0:
try:
ipset = IPSet([sub])
if sample in ipset:
return True
except AddrFormatError:
return False
else:
return False
def mainlookup(var):
"""Wrap the main lookup and generated the dictionary."""
global SUBNET
global INPUTDICT
var = ''.join(var.split())
if IP(var).iptype() != 'PRIVATE' and IP(var).version() == 4:
if iprange(var, SUBNET) is True:
print
elif INPUTDICT.get("ip-address") == var:
print
else:
try:
socket.inet_aton(var)
except socket.error:
var = socket.gethostbyname(var)
contactlist = []
rvar = '.'.join(reversed(str(var).split(".")))
origin = lookup(rvar + '.origin.asn.shadowserver.org')
SUBNET = origin[1]
try:
contact = lookup(rvar + '.abuse-contacts.abusix.org')
contactlist = str(contact[0]).split(",")
except IndexError:
contactlist = []
contactlist.extend(["-"] * (4 - len(contactlist)))
try:
addr = dns.reversename.from_address(var)
rdns = str(dns.resolver.query(addr, "PTR")[0])
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
rdns = ""
match = geolite2.lookup(var)
if match is None or match.location is None:
country = ''
location = ["", ""]
else:
country = match.country
location = match.location
tor = flookup(var, torcsv, sfile)
            # Pad origin before indexing it, so short replies don't raise IndexError
            origin.extend(["-"] * (6 - len(origin)))
            category = identify(origin[4])
            if category == "":
                category = identify(contactlist[0])
INPUTDICT = {
'abuse-1': contactlist[0],
'abuse-2': contactlist[1],
'abuse-3': contactlist[2],
'as-name': origin[2],
'asn': origin[0],
'country': country,
'descr': origin[5],
'domain': origin[4],
'ip-address': var,
'lat': location[0],
'long': location[1],
'reverse-dns': rdns,
'tor-node': tor,
'sector': category,
}
else:
INPUTDICT = {
'abuse-1': "", 'abuse-2': "", 'abuse-3': "", 'as-name': "",
'asn': "", 'country': "", 'descr': "", 'domain': "",
'domain-count': "", 'ip-address': var, 'lat': "", 'long': "",
'reverse-dns': "", 'tor-node': "", 'sector': "",
}
INPUTDICT['ip-address'] = var
out = json.dumps(
INPUTDICT,
indent=4,
sort_keys=True,
ensure_ascii=False)
csvout(INPUTDICT)
return out
def batch(inputfile):
"""Handle batch lookups using file based input."""
if os.path.isfile(OUTFILE):
os.remove(OUTFILE)
fhandle = open(OUTFILE, "a")
header = 0
if header == 0:
fhandle.write(str(CSVCOLS) + "\n")
header = 1
fhandle.close()
with open(inputfile) as fhandle:
Parallel(n_jobs=100, verbose=51)(delayed(mainlookup)(i.rstrip('\n'))
for i in fhandle)
def single(lookupvar):
"""Do a single IP lookup."""
result = mainlookup(lookupvar)
return result
def csvout(inputdict):
"""Generate a CSV file from the output inputdict."""
fhandle = open(OUTFILE, "a")
# header = 0
# if header == 0:
# fhandle.write("Boop")
# header = 1
try:
writer = csv.writer(fhandle, quoting=csv.QUOTE_ALL)
writer.writerow((
inputdict['ip-address'],
inputdict['asn'],
inputdict['as-name'],
inputdict['descr'],
inputdict['abuse-1'],
inputdict['abuse-2'],
inputdict['abuse-3'],
inputdict['domain'],
inputdict['reverse-dns'],
inputdict['sector'],
inputdict['country'],
inputdict['lat'],
inputdict['long'],
inputdict['tor-node']))
finally:
fhandle.close()
def main():
import argparse
PARSER = argparse.ArgumentParser()
PARSER.add_argument("-t",
choices=('single', 'batch'),
required="false",
metavar="request-ty | pe",
help="Either single or batch request")
PARSER.add_argument("-v",
required="false",
metavar="value",
| help="The value of the request")
ARGS = PARSER.parse_args()
if ARGS.t == "single":
print(single(ARGS.v))
elif ARGS.t == "batch":
batch(ARGS.v)
else:
PARSER.print_help()
if __name__ == "__main__":
main()
        except schema.SchemaError as exc:
get_logger().error('downloads.ini failed schema validation (located in %s)', path)
raise exc
return new_data
def __init__(self, ini_paths):
"""Reads an iterable of pathlib.Path to download.ini files"""
self._data = configparser.ConfigParser()
for path in ini_paths:
self._data.read_dict(self._parse_data(path))
def __getitem__(self, section):
"""
Returns an object with keys as attributes and
values already pre-processed strings
"""
return self._DownloadsProperties(self._data[section], self._passthrough_properties,
self._hashes)
def __contains__(self, item):
"""
Returns True if item is a name of a section; False otherwise.
"""
return self._data.has_section(item)
def __iter__(self):
"""Returns an iterator over the section names"""
return iter(self._data.sections())
def properties_iter(self):
"""Iterator for the download properties sorted by output path"""
return sorted(
map(lambda x: (x, self[x]), self), key=(lambda x: str(Path(x[1].output_path))))
class _UrlRetrieveReportHook: #pylint: disable=too-few-public-methods
"""Hook for urllib.request.urlretrieve to log progress information to console"""
def __init__(self):
self._max_len_printed = 0
self._last_percentage = None
def __call__(self, block_count, block_size, total_size):
# Use total_blocks to handle case total_size < block_size
# total_blocks is ceiling of total_size / block_size
# Ceiling division from: https://stackoverflow.com/a/17511341
total_blocks = -(-total_size // block_size)
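        # Worked example of the ceiling division above:
        # total_size=10, block_size=3 -> -(-10 // 3) == -(-4) == 4 blocks.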
if total_blocks > 0:
# Do not needlessly update the console. Since the console is
# updated synchronously, we don't want updating the console to
# bottleneck downloading. Thus, only refresh the output when the
# displayed value should change.
percentage = round(block_count / total_blocks, ndigits=3)
if percentage == self._last_percentage:
return
self._last_percentage = percentage
print('\r' + ' ' * self._max_len_printed, end='')
status_line = 'Progress: {:.1%} of {:,d} B'.format(percentage, total_size)
else:
downloaded_estimate = block_count * block_size
status_line = 'Progress: {:,d} B of unknown size'.format(downloaded_estimate)
self._max_len_printed = len(status_line)
print('\r' + status_line, end='')
def _download_via_urllib(url, file_path, show_progress, disable_ssl_verification):
reporthook = None
if show_progress:
reporthook = _UrlRetrieveReportHook()
if disable_ssl_verification:
import ssl
# TODO: Remove this or properly implement disabling SSL certificate verification
orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access
ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access
try:
urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)
finally:
# Try to reduce damage of hack by reverting original HTTPS context ASAP
if disable_ssl_verification:
ssl._create_default_https_context = orig_https_context #pylint: disable=protected-access
if show_progress:
print()
def _download_if_needed(file_path, url, show_progress, disable_ssl_verification):
"""
Downloads a file from url to the specified path file_path if necessary.
If show_progress is True, download progress is printed to the console.
"""
if file_path.exists():
get_logger().info('%s already exists. Skipping download.', file_path)
        return
# File name for partially download file
tmp_file_path = file_path.with_name(file_path.name + '.partial')
if tmp_file_path.exists():
get_logger().debug('Resuming downloading URL %s ...', url)
else:
get_logger().debug('Downloading URL %s ...', url)
# Perform download
if shutil.which('curl'):
get_logger().debug('Using curl')
try:
subprocess.run(['curl', '-L', '-o', str(tmp_file_path), '-C', '-', url], check=True)
except subprocess.CalledProcessError as exc:
get_logger().error('curl failed. Re-run the download command to resume downloading.')
raise exc
else:
get_logger().debug('Using urllib')
_download_via_urllib(url, tmp_file_path, show_progress, disable_ssl_verification)
# Download complete; rename file
tmp_file_path.rename(file_path)
def _chromium_hashes_generator(hashes_path):
with hashes_path.open(encoding=ENCODING) as hashes_file:
hash_lines = hashes_file.read().splitlines()
for hash_name, hash_hex, _ in map(lambda x: x.lower().split(' '), hash_lines):
if hash_name in hashlib.algorithms_available:
yield hash_name, hash_hex
else:
get_logger().warning('Skipping unknown hash algorithm: %s', hash_name)
def _get_hash_pairs(download_properties, cache_dir):
"""Generator of (hash_name, hash_hex) for the given download"""
for entry_type, entry_value in download_properties.hashes.items():
if entry_type == 'hash_url':
hash_processor, hash_filename, _ = entry_value
if hash_processor == 'chromium':
yield from _chromium_hashes_generator(cache_dir / hash_filename)
else:
raise ValueError('Unknown hash_url processor: %s' % hash_processor)
else:
yield entry_type, entry_value
def retrieve_downloads(download_info, cache_dir, show_progress, disable_ssl_verification=False):
"""
Retrieve downloads into the downloads cache.
    download_info is the DownloadInfo of downloads to retrieve.
cache_dir is the pathlib.Path to the downloads cache.
show_progress is a boolean indicating if download progress is printed to the console.
disable_ssl_verification is a boolean indicating if certificate verification
should be disabled for downloads using HTTPS.
Raises FileNotFoundError if the downloads path does not exist.
Raises NotADirectoryError if the downloads path is not a directory.
"""
if not cache_dir.exists():
raise FileNotFoundError(cache_dir)
if not cache_dir.is_dir():
raise NotADirectoryError(cache_dir)
for download_name, download_properties in download_info.properties_iter():
get_logger().info('Downloading "%s" to "%s" ...', download_name,
download_properties.download_filename)
download_path = cache_dir / download_properties.download_filename
_download_if_needed(download_path, download_properties.url, show_progress,
disable_ssl_verification)
if download_properties.has_hash_url():
get_logger().info('Downloading hashes for "%s"', download_name)
_, hash_filename, hash_url = download_properties.hashes['hash_url']
_download_if_needed(cache_dir / hash_filename, hash_url, show_progress,
disable_ssl_verification)
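# A minimal usage sketch under assumed setup (a DownloadInfo built from
# downloads.ini paths; illustrative only, not a documented entry point):
#   info = DownloadInfo([Path('downloads.ini')])
#   retrieve_downloads(info, Path('cache'), show_progress=True)
#   check_downloads(info, Path('cache'))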
def check_downloads(download_info, cache_dir):
"""
Check integrity of the downloads cache.
download_info is the DownloadInfo of downloads to unpack.
cache_dir is the pathlib.Path to the downloads cache.
Raises source_retrieval.HashMismatchError when the computed and expected hashes do not match.
"""
for download_name, download_properties in download_info.properties_iter():
get_logger().info('Verifying hashes for "%s" ...', download_name)
download_path = cache_dir / download_properties.download_filename
with download_path.open('rb') as file_obj:
archive_data = file_obj.read()
for hash_name, hash_hex in _get_hash_pairs(download_properties, cache_dir):
            get_logger().debug('Verifying %s hash...', hash_name)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for Recognize
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-speech
# [START speech_v1p1beta1_generated_Speech_Recognize_async]
from google.cloud import speech_v1p1beta1
async def sample_recognize():
# Create a client
client = speech_v1p1beta1.SpeechAsyncClient()
# Initialize request argument(s)
config = speech_v1p1beta1.RecognitionConfig()
config.language_code = "language_code_value"
audio = speech_v1p1beta1.RecognitionAudio()
audio.content = b'content_blob'
request = speech_v1p1beta1.RecognizeRequest(
config=config,
audio=audio,
)
# Make the request
response = await client.recognize(request=request)
# Handle the response
print(response)
# [END speech_v1p1beta1_generated_Speech_Recognize_async]
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
###########################################################################
# #
# ESPResSo++ Python script for tabulated GROMACS simulation #
# #
###########################################################################
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import logging
import copy
import math
from espressopp import Real3D, Int3D
from espressopp.tools import gromacs
from espressopp.tools import decomp
from espressopp.tools import timers
import pathintegral
def genTabPotentials(tabfilesnb):
potentials = {}
for fg in tabfilesnb:
fe = fg.split(".")[0]+".tab" # name of espressopp file
gromacs.convertTable(fg, fe, sigma, epsilon, c6, c12)
pot = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=rc)
t1, t2 = fg[6], fg[8] # type 1, type 2
potentials.update({t1+"_"+t2: pot})
print "created", t1, t2, fe
return potentials
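# Filename convention assumed by genTabPotentials: "table_X_Y.xvg", where the
# characters at positions 6 and 8 are the two atom types,
# e.g. "table_H_O.xvg" -> key "H_O".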
# This example reads in a gromacs water system (SPC/Fw) treated with reaction field. See the corresponding gromacs grompp.mdp parameter file.
# Output of gromacs energies and esp energies should be the same
# simulation parameters (nvt = False is nve)
steps = 1 #100
check = 1 #steps/10
rc = 0.9 # Verlet list cutoff
skin = 0.14
timestep = 0.0002
# parameters to convert GROMACS tabulated potential file
sigma = 1.0
epsilon = 1.0
c6 = 1.0
c12 = 1.0
# GROMACS setup files
grofile = "conf.gro"
topfile = "topol.top"
# this calls the gromacs parser for processing the top file (and included files) and the conf file
# The variables at the beginning defaults, types, etc... can be found by calling
# gromacs.read(grofile,topfile) without return values. It then prints out the variables to be unpacked
defaults, types, atomtypes, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, resname, resid, Lx, Ly, Lz= gromacs.read(grofile,topfile)
######################################################################
## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ##
######################################################################
#types, bonds, angles, dihedrals, x, y, z, vx, vy, vz, Lx, Ly, Lz = gromacs.read(grofile,topfile)
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
sys.stdout.write('Setting up simulation ...\n')
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size,size,rc,skin)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
# setting up GROMACS interaction stuff
# create a force capped Lennard-Jones interaction that uses a verlet list
verletlist = espressopp.VerletList(system, rc)
#interaction = espressopp.interaction.VerletListLennardJonesGromacs(verletlist)
# add particles to the system and then decompose
props = ['id', 'pos', 'v', 'type', 'mass', 'q']
allParticles = []
for pid in range(num_particles):
part = [pid + 1, Real3D(x[pid], y[pid], z[pid]),
Real3D(0, 0, 0), types[pid], masses[pid], charges[pid]]
allParticles.append(part)
system.storage.addParticles(allParticles, *props)
#system.storage.decompose()
# set up LJ interaction according to the parameters read from the .top file
#ljinteraction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist,rc)
########## tabulated nb interactions ############
tabfilesnb = ["table_O_O.xvg", "table_H_O.xvg", "table_H_H.xvg"]
potentials = genTabPotentials(tabfilesnb)
tabulatedinteraction = espressopp.interaction.VerletListTabulated(verletlist)
tabulatedinteraction.setPotential(0, 0, potentials["O_O"])
tabulatedinteraction.setPotential(0, 1, potentials["H_O"])
tabulatedinteraction.setPotential(1, 1, potentials["H_H"])
system.addInteraction(tabulatedinteraction)
# set up angle interactions according to the parameters read from the .top file
angleinteractions=gromacs.setAngleInteractions(system, angletypes, angletypeparams)
# set up bonded interactions according to the parameters read from the .top file
bondedinteractions=gromacs.setBondedInteractions(system, bondtypes, bondtypeparams)
# exclusions, i.e. pairs of atoms not considered for the non-bonded part. Those are defined either by bonds, which automatically generate an exclusion, or by the nrexcl variable
verletlist.exclude(exclusions)
# langevin thermostat
langevin = espressopp.integrator.LangevinThermostat(system)
langevin.gamma = 10
langevin.temperature = 2.4942 # kT in gromacs units
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.addExtension(langevin)
integrator.dt = timestep
print "POT", potentials
pathintegral.createPathintegralSystem(allParticles, props, types, system, langevin, potentials, P=16)
system.storage.decompose()
num_particles = int(espressopp.analysis.NPart(system).compute())
# print simulation parameters
print ''
print 'number of particles =', num_particles
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
configurations = espressopp.analysis.Configurations(system)
configurations.gather()
temperature = espressopp.analysis.Temperature(system)
pressure = espressopp.analysis.Pressure(system)
pressureTensor = espressopp.analysis.PressureTensor(system)
print "i*timestep,Eb, EAng, ETab, Ek, Etotal T"
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n'
outfile = open("esp.dat", "w")
start_time = time.clock()
espressopp.tools.psfwrite("system.psf", system)
#espressopp.tools.decomp.tuneSkin(system, integrator)
#espressopp.tools.analyse.info(system, integrator)
espressopp.tools.fastwritexyz("traj.xyz", system, append=Fals | e, scale=10)
for i in range(check):
T = temperature.compute()
P = pressure.compute()
Eb = 0
EAng = 0
ETab=0
#for bd in bondedinteractions.values(): Eb+=bd.computeEnergy()
#for ang in angleinteractions.values(): EAng+=ang.computeEnergy()
#ELj= ljinteraction.computeEnergy()
    #EQQ= qq_interactions.computeEnergy()
ETab= tabulatedinteraction.computeEnergy()
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Etotal = Ek+Eb+EAng+ETab
sys.stdout.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T))
outfile.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T))
#espressopp.tools.pdb.pdbfastwrite("traj.pdb", system, append=True)
espressopp.tools.fastwritexyz("traj.xyz", system, append=True, scale=10)
integrator.run(steps/check) # print out every steps/check steps
#espressopp.tools.vmd.imd_positions(system, sock)
# print timings and neighbor list information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=2)
espressopp.tools.analyse.final_info(system, integrator, verletlist, start_time, end_time)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
# encoding: utf-8
from __future__ import absolute_import, division, print_function
import numpy as np
import tables
from liam2.data import merge_arrays, get_fields, index_table_light, merge_array_records
from liam2.utils import timed, loop_wh_progress, merge_items
__version__ = "0.4"
def get_group_fields(node):
if node is None:
return {}
# noinspection PyProtectedMember
return {table._v_name: get_fields(table) for table in node._f_iter_nodes()}
def merge_group(parent1, parent2, name, output_file, index_col):
print()
print(name)
print('=' * len(name))
group1 = getattr(parent1, name, None)
group2 = getattr(parent2, name, None)
if group1 is None and group2 is None:
print("node not found in either input files, skipped")
return
output_group = output_file.create_group("/", name)
fields1 = get_group_fields(group1)
fields2 = get_group_fields(group2)
ent_names1 = set(fields1.keys())
ent_names2 = set(fields2.keys())
for ent_name in sorted(ent_names1 | ent_names2):
print()
print(ent_name)
ent_fields1 = fields1.get(ent_name, [])
ent_fields2 = fields2.get(ent_name, [])
output_fields = merge_items(ent_fields1, ent_fields2)
        output_table = output_file.create_table(output_group, ent_name,
np.dtype(output_fields))
if ent_name in ent_names1:
table1 = getattr(group1, ent_name)
            # noinspection PyProtectedMember
print(" * indexing table from %s ..." % group1._v_file.filename,
end=' ')
input1_rows = index_table_light(table1, index_col)
print("done.")
else:
table1 = None
input1_rows = {}
if ent_name in ent_names2:
table2 = getattr(group2, ent_name)
# noinspection PyProtectedMember
print(" * indexing table from %s ..." % group2._v_file.filename,
end=' ')
input2_rows = index_table_light(table2, index_col)
print("done.")
else:
table2 = None
input2_rows = {}
print(" * merging: ", end=' ')
input1_periods = set(input1_rows.keys())
input2_periods = set(input2_rows.keys())
output_periods = sorted(input1_periods | input2_periods)
# noinspection PyUnusedLocal
def merge_period(period_idx, period):
if ent_name in ent_names1:
start, stop = input1_rows.get(period, (0, 0))
input1_array = table1.read(start, stop)
else:
input1_array = None
if ent_name in ent_names2:
start, stop = input2_rows.get(period, (0, 0))
input2_array = table2.read(start, stop)
else:
input2_array = None
if ent_name in ent_names1 and ent_name in ent_names2:
if 'id' in input1_array.dtype.names:
assert 'id' in input2_array.dtype.names
output_array, _ = merge_arrays(input1_array, input2_array)
else:
output_array = merge_array_records(input1_array,
input2_array)
elif ent_name in ent_names1:
output_array = input1_array
elif ent_name in ent_names2:
output_array = input2_array
else:
raise Exception("this shouldn't have happened")
output_table.append(output_array)
output_table.flush()
loop_wh_progress(merge_period, output_periods)
print(" done.")
def merge_h5(input1_path, input2_path, output_path):
input1_file = tables.open_file(input1_path)
input2_file = tables.open_file(input2_path)
output_file = tables.open_file(output_path, mode="w")
input1root = input1_file.root
input2root = input2_file.root
merge_group(input1root, input2root, 'globals', output_file, 'PERIOD')
merge_group(input1root, input2root, 'entities', output_file, 'period')
input1_file.close()
input2_file.close()
output_file.close()
if __name__ == '__main__':
import sys
import platform
print("LIAM HDF5 merge %s using Python %s (%s)\n" %
(__version__, platform.python_version(), platform.architecture()[0]))
args = sys.argv
if len(args) < 4:
print("Usage: %s inputpath1 inputpath2 outputpath" % args[0])
sys.exit()
timed(merge_h5, args[1], args[2], args[3])
import time
import unittest
from datetime import datetime
from babel import __version__ as VERSION
from babel.core import Locale, UnknownLocaleError
from babel.dates import format_datetime
from babel.messages import checkers
from babel.messages.plurals import PLURALS
from babel.messages.pofile import read_po
from babel.util import LOCALTZ
from babel._compat import BytesIO
class CheckersTestCase(unittest.TestCase):
# the last msgstr[idx] is always missing except for singular plural forms
def test_1_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 1]:
try:
locale = Locale.parse(_locale)
except UnknownLocaleError:
# Just an alias? Not what we're testing here, let's continue
continue
po_file = (u"""\ |
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\\n"
"PO-Revision-Date: %(date)s\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=utf-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Generated-By: Babel %(version)s\\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
""" % dict(locale=_locale,
english_name=locale.english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
                       plural_expr=PLURALS[_locale][1])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_2_num_plurals_checkers(self):
# in this testcase we add an extra msgstr[idx], we should be
# disregarding it
for _locale in [p for p in PLURALS if PLURALS[p][0] == 2]:
if _locale in ['nn', 'no']:
_locale = 'nn_NO'
num_plurals = PLURALS[_locale.split('_')[0]][0]
plural_expr = PLURALS[_locale.split('_')[0]][1]
else:
num_plurals = PLURALS[_locale][0]
plural_expr = PLURALS[_locale][1]
try:
locale = Locale(_locale)
date = format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale)
except UnknownLocaleError:
# Just an alias? Not what we're testing here, let's continue
continue
po_file = (u"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\\n"
"PO-Revision-Date: %(date)s\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: %(locale)s <LL@li.org>\\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=utf-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Generated-By: Babel %(version)s\\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
""" % dict(locale=_locale,
english_name=locale.english_name,
version=VERSION,
year=time.strftime('%Y'),
date=date,
num_plurals=num_plurals,
plural_expr=plural_expr)).encode('utf-8')
# we should be adding the missing msgstr[0]
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_3_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 3]:
po_file = (r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
""" % dict(locale=_locale,
english_name=Locale.parse(_locale).english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
                       plural_expr=PLURALS[_locale][1])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(BytesIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_4_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 4]:
po_file = (r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
""" % dict(locale=_locale,
english_name=Locale.parse(_locale).english_name,
version=VERSION,
year=time.strftime('%Y'),
date=format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals=PLURALS[_locale][0],
                       plural_expr=PLURALS[_locale][1])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
            catalog = read_po(BytesIO(po_file), _locale)
            message = catalog['foobar']
            checkers.num_plurals(catalog, message)
+= [""] * len(extensionsList)
self.session.openWithCallback(self.extensionCallback, ChoiceBox, title=_("Please choose an extension..."), list = list, keys = keys, skin_name = "ExtensionsList")
def extensionCallback(self, answer):
if answer is not None:
answer[1][1]()
def showPluginBrowser(self):
from Screens.PluginBrowser import PluginBrowser
self.session.open(PluginBrowser)
def openCCcamInfo(self):
from Screens.CCcamInfo import CCcamInfoMain
self.session.open(CCcamInfoMain)
def openOScamInfo(self):
from Screens.OScamInfo import OscamInfoMenu
self.session.open(OscamInfoMenu)
def showTimerList(self):
self.session.open(TimerEditList)
def openLogManager(self):
from Screens.LogManager import LogManager
self.session.open(LogManager)
def open3DSetup(self):
from Screens.UserInterfacePositioner import OSD3DSetupScreen
self.session.open(OSD3DSetupScreen)
def openSoftcamPanel(self):
from Plugins.Extensions.Infopanel.SoftcamPanel import SoftcamPanel
self.session.open(SoftcamPanel)
def openRestartNetwork(self):
try:
from Plugins.Extensions.Infopanel.RestartNetwork import RestartNetwork
self.session.open(RestartNetwork)
except:
			print '[INFOBARGENERICS] failed to restart network'
def showAutoTimerList(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/AutoTimer/plugin.pyo"):
from Plugins.Extensions.AutoTimer.plugin import main, autostart
from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
self.autopoller = AutoPoller()
self.autotimer = AutoTimer()
try:
self.autotimer.readXml()
except SyntaxError as se:
self.session.open(
MessageBox,
_("Your config file is not well-formed:\n%s") % (str(se)),
type = MessageBox.TYPE_ERROR,
timeout = 10
)
return
# Do not run in background while editing, this might screw things up
if self.autopoller is not None:
self.autopoller.stop()
from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview
self.session.openWithCallback(
self.editCallback,
AutoTimerOverview,
self.autotimer
)
else:
self.session.open(MessageBox, _("The AutoTimer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def editCallback(self, session):
# XXX: canceling of GUI (Overview) won't affect config values which might have been changed - is this intended?
# Don't parse EPG if editing was canceled
if session is not None:
# Save xml
self.autotimer.writeXml()
# Poll EPGCache
self.autotimer.parseEPG()
# Start autopoller again if wanted
if config.plugins.autotimer.autopoll.getValue():
if self.autopoller is None:
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
self.autopoller = AutoPoller()
self.autopoller.start()
# Remove instance if not running in background
else:
self.autopoller = None
self.autotimer = None
def showEPGSearch(self):
from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch
s = self.session.nav.getCurrentService()
if s:
info = s.info()
event = info.getEvent(0) # 0 = now, 1 = next
if event:
name = event and event.getEventName() or ''
else:
name = self.session.nav.getCurrentlyPlayingServiceOrGroup().toString()
name = name.split('/')
name = name[-1]
name = name.replace('.',' ')
name = name.split('-')
name = name[0]
if name.endswith(' '):
name = name[:-1]
if name:
self.session.open(EPGSearch, name, False)
else:
self.session.open(EPGSearch)
else:
self.session.open(EPGSearch)
def showIMDB(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/IMDb/plugin.pyo"):
from Plugins.Extensions.IMDb.plugin import IMDB
s = self.session.nav.getCurrentService()
if s:
info = s.info()
event = info.getEvent(0) # 0 = now, 1 = next
name = event and event.getEventName() or ''
self.session.open(IMDB, name)
else:
self.session.open(MessageBox, _("The IMDb plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showMediaPlayer(self):
if isinstance(self, InfoBarExtensions):
if isinstance(self, InfoBar):
				try: # in case it is not installed
from Plugins.Extensions.MediaPlayer.plugin import MediaPlayer
self.session.open(MediaPlayer)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The MediaPlayer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
from Tools.BoundFunction import boundFunction
import inspect
# depends on InfoBarExtensions
class InfoBarPlugins:
def __init__(self):
self.addExtension(extension = self.getPluginList, type = InfoBarExtensions.EXTENSION_LIST)
def getPluginName(self, name):
return name
def getPluginList(self):
l = []
for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EXTENSIONSMENU):
args = inspect.getargspec(p.__call__)[0]
if len(args) == 1 or len(args) == 2 and isinstance(self, InfoBarChannelSelection):
l.append(((boundFunction(self.getPluginName, p.name), boundFunction(self.runPlugin, p), lambda: True), None, p.name))
l.sort(key = lambda e: e[2]) # sort by name
return l
def runPlugin(self, plugin):
if isinstance(self, InfoBarChannelSelection):
plugin(session = self.session, servicelist = self.servicelist)
else:
plugin(session = self.session)
from Components.Task import job_manager
class InfoBarJobman:
def __init__(self):
self.addExtension(extension = self.getJobList, type = InfoBarExtensions.EXTENSION_LIST)
def getJobList(self):
if config.usage.jobtaksextensions.getValue():
return [((boundFunction(self.getJobName, job), boundFunction(self.showJobView, job), lambda: True), None) for job in job_manager.getPendingJobs()]
else:
return []
def getJobName(self, job):
return "%s: %s (%d%%)" % (job.getStatustext(), job.name, int(100*job.progress/float(job.end)))
def showJobView(self, job):
from Screens.TaskView import JobView
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job)
def JobViewCB(self, in_background):
job_manager.in_background = in_background
# depends on InfoBarExtensions
class InfoBarPiP:
def __init__(self):
try:
self.session.pipshown
except:
self.session.pipshown = False
if SystemInfo.get("NumVideoDecoders", 1) > 1 and isinstance(self, InfoBarEPG):
self["PiPActions"] = HelpableActionMap(self, "InfobarPiPActions",
{
"activatePiP": (self.showPiP, _("Activate PiP")),
})
if self.allowPiP:
self.addExtension((self.getShowHideName, self.showPiP, lambda: True), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.addExtension((self.getSwapName, self.swapPiP, self.pipShown), "yellow")
self.addExtension((self.getTogglePipzapName, self.togglePipzap, self.pipShown), "red")
else:
self.addExtension((self.getShowHideName, self.showPiP, self.pipShown), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
def pipShown(self):
return self.session.pipshown
def pipHandles0Action(self):
return self.pipShown() and config.usage.pip_zero_button.getValue() != "standard"
def getShowHideName(self):
if self.session.pipshown:
return _("Disable Picture in Picture")
else:
return _("Activate Picture in Picture")
def getSwapName(self):
return _("Swap services")
def getMoveName(self):
return _("Move Picture in Picture")
def getTogglePipzapName(self):
slist = self.servicelist
if slist and slist.dopipzap:
return _("Zap focus to main screen")
return _("Zap focus to Picture in Picture")
def togglePipzap(self):
if not self.session.pipshown:
self.showPiP()
slist = self.servicelist
if slist and self.session.pipshown:
slist.togglePipzap()
if slist.dopipzap:
currentServicePath = self.servicelist.getCurrentServicePath()
self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
self.session.pip.servicePath = currentServicePath
Return a list of objects of kind :class:`stripeline.timetools.TimeChunk`.
'''
delta_time = time_length / num_of_chunks
result = []
for chunk_idx in range(num_of_chunks):
# Determine the time of each sample in this chunk
cur_time = chunk_idx * delta_time
chunk_time0 = np.ceil(cur_time * sampfreq) / sampfreq - cur_time
start_time = time0 + cur_time + chunk_time0
num_of_samples = int(delta_time * sampfreq)
result.append(TimeChunk(start_time=start_time,
num_of_samples=num_of_samples))
return result
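# Hedged usage sketch for the chunk-splitting helper above. The enclosing
# function's def line was truncated in this excerpt, so the name
# `split_time_range` and the keyword arguments are assumptions:
#
# >>> chunks = split_time_range(time_length=60.0, num_of_chunks=3,
# ...                           sampfreq=100.0, time0=0.0)
#
# Each of the three TimeChunk objects then spans 20 s, starts on a sample
# boundary (that is what the np.ceil rounding above guarantees), and has
# num_of_samples == int(20.0 * 100.0) == 2000.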
DET_NAMES = {'Q1': 0,
'Q2': 1,
'U1': 2,
'U2': 3
}
class ToiProvider:
'''Load a TOI and split it evenly among MPI processes.
.. note:: This is an abstract base class, and it should not be instantiated.
Consider using any of its derived classes, like
:class:`stripeline.timetools.FitsToiProvider`.
In the case of a run split among many MPI processes, this class balances the
load of a long TOI. If every MPI process creates a
:class:`stripeline.timetools.ToiProvider` object, every object will take
responsibility for reading one section of the TOI. The methods
:func:`stripeline.timetools.ToiProvider.get_signal`,
:func:`stripeline.timetools.ToiProvider.get_pointings`, and
:func:`stripeline.timetools.ToiProvider.get_pixel_index` can be used by
processes to read the chunk of data which belongs to each.
'''
def __init__(self, rank: int, num_of_processes: int):
'''Create a new object.
Parameters:
* "rank" is the rank of the running MPI process
* "num_of_processes" is the number of MPI processes
'''
self.rank = rank
self.num_of_processes = num_of_processes
self.total_num_of_samples = 0
def get_time(self):
'''Return a vector containing the time of each sample in the TOI.
Only the part of the TOI that belongs to the rank of this process
is returned.'''
return None
def get_signal(self, det_idx: Union[int, str]):
# Unused
del det_idx
return None
def get_pixel_index(self, nside: int, nest=False, lonlat=False):
'''Return a vector containing the pixel index for each sample in the
TOI.
Only the part of the TOI that belongs to the rank of this process is
returned.'''
theta, phi, psi = self.get_pointings()
return healpy.ang2pix(nside, theta, phi, nest=nest, lonlat=lonlat)
def get_pointings(self):
'''Return two vectors containing the colatitude and longitude for each
sample in the TOI.
Only the part of the TOI that belongs to the rank of this process is
returned.'''
return None, None
def get_polarization_angle(self):
'''Return a vector containing the polarization angle for each sample
in the TOI.
Only the part of the TOI that belongs to the rank of this process is
returned.'''
return None
ToiFile = namedtuple('ToiFile', ['file_name', 'num_of_samples'])
def read_fits_file_information(file_name: str, hdu=1) -> ToiFile:
'''Read the number of rows in the first tabular HDU of a FITS file
Return a :class:`stripeline.timetools.ToiFile` object.
'''
with fits.open(file_name) as fin:
num_of_samples = fin[hdu].header['NAXIS2']
return ToiFile(file_name=file_name, num_of_samples=num_of_samples)
def split_into_n(length: int, num_of_segments: int) -> List[int]:
'''Split a set of `length` elements into `num_of_segments` subsets.
Example::
>>> split_into_n(10, 4)
array([2, 3, 2, 3])
>>> split_into_n(201, 2)
array([100, 101])
'''
assert num_of_segments > 0
assert length > num_of_segments
start_points = np.array([int(i * length / num_of_segments)
for i in range(num_of_segments + 1)])
return start_points[1:] - start_points[:-1]
def assign_toi_files_to_processes(samples_per_processes: List[int],
tod_files: List[ToiFile]):
'''Determine how to balance the load of TOI files among processes.
Given a list of samples to be processed by each MPI process, decide which
TOD and samples must be loaded by each process, using the principle that
all the processes should read the same number of TODs, when possible.
Return a list of lists of :class:`stripeline.timetools.ToiFileSegment`
objects, one inner list per MPI process.
'''
assert (sum(samples_per_processes) ==
sum([x.num_of_samples for x in tod_files]))
result = [] # Type: List[List[ToiFileSegment]]
file_idx = 0
element_idx = 0
# Iterate over the MPI processes
for samples_in_this_proc in samples_per_processes:
# This is the list of FITS segments that the current MPI process is
# going to load
segments = [] # Type: List[ToiFileSegment]
elements_in_this_segment = 0
# Iterate over the files to be read by the current MPI process
while elements_in_this_segment < samples_in_this_proc:
if elements_in_this_segment + (tod_files[file_idx].num_of_samples - element_idx) <= samples_in_this_proc:
# The whole FITS file is going to be read by the current MPI
# process
num = tod_files[file_idx].num_of_samples - element_idx
segments.append(ToiFileSegment(file_name=tod_files[file_idx].file_name,
first_element=element_idx,
num_of_elements=num))
elements_in_this_segment += num
file_idx += 1
element_idx = 0
else:
# This is the size of the segment we're going to append to "segments"
num = samples_in_this_proc - elements_in_this_segment
# Only a subset of this FITS file will be read by the current MPI process
segments.append(ToiFileSegment(file_name=tod_files[file_idx].file_name,
first_element=element_idx,
num_of_elements=num))
elements_in_this_segment += num
element_idx += num
result.append(segments)
return result
ToiFileSegment = namedtuple(
'ToiFileSegment', ['file_name', 'first_element', 'num_of_elements'])
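# Hedged example of the balancing logic above (file names are hypothetical):
#
# >>> files = [ToiFile(file_name='tod1.fits', num_of_samples=100),
# ...          ToiFile(file_name='tod2.fits', num_of_samples=50)]
# >>> assign_toi_files_to_processes(split_into_n(150, 2), files)
#
# Process 0 gets one segment, tod1.fits[0:75]; process 1 gets two segments,
# tod1.fits[75:100] and tod2.fits[0:50], so each process reads 75 samples.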
FitsColumn = namedtuple(
'FitsColumn', ['hdu', 'column']
)
FitsTableLayout = namedtuple(
'FitsTableLayout', ['time_col', 'theta_col',
'phi_col', 'psi_col', 'signal_cols']
)
def _load_array_from_fits(segments: List[ToiFileSegment], cols_to_read: List[FitsColumn]):
'''Read a set of columns from a list of FITS files.
The chunks to read from each FITS file are specified in the parameter `segments`,
while the columns to read are in `cols_to_read`. The function returns a tuple
containing all the data from the columns (each in a NumPy array) in the same
order as in `cols_to_read`.'''
arrays = [np.array([], dtype=np.float64) for i in range(len(cols_to_read))]
for cur_segment in segments:
start = cur_segment.first_element
end = cur_segment.first_element + cur_segment.num_of_elements
with fits.open(cur_segment.file_name) as f:
# TODO: maybe this is not the most efficient way to load
# chunks of data from a FITS column
cur_chunk_arr = [f[x.hdu].data.field(x.column)[start:end]
for x in cols_to_read]
for col_idx in range(len(cols_to_read)):
arrays[col_idx] = np.concatenate(
[arrays[col_idx], cur_chunk_arr[col_idx]])
return tuple(arrays)
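# Hedged sketch of a call to the helper above; the HDU index and the column
# name 'TIME' are assumptions about the FITS table layout:
#
# >>> seg = ToiFileSegment(file_name='tod1.fits', first_element=0,
# ...                      num_of_elements=1000)
# >>> (times,) = _load_array_from_fits([seg], [FitsColumn(hdu=1, column='TIME')])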
class FitsToiProvider(ToiProvider):
'''Distribute a TOI saved in FITS files among MPI processes.
This class specializes :class:`stripeline.timetools.ToiProvider` in order to
load the TOI from a set of FITS files.'''
def __init__(self,
rank: int,
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationListResult(Model):
"""The operation list response that contains all operations for Azure
Container Instance service.
:param value: The list of operations.
:type value: list of :class:`Operation
<azure.mgmt.containerinstance.models.Operation>`
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(self, value=None):
self.value = value
|
import autograd.numpy as np
from autograd.scipy.misc import logsumexp
from features import num_atom_features, num_bond_features
from util import memoize, WeightsParser
from mol_graph import graph_from_smiles_tuple, degrees
from build_vanilla_net import build_fingerprint_deep_net, relu, batch_normalize
def fast_array_from_list(xs):
return np.concatenate([np.expand_dims(x, axis=0) for x in xs], axis=0)
def sum_and_stack(features, idxs_list_of_lists):
return fast_array_from_list([np.sum(features[idx_list], axis=0) for idx_list in idxs_list_of_lists])
def softmax(X, axis=0):
return np.exp(X - logsumexp(X, axis=axis, keepdims=True))
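# Quick numeric check of the log-sum-exp softmax above (values rounded):
#
# >>> softmax(np.array([1.0, 2.0, 3.0]))
# array([0.09003057, 0.24472847, 0.66524096])
#
# Subtracting logsumexp(X) before exponentiating keeps the computation stable
# for large entries, where a naive np.exp(X) / np.exp(X).sum() would overflow.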
def matmult_neighbors(array_rep, atom_features, bond_features, get_weights):
activations_by_degree = []
for degree in degrees:
atom_neighbors_list = array_rep[('atom_neighbors', degree)]
bond_neighbors_list = array_rep[('bond_neighbors', degree)]
if len(atom_neighbors_list) > 0:
neighbor_features = [atom_features[atom_neighbors_list],
bond_features[bond_neighbors_list]]
# dims of stacked_neighbors are [atoms, neighbors, atom and bond features]
stacked_neighbors = np.concatenate(neighbor_features, axis=2)
summed_neighbors = np.sum(stacked_neighbors, axis=1)
activations = np.dot(summed_neighbors, get_weights(degree))
activations_by_degree.append(activations)
# This operation relies on atoms being sorted by degree,
# in Node.graph_from_smiles_tuple()
return np.concatenate(activations_by_degree, axis=0)
def weights_name(layer, degree):
return "layer " + str(layer) + " degree " + str(degree) + " filter"
def build_convnet_fingerprint_fun(num_hidden_features=[100, 100], fp_length=512,
normalize=True, activation_function=relu,
return_atom_activations=False):
"""Sets up functions to compute convnets over all molecules in a minibatch together."""
# Specify weight shapes.
parser = WeightsParser()
all_layer_sizes = [num_atom_features()] + num_hidden_features
for layer in range(len(all_layer_sizes)):
parser.add_weights(('layer output weights', layer), (all_layer_sizes[layer], fp_length))
parser.add_weights(('layer output bias', layer), (1, fp_length))
in_and_out_sizes = zip(all_layer_sizes[:-1], all_layer_sizes[1:])
for layer, (N_prev, N_cur) in enumerate(in_and_out_sizes):
parser.add_weights(("layer", layer, "biases"), (1, N_cur))
parser.add_weights(("layer", layer, "self filter"), (N_prev, N_cur))
for degree in degrees:
parser.add_weights(weights_name(layer, degree), (N_prev + num_bond_features(), N_cur))
def update_layer(weights, layer, atom_features, bond_features, array_rep, normalize=False):
def get_weights_func(degree):
return parser.get(weights, weights_name(layer, degree))
layer_bias = parser.get(weights, ("layer", layer, "biases"))
layer_self_weights = parser.get(weights, ("layer", layer, "self filter"))
self_activations = np.dot(atom_features, layer_self_weights)
neighbour_activations = matmult_neighbors(
array_rep, atom_features, bond_features, get_weights_func)
total_activations = neighbour_activations + self_activations + layer_bias
if normalize:
total_activations = batch_normalize(total_activations)
return activation_function(total_activations)
def output_layer_fun_and_atom_activations(weights, smiles):
"""Computes layer-wise convolution, and returns a fixed-size output."""
array_rep = array_rep_from_smiles(tuple(smiles))
atom_features = array_rep['atom_features']
bond_features = array_rep['bond_features']
all_layer_fps = []
atom_activations = []
def write_to_fingerprint(atom_features, layer):
cur_out_weights = parser.get(weights, ('layer output weights', layer))
cur_out_bias = parser.get(weights, ('layer output bias', layer))
atom_outputs = softmax(cur_out_bias + np.dot(atom_features, cur_out_weights), axis=1)
atom_activations.append(atom_outputs)
# Sum over all atoms within a molecule:
layer_output = sum_and_stack(atom_outputs, array_rep['atom_list'])
all_layer_fps.append(layer_output)
num_layers = len(num_hidden_features)
for layer in xrange(num_layers):
write_to_fingerprint(atom_features, layer)
atom_features = update_layer(weights, layer, atom_features, bond_features, array_rep,
normalize=normalize)
write_to_fingerprint(atom_features, num_layers)
return sum(all_layer_fps), atom_activations, array_rep
def output_layer_fun(weights, smiles):
output, _, _ = output_layer_fun_and_atom_activations(weights, smiles)
return output
def compute_atom_activations(weights, smiles):
_, atom_activations, array_rep = output_layer_fun_and_atom_activations(weights, smiles)
return atom_activations, array_rep
if return_atom_activations:
return output_layer_fun, parser, compute_atom_activations
else:
return output_layer_fun, parser
@memoize
def array_rep_from_smiles(smiles):
"""Precompute everything we need from MolGraph so that we can free the memory asap."""
molgraph = graph_from_smiles_tuple(smiles)
arrayrep = {'atom_features' : molgraph.feature_array('atom'),
'bond_features' : molgraph.feature_array('bond'),
'atom_list' : molgraph.neighbor_list('molecule', 'atom'), # List of lists.
'rdkit_ix' : molgraph.rdkit_ix_array()} # For plotting only.
for degree in degrees:
arrayrep[('atom_neighbors', degree)] = \
np.array(molgraph.neighbor_list(('atom', degree), 'atom'), dtype=int)
arrayrep[('bond_neighbors', degree)] = \
np.array(molgraph.neighbor_list(('atom', degree), 'bond'), dtype=int)
return arrayrep
def build_conv_deep_net(conv_params, net_params, fp_l2_penalty=0.0):
"""Returns loss_fun(all_weights, smiles, targets), pred_fun, combined_parser."""
conv_fp_func, conv_parser = build_convnet_fingerprint_fun(**conv_params)
return build_fingerprint_deep_net(net_params, conv_fp_func, conv_parser, fp_l2_penalty)
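# Hedged usage sketch: conv_params mirrors build_convnet_fingerprint_fun's
# keyword arguments, while the exact contents of net_params depend on
# build_fingerprint_deep_net (defined in build_vanilla_net, not shown here):
#
# conv_params = dict(num_hidden_features=[100, 100], fp_length=512)
# loss_fun, pred_fun, combined_parser = build_conv_deep_net(conv_params, net_params)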
|
def backwards(self, orm):
# Deleting field 'DamageScenario.customlandusegeoimage'
db.delete_column('lizard_damage_damagescenario', 'customlandusegeoimage_id')
models = {
'lizard_damage.benefitscenario': {
'Meta': {'object_name': 'BenefitScenario'},
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'zip_result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zip_risk_a': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'zip_risk_b': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.benefitscenarioresult': {
'Meta': {'object_name': 'BenefitScenarioResult'},
'benefit_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.BenefitScenario']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.damageevent': {
'Meta': {'object_name': 'DamageEvent'},
'depth_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'floodmonth': ('django.db.models.fields.IntegerField', [], {'default': '9'}),
'floodtime': ('django.db.models.fields.FloatField', [], {}),
'height_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'landuse_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'max_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'min_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'repairtime_buildings': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repairtime_roads': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repetition_time': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'table': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_damage.damageeventresult': {
'Meta': {'object_name': 'DamageEventResult'},
'damage_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.damageeventwaterlevel': {
'Meta': {'ordering': "(u'index',)", 'object_name': 'DamageEventWaterlevel'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'waterlevel': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.damagescenario': {
'Meta': {'object_name': 'DamageScenario'},
'calc_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'customheights': ('django.db.models.fields.FilePathField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'customlanduse': ('django.db.models.fields.FilePathField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'customlandusegeoimage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.GeoImage']", 'null': 'True', 'blank': 'True'}),
'damagetable': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scenario_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'lizard_damage.geoimage': {
'Meta': {'object_name': 'GeoImage'},
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.riskresult': {
'Meta': {'object_name': 'RiskResult'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}),
'zip_risk': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.roads': {
'Meta': {'object_name': 'Roads', 'db_table': "u'data_roads'"},
'gid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'gridcode': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
'typ |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tully.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to ac | tivate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
self.saveToolButton.setIconSize(QtCore.QSize(32, 32))
self.saveToolButton.setObjectName(_fromUtf8("saveToolButton"))
self.openToolButton = QtGui.QToolButton(self.frame)
self.openToolButton.setGeometry(QtCore.QRect(30, 0, 32, 32))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/open.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.openToolButton.setIcon(icon3)
self.openToolButton.setIconSize(QtCore.QSize(32, 32))
self.openToolButton.setObjectName(_fromUtf8("openToolButton"))
self.newToolButton = QtGui.QToolButton(self.frame)
self.newToolButton.setGeometry(QtCore.QRect(0, 0, 32, 32))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.newToolButton.setIcon(icon4)
self.newToolButton.setIconSize(QtCore.QSize(32, 32))
self.newToolButton.setObjectName(_fromUtf8("newToolButton"))
self.printToolButton = QtGui.QToolButton(self.frame)
self.printToolButton.setGeometry(QtCore.QRect(770, 0, 32, 32))
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/print.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.printToolButton.setIcon(icon5)
self.printToolButton.setIconSize(QtCore.QSize(32, 32))
self.printToolButton.setObjectName(_fromUtf8("printToolButton"))
self.exportToolButton = QtGui.QToolButton(self.frame)
self.exportToolButton.setGeometry(QtCore.QRect(740, 0, 32, 32))
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/exportpdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.exportToolButton.setIcon(icon6)
self.exportToolButton.setIconSize(QtCore.QSize(32, 32))
self.exportToolButton.setObjectName(_fromUtf8("exportToolButton"))
self.orderDetailsGroupBox = QtGui.QGroupBox(self.centralwidget)
self.orderDetailsGroupBox.setGeometry(QtCore.QRect(0, 40, 801, 71))
self.orderDetailsGroupBox.setObjectName(_fromUtf8("orderDetailsGroupBox"))
self.layoutWidget = QtGui.QWidget(self.orderDetailsGroupBox)
self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 781, 48))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.orderNumberLabel = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.orderNumberLabel.setFont(font)
self.orderNumberLabel.setText(_fromUtf8(""))
self.orderNumberLabel.setObjectName(_fromUtf8("orderNumberLabel"))
self.gridLayout.addWidget(self.orderNumberLabel, 0, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.layoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 0, 2, 1, 1)
self.orderDateEdit = QtGui.QDateEdit(self.layoutWidget)
self.orderDateEdit.setObjectName(_fromUtf8("orderDateEdit"))
self.gridLayout.addWidget(self.orderDateEdit, 0, 3, 1, 1)
self.label_5 = QtGui.QLabel(self.layoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 0, 4, 1, 1)
self.paymentTermsComboBox = QtGui.QComboBox(self.layoutWidget)
self.paymentTermsComboBox.setObjectName(_fromUtf8("paymentTermsComboBox"))
self.gridLayout.addWidget(self.paymentTermsComboBox, 0, 5, 1, 1)
self.label_18 = QtGui.QLabel(self.layoutWidget)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout.addWidget(self.label_18, 1, 0, 1, 1)
self.projectComboBox = QtGui.QComboBox(self.layoutWidget)
self.projectComboBox.setObjectName(_fromUtf8("projectComboBox"))
self.gridLayout.addWidget(self.projectComboBox, 1, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.layoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 1, 2, 1, 1)
self.orderStatusComboBox = QtGui.QComboBox(self.layoutWidget)
self.orderStatusComboBox.setObjectName(_fromUtf8("orderStatusComboBox"))
self.gridLayout.addWidget(self.orderStatusComboBox, 1, 3, 1, 1)
self.taxRateLabel = QtGui.QLabel(self.layoutWidget)
self.taxRateLabel.setObjectName(_fromUtf8("taxRateLabel"))
self.gridLayout.addWidget(self.taxRateLabel, 1, 4, 1, 1)
self.taxRateValueLabel = QtGui.QLabel(self.layoutWidget)
self.taxRateValueLabel.setText(_fromUtf8(""))
self.taxRateValueLabel.setObjectName(_fromUtf8("taxRateValueLabel"))
self.gridLayout.addWidget(self.taxRateValueLabel, 1, 5, 1, 1)
self.supplierGroupBox = QtGui.QGroupBox(self.centralwidget)
self.supplierGroupBox.setGeometry(QtCore.QRect(0, 120, 801, 80))
self.supplierGroupBox.setObjectName(_fromUtf8("supplierGroupBox"))
self.layoutWidget1 = QtGui.QWidget(self.supplierGroupBox)
self.layoutWidget1.setGeometry(QtCore.QRect(280, 12, 512, 62))
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.gridLayout_2 = QtGui.QGridLayout(self.layoutWidget1)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_11 = QtGui.QLabel(self.layoutWidget1)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 0, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.layoutWidget1)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_2.addWidget(self.label_8, 0, 2, 1, 1)
self.supplierPhoneLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierPhoneLabel.setText(_fromUtf8(""))
self.supplierPhoneLabel.setObjectName(_fromUtf8("supplierPhoneLabel"))
self.gridLayout_2.addWidget(self.supplierPhoneLabel, 0, 3, 1, 1)
self.label_9 = QtGui.QLabel(self.layoutWidget1)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1)
self.supplierFaxLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierFaxLabel.setText(_fromUtf8(""))
self.supplierFaxLabel.setObjectName(_fromUtf8("supplierFaxLabel"))
self.gridLayout_2.addWidget(self.supplierFaxLabel, 1, 3, 1, 1)
self.label_7 = QtGui.QLabel(self.layoutWidget1)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_2.addWidget(self.label_7, 2, 0, 1, 1)
self.supplierContactLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierContactLabel.setText(_fromUtf8(""))
self.supplierContactLabel.setObjectName(_fromUtf8("supplierContactLabel"))
self.gridLayout_2.addWidget(self.supplierContactLabel, 2, 1, 1, 1)
self.label_10 = QtGui.QLabel(self.layoutWidget1)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 2, 2, 1, 1)
self.supplierEmailLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierEmailLabel.setText(_fromUtf8(""))
self.supplierEmailLabel.setObjectName(_fromUtf8("supplierEmailLabel"))
self.gridLayout_2.addWidget(self.supplierEmailLabel, 2, 3, 1, 1)
self.supplierAddressLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierAddressLabel.setText(_fromUtf8(""))
self.supplierAddressLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.supplierAddressLabel.setWordWrap(True)
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from getting_started import main
def test_main(cloud_config, capsys):
main(cloud_config.project)
out, _ = capsys.readouterr()
assert re.search(re.compile(
r'Query Results:.hamlet', re.DOTALL), out)
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.models.db.rule_enforcement import rule_enforcement_access
from st2common.persistence.base import Access
class RuleEnforcement(Access):
impl = rule_enforcement_access
@classmethod
def _get_impl(cls):
return cls.impl
|
"""
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
"""Asserts the current environment is suitable for running AWS testsi.
Raises SkipTest with the reason if it's not.
"""
skip_if_no_boto()
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest("AWS keys not found")
def assert_gcs_environ():
if 'GCS_PROJECT_ID' not in os.environ:
raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
try:
is_botocore()
except NotConfigured as e:
raise SkipTest(e)
def get_s3_content_and_delete(bucket, path, with_key=False):
""" Get content from s3 key, and delete key afterwards.
"""
if is_botocore():
import botocore.session
session = botocore.session.get_session()
client = session.create_client('s3')
key = client.get_object(Bucket=bucket, Key=path)
content = key['Body'].read()
client.delete_object(Bucket=bucket, Key=path)
else:
import boto
# assuming boto=2.2.2
bucket = boto.connect_s3().get_bucket(bucket, validate=False)
key = bucket.get_key(path)
content = key.get_contents_as_string()
bucket.delete_key(path)
return (content, key) if with_key else content
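# Hedged usage of the helper above (bucket and key are hypothetical; this
# needs AWS credentials plus an importable botocore or boto):
#
# content = get_s3_content_and_delete('my-test-bucket', 'exports/items.json')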
def get_gcs_content_and_delete(bucket, path):
from google.cloud import storage
client = storage.Client(project=os.environ.get('GCS_PROJECT_ID'))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
bucket.delete_blob(path)
return content, blob
def get_crawler(spidercls=None, settings_dict=None):
"""Return an unconfigured Crawler object. If settings_dict is given, it
will be used to populate the crawler settings with a project level
priority.
"""
from scrapy.crawler import CrawlerRunner
from scrapy.spiders import Spider
runner = CrawlerRunner(settings_dict)
return runner.create_crawler(spidercls or Spider)
def get_pythonpath():
"""Return a PYTHONPATH suitable to use in processes so that they find this
installation of Scrapy"""
scrapy_path = import_module('scrapy').__path__[0]
return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')
def get_testenv():
"""Return a OS environment dict suitable to fork processes that need to import
this installation of Scrapy, instead of a system installed one.
"""
env = os.environ.copy()
env['PYTHONPATH'] = get_pythonpath()
return env
def assert_samelines(testcase, text1, text2, msg=None):
"""Asserts text1 and text2 have the same lines, ignoring differences in
line endings between platforms
"""
testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
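# Hedged examples of the helpers above (settings and command are illustrative):
#
# crawler = get_crawler(settings_dict={'LOG_LEVEL': 'ERROR'})
# env = get_testenv()  # pass env=env to subprocess.Popen so the child process
#                      # imports this checkout of Scrapy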
|
## @package csnStandardModuleProject
# Definition of the methods used for project configuration.
# This should be the only CSnake import in a project configuration.
import csnUtility
import csnProject
import csnBuild
import os.path
import inspect
from csnProject import GenericProject
class StandardModuleProject(GenericProject):
""" GenericProject with applications and modules in specific folders. """
def __init__(self, _name, _type, _sourceRootFolder = None, _categories = None):
if _sourceRootFolder is None:
filename = csnProject.FindFilename(1)
dirname = os.path.dirname(filename)
_sourceRootFolder = csnUtility.NormalizePath(dirname, _correctCase = False)
GenericProject.__init__(self, _name=_name, _type=_type, _sourceRootFolder=_sourceRootFolder, _categories=_categories, _context=csnProject.globalCurrentContext)
self.applicationsProject = None
def AddLibraryModules(self, _libModules):
"""
Adds source files (anything matching *.c??) and public include folders to self, using a set of libmodules.
It is assumed that the root folder of self has a subfolder called libmodules. The subfolders of libmodules should
contain a subfolder called src (e.g. for mymodule, this would be libmodules/mymodule/src).
If the src folder has a subfolder called 'stub', it is also added to the source tree.
_libModules - a list of subfolders of the libmodules folder that should be 'added' to self.
"""
# add sources
sourceRootFolder = self.GetSourceRootFolder()
includeFileExtensions = csnUtility.GetIncludeFileExtensions()
sourceFileExtensions = csnUtility.GetSourceFileExtensions()
for libModule in _libModules:
for stub in ("/stub", ""):
srcFolder = "libmodules/%s/src%s" % (libModule, stub)
srcFolderAbs = "%s/%s" % (sourceRootFolder, srcFolder)
if( os.path.exists(srcFolderAbs) ):
self.AddIncludeFolders([srcFolder])
for extension in sourceFileExtensions:
self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
for extension in includeFileExtensions:
self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
for libModule in _libModules:
for stub in ("/stub", ""):
includeFolder = "libmodules/%s/include%s" % (libModule, stub)
includeFolderAbs = "%s/%s" % (sourceRootFolder, includeFolder)
if( os.path.exists(includeFolderAbs) ):
self.AddIncludeFolders([includeFolder])
for extension in includeFileExtensions:
self.AddSources(["%s/*.%s" % (includeFolder, extension)], _checkExists = 0)
def AddApplications(self, _modules, _pch="", _applicationDependenciesList=None, _holderName=None, _properties = []):
"""
Creates extra CSnake projects, each project building one application in the 'Applications' subfolder of the current project.
_modules - List of the subfolders within the 'Applications' subfolder that must be scanned for applications.
_pch - If not "", this is the include file used to generate a precompiled header for each application.
"""
dependencies = [self]
if _applicationDependenciesList is not None:
dependencies.extend(_applicationDependenciesList)
if _holderName is None:
_holderName = "%sApplications" % self.name
csnProject.globalCurrentContext.SetSuperSubCategory("Applications", _holderName)
if self.applicationsProject is None:
self.applicationsProject = csnBuild.Project(self.name + "Applications", "container", _sourceRootFolder = self.GetSourceRootFolder(), _categories = [_holderName])
#self.applicationsProject.AddSources([csnUtility.GetDummyCppFilename()], _sourceGroup = "CSnakeGeneratedFiles")
self.applicationsProject.AddProjects([self])
self.AddProjects([self.applicationsProject], _dependency = 0)
# look for an 'applications' or 'Applications' folder
_modulesFolder = "%s/applications" % self.GetSourceRootFolder()
if not os.path.exists(_modulesFolder):
_modulesFolder = "%s/Applications" % self.GetSourceRootFolder()
self.__AddApplications(self.applicationsProject, dependencies, _modules, _modulesFolder, _pch, _holderName, _properties)
def __AddApplications(self, _holderProject, _applicationDependenciesList, _modules, _modulesFolder, _pch = "", _holderName=None, _properties = []):
"""
Creates application projects and adds them to _holderProject (using _holderProject.AddProject). The holder
project does not depend on these application projects.
It is assumed that _modules is a list containing subfolders of _modulesFolder.
Each subfolder in _modules should contain source files (.cpp, .cxx or .cc), where each source file corresponds to a single application.
Hence, each source file is used to create a new application project. For example, assuming that the _modulesFolder
is called 'Applications', the file 'Applications/Small/Tiny.cpp' will be used to build the 'Tiny' application.
_applicationDependenciesList - List of projects that each new application project is dependent on.
|
_modulesFolder - Folder containing subfolders with applications.
_modules = List of subfolders of _modulesFolder that should be processed.
_pch - If not "", this is the C++ include file which is used for building a precompiled header file for each application.
"""
for module in _modules:
moduleFolder = "%s/%s" % (_modulesFolder, module)
sourceFiles = []
headerFiles = []
for extension in csnUtility.GetSourceFileExtensions():
sourceFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
for extension in csnUtility.GetIncludeFileExtensions():
headerFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
for sourceFile in sourceFiles:
if os.path.isdir(sourceFile):
continue
name = os.path.splitext( os.path.basename(sourceFile) )[0]
name = name.replace(' ', '_')
if _holderName is None:
_holderName = _holderProject.name
app = csnBuild.Project("%s_%s" % (_holderName, name), "executable", _sourceRootFolder = _holderProject.GetSourceRootFolder())
app.AddIncludeFolders([moduleFolder])
app.AddProjects(_applicationDependenciesList)
app.AddSources([sourceFile])
app.AddProperties( _properties )
# add header files so that they appear in visual studio
app.AddSources(headerFiles)
if( _pch != "" ):
app.SetPrecompiledHeader(_pch)
_holderProject.AddProjects([app])
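# Hedged usage sketch of the API above; the project and module names are
# hypothetical and the folder layout follows the docstrings:
#
# project = StandardModuleProject("MyToolkit", "library")
# project.AddLibraryModules(["core", "io"])   # expects libmodules/core/src, ...
# project.AddApplications(["Small"], _pch="MyToolkitPCH.h")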
|
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mapproxy.util.collections import LRU, ImmutableDictList
from nose.tools import eq_, raises
class TestLRU(object):
@raises(KeyError)
def test_missing_key(self):
lru = LRU(10)
lru['foo']
def test_contains(self):
lru = LRU(10)
lru['foo1'] = 1
assert 'foo1' in lru
assert 'foo2' not in lru
def test_repr(self):
lru = LRU(10)
lru['foo1'] = 1
assert 'size=10' in repr(lru)
assert 'foo1' in repr(lru)
def test_getitem(self):
lru = LRU(10)
lru['foo1'] = 1
lru['foo2'] = 2
eq_(lru['foo1'], 1)
eq_(lru['foo2'], 2)
def test_get(self):
lru = LRU(10)
lru['foo1'] = 1
eq_(lru.get('foo1'), 1)
eq_(lru.get('foo1', 2), 1)
def test_get_default(self):
lru = LRU(10)
lru['foo1'] = 1
eq_(lru.get('foo2'), None)
eq_(lru.get('foo2', 2), 2)
def test_delitem(self):
lru = LRU(10)
lru['foo1'] = 1
assert 'foo1' in lru
del lru['foo1']
assert 'foo1' not in lru
def test_empty(self):
lru = LRU(10)
assert bool(lru) == False
lru['foo1'] = '1'
assert bool(lru) == True
def test_setitem_overflow(self):
lru = LRU(2)
lru['foo1'] = 1
lru['foo2'] = 2
lru['foo3'] = 3
assert 'foo1' not in lru
assert 'foo2' in lru
assert 'foo3' in lru
def test_length(self):
lru = LRU(2)
eq_(len(lru), 0)
lru['foo1'] = 1
eq_(len(lru), 1)
lru['foo2'] = 2
eq_(len(lru), 2)
lru['foo3'] = 3
eq_(len(lru), 2)
del lru['foo3']
eq_(len(lru), 1)
class TestImmutableDictList(object):
def test_named(self):
res = ImmutableDictList([('one', 10), ('two', 5), ('three', 3)])
assert res[0] == 10
assert res[2] == 3
assert res['one'] == 10
assert res['three'] == 3
assert len(res) == 3
def test_named_iteritems(self):
res = ImmutableDictList([('one', 10), ('two', 5), ('three', 3)])
itr = res.iteritems()
eq_(next(itr), ('one', 10))
eq_(next(itr), ('two', 5))
eq_(next(itr), ('three', 3))
try:
next(itr)
except StopIteration:
pass
else:
assert False, 'StopIteration expected'
#!/usr/bin/env python
#########################################
#
# fasta2tax.py
#
########################################
import sys, os
import subprocess
import argparse
import pymysql as MySQLdb
import json
py_pipeline_path = os.path.expanduser('~/programming/py_mbl_sequencing_pipeline')
#my $rdpFile = "$inputfile.rdp";
print "rdp file: start\n";
#my $rdpFile = dirname($inputfile)."/$project--$dataset.fa.rdp";
rdpFile = inputfile+".rdp";
#my $rdpFile = "$project--$dataset.fa.rdp";
print "rdp file: rdpFile\n";
loadFile1 = inputfile+".load1"
loadFile2 = inputfile+".load2"
outFile = inputfile+".rdpout"
logFile = inputfile+".rdplog"
# $logFile => /usr/local/www/vamps/tmp/fasta2tax.log
if DEBUG:
print "DEBUG: Invoked with arguments (post processing):\n"
print "DEBUG: user: user\n"
print "DEBUG: inputfile: inputfile\n"
print "DEBUG: project: project\n"
print "DEBUG: dataset: dataset\n"
print "DEBUG: path_to_apps: path_to_apps\n"
print "DEBUG: database: database\n"
print "DEBUG: table1: table1\n"
print "DEBUG: table2: table2\n"
print "DEBUG: db_user: db_user\n"
print "DEBUG: db_password: db_password\n"
print "DEBUG: db_hostname: db_hostname\n"
#######################################
#
# Do sanity checking for presence of
# values from argument processing...
#
#######################################
#######################################
#
# Run RDP and rdp_file_creator...
#
#######################################
def run(project):
path_to_rdp = py_pipeline_path+"/bin/rdp"
print path_to_rdp
rdpCmd = path_to_rdp + ' ' + inputfile + ' ' + rdpFile
print "Preparing to execute RDP Command: %s" % rdpCmd
rdpCmdOutput = subprocess.check_output(rdpCmd, shell=True)
#my $rdpCheckCmd = "$path_to_apps/rdp_checker -q -log $logFile -b 80 -project \"$project\" -dataset \"$dataset\" -f1 $loadFile1 -f2 $loadFile2 $rdpFile";
rdpCheckCmd = (py_pipeline_path + "/bin/rdp_file_creator -s " + database
+ " -q -log " + logFile + " -b 80 -project \"" + project + "\""
+ " -dataset \"" + dataset + "\" -f1 " + loadFile1
+ " -f2 " + loadFile2 + " " + rdpFile)
rdpCheckOutput = subprocess.check_output(rdpCheckCmd, shell=True)
# $DEBUG && print "DEBUG: rdp_file_creator exited with result code: $rdpCheckExitCode<br><br>\n";
# if ($DEBUG) {
# my @rdpCheckOutput_lines = split /\n/, $rdpCheckOutput;
# foreach my $output_line (@rdpCheckOutput_lines) {
# print "DEBUG: $output_line<br>\n";
# }
# }
# my $rdpCheckExitString;
# if ($rdpCheckExitCode == 0) {
# $rdpCheckExitString = "0";
# } elsif ($rdpCheckExitCode == 253) {
# $rdpCheckExitString = "RDP boot score value is not valid.";
# } elsif ($rdpCheckExitCode == 254) {
# $rdpCheckExitString = "Taxonomy file is not valid.";
# } elsif ($rdpCheckExitCode == 255) {
# $rdpCheckExitString = "Internal error: Could not locate taxonomy file.";
# } else {
# $rdpCheckExitString = "Unknown error.";
# }
#
# if ($rdpCheckExitCode != 0) {
# print "Error performing RDP taxonomic checks: $rdpCheckExitString. Data has not been uploaded. Project=\"$project\", Dataset=\"$dataset\", User name=\"$user\"\n";
# exit $rdpCheckExitCode;
# }
#######################################
#
# Load the final taxonomy into the tables specified in the @tables array...
# It would be really nice if we could roll this back on failure.
#
#######################################
# my $dsn = "dbi:mysql:$database:$db_hostname";
# #$DEBUG && print "DEBUG: Connecting to database\n$dsn\n";
#
# my $dbh = DBI->connect($dsn, $db_user, $db_password) or die "Unable to connect to $database database\n";
#
# if ($use_transactions) {
# # Encapsulate the changes to these tables in a transaction...
# my $query = "START TRANSACTION";
# my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n";
# $handle->execute or die "Unable to execute query: $query\n";
# }
#
# my %load_files = ($table1 => $loadFile1, $table2 => $loadFile2);
# foreach (keys %load_files) {
# # Get a table...
# # Table1 = vamps_data_cube_uploads, Table2 = vamps_junk_data_cube_pipe;
# my $table = $_;
#
# # Clear out the old data and replace with the new
# #$DEBUG && print "DEBUG: Removing old project/dataset records from table $dsn.$table...\n";
# my $cleanQuery = "delete from $table where project='" . $project ."' and dataset = '" . $dataset . "'";
# #$DEBUG && print "DEBUG: Preparing query: \"$cleanQuery\"...\n";
# my $clean_h = $dbh->prepare($cleanQuery) or die "Unable to prepare query: $cleanQuery\n";
# $clean_h->execute or die "Unable to execute query: $cleanQuery\n";
#
# # Add the new data into the table
# #$DEBUG && print "DEBUG: Loading final taxonomy into the table $dsn.$table...\n";
#
# # Set up the query to Load the data
# my $loadQuery = "load data local infile '" . $load_files{$table} . "' replace into table $table fields terminated by '\t' lines terminated by '\n'
# set classifier='RDP'";
#
# #$DEBUG && print "DEBUG: Preparing query: \"$loadQuery\"...\n";
#
# my $load_h = $dbh->prepare($loadQuery) or die "Unable to prepare query: $loadQuery\n";
#
# $load | _h->execute or die "Unable to execute query: $loadQuery | \n";
#
# if ($dbh->err) {
# if ($use_transactions) {
# # Encapsulate the changes to these tables in a transaction...
# my $query = "ROLLBACK";
# my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n";
# $handle->execute or die "Unable to execute query: $query\n";
# }
# print "Application Error: An error has occured while trying to load the data into the MySQL database. The following query was used: \"$loadQuery\".\n";
# print "The database engine reports the error as: \"".$dbh->errstr."\".\n";
# print "This is a fatal error. Exiting.\n";
# exit 1;
# }
# }
#
# if ($use_transactions) {
# # commit the transaction...
# my $query = "COMMIT";
# my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n";
# $handle->execute or die "Unable to execute query: $query\n";
# }
#$DEBUG && print "DEBUG: Cleaning out tmp files...\n";
# foreach my $i ($inputfile, $rdpFile, $loadFile1, $loadFile2, $logFile)
# {
# #my $rmErr = system("rm -f $i");
# }
#$DEBUG && print "DEBUG: Execution complete.\n";
#print "Done and clean from fasta2tax.pl<br>\n";
|
"""
Project additional elements
"""
from .menu_button import *
from .new_key_qframe import *
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the README.rst and LICENSE
# files, which you should have received as part of this distribution.
import setuptools
# noinspection PyPep8Naming
from raspi_pir import __version__ as VERSION
DEPS = ['RPi.Sensor>=0.5.3']
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Operating System :: Unix',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Topic :: Utilities',
'Topic :: Home Automation',
'Topic :: System :: Hardware',
'Topic :: Terminals'
]
with open("README.rst", "r") as fp:
sensor_long_description = fp.read()
setuptools.setup(
name='RPi.PIR',
version=VERSION,
author="Richard von Kellner",
author_email="richard.kellner [at] gmail.com",
url="https://github.com/ricco386/RPi",
description='PIR sensor state monitor',
long_description=sensor_long_description,
license="MIT",
packages=setuptools.find_packages(),
classifiers=CLASSIFIERS,
install_requires=DEPS,
scripts=['bin/raspi-pir'],
include_package_data=True
)
|
#!/usr/bin/python
#requires the following:
#sudo apt-get install curl
#curl http://apt.wxwidgets.org/key.asc | apt-key add -
#sudo apt-get update
#sudo apt-get install python-wxgtk2.8 python-wxtools wx2.8-i18n
#sudo apt-get install python-gdata
import wx
import os
import sys
def pinger():
f = os.popen('ping -c 1 google.com')
y = ''
for x in f.readlines():
y += x
a = y.find('--- google.com ping statistics ---')
return a
#a = pinger()
"""abc = wx.ShowMessageDialog(None,-1,'No Internet connectoin found!. Will now exit','Error')
abc.ShowModal()
abc.Destroy()
self.Destroy()
return False"""
#uplist = ['115.com','2shared','4shared','Badongo','Data.hu','DepositFiles','divShare','dl.free.fr','Humyo','Mediafire*','Megaupload','Netload.in','Rapidshare*','Sendspace','Uploading.com','Usershare','x7.to','ZShare']
uplist = ['Megaupload','2shared','Mediafire*','ZShare']
uplist2 =['megaupload','2shared','mediafire','zshare']
class FinalFrame(wx.Frame):
def __init__(self):
pass
mupl = ''
tshl = ''
medl = ''
zshl = ''
def add(self,typ,string):
if typ == 0:
self.mupl += string + '\n\n'
elif typ == 1:
self.tshl += string + '\n\n'
elif typ == 2:
self.medl += string + '\n\n'
elif typ == 3:
self.zshl += string + '\n\n'
def doit(self):
self.display(self.mupl,self.tshl,self.medl,self.zshl)
self.Show()
def display(self,megaupload_links,tshared_links,mediafire_links,zshare_links):
wx.Frame.__init__(self,None,-1,'Upload Complete!',size=(600,550))
self.panel = wx.Panel(self)
wx.StaticText(self.panel,-1,'Your Upload has completed :) Here are your links:',pos = (30,30))
wx.StaticText(self.panel,-1,'Megaupload links:',pos=(30,80))
mupld_link_box = wx.TextCtrl(self.panel,-1,megaupload_links,size=(540,80),pos=(30,100),style=wx.TE_MULTILINE | wx.TE_READONLY)
wx.StaticText(self.panel,-1,'2shared links:',pos=(30,190))
tshrd_link_box = wx.TextCtrl(self.panel,-1,tshared_links,size=(540,80),pos=(30,210),style=wx.TE_MULTILINE | wx.TE_READONLY)
wx.StaticText(self.panel,-1,'Mediafire links:',pos=(30,300))
mfire_link_box = wx.TextCtrl(self.panel,-1,mediafire_links,size=(540,80),pos=(30,320),style=wx.TE_MULTILINE | wx.TE_READONLY)
wx.StaticText(self.panel,-1,'ZShare Links:',pos=(30,410))
zshre_link_box = wx.TextCtrl(self.panel,-1,zshare_links,size=(540,80),pos=(30,430),style=wx.TE_MULTILINE | wx.TE_READONLY)
class MyFrame(wx.Frame):
fframe = FinalFrame()
def __init__(self):
self.param = ''
self.check=0
self.args = sys.argv[1:]
if len(self.args)==0:
self.check=1
wx.Frame.__init__(self,None,-1,'Pshare',size=(600,330))
self.panel = wx.Panel(self)
wx.StaticText(self.panel,-1,'Welcome to the Plowshare Uploader GUI.\n\nThis app lets you upload any file to any of the supported file-sharing sites. To proceed, please select one (or more) of the uploading sites:',pos = (30,30), size = (540,70))
wx.StaticText(self.panel,-1,'Available Sites to upload:',pos = (30,160))
self.choice_box = wx.ListBox(self.panel,-1,(30,120),(540,100),uplist, wx.LB_EXTENDED | wx.LB_HSCROLL)
wx.StaticText(self.panel,-1,'*Upload to these sites may NOT work at the moment; developers are trying to fix the issues',pos=(30,225),size=(540,50))
if self.check==1:
self.button_browse_files = wx.Button(self.panel,-1,'Browse for files',pos=(420,270),size=(150,30))
self.button_upload = wx.Button(self.panel,-1,'Start Upload',pos=(30,270),size=(150,30))
self.button_login_mupload = wx.Button(self.panel,-1,'Login to Megaupload Account',pos=(190,270),size = (220,30))
self.Bind(wx.EVT_BUTTON,self.browsefiles,self.button_browse_files)
else:
self.button_upload = wx.Button(self.panel,-1,'Start Upload',pos=(30,270),size=(265,30))
self.button_login_mupload = wx.Button(self.panel,-1,'Login to Megaupload Account',pos=(305,270),size = (265,30))
self.Bind(wx.EVT_BUTTON,self.upload,self.button_upload)
self.Bind(wx.EVT_BUTTON,self.login_mega,self.button_login_mupload)
def upload(self,evt):
temp1 = len(self.args)
temp2 = len(self.choice_box.GetSelections())
if temp1==0:
nofile_dlg = wx.MessageDialog(None,'No files chosen!\nChoose at least 1 file','Error',wx.OK | wx.ICON_ERROR)
nofile_dlg.ShowModal()
nofile_dlg.Destroy()
return
if temp2==0:
nofile_dlg = wx.MessageDialog(None,'No upload sites chosen!\nChoose at least 1 upload site','Error',wx.OK | wx.ICON_ERROR)
nofile_dlg.ShowModal()
nofile_dlg.Destroy()
return
self.udlg = wx.ProgressDialog('Processing Request','Wait while we upload your file(s)',maximum=60)
self.udlg.Update(1)
y = 0
temp2 = 30/temp1
val = 'bash ~/.plowshare/src/upload.sh '
for x in self.args:
val += '\"' + x + '\" '
y += temp2
self.udlg.Update(y)
y = 30
self.linkss = []
#print val
temp3 = self.choice_box.GetSelections()
#print temp3
for x in temp3:
temp4 = val
if uplist2[x] == 'megaupload':
temp4 += self.param
temp4 += uplist2[x]
#print temp4
file1=os.popen(temp4)
file1_lines = file1.readlines()
if len(file1_lines)==0:
err_dlg = wx.MessageDialog(None,'Upload Failed! Possible Reasons:\n1. No Internet connection\n2. Upload error (choose different upload\nsite in this case)','Error',wx.OK | wx.ICON_ERROR)
err_dlg.ShowModal()
err_dlg.Destroy()
self.udlg.Update(60)
self.udlg.Destroy()
return;
for x2 in file1_lines:
ind = x2.find('(http:')
if ind != -1:
x2 = 'Link\n====================\n' + x2[0:ind] + '\n\nDelete_link\n====================\n' + x2[ind+1:]
self.fframe.add(x,x2)
y += temp2
self.udlg.Update(y)
self.fframe.doit()
self.udlg.Update(60)
self.udlg.Destroy()
##
self.panel.Destroy()
self.Destroy()
def login_mega(self,evt):
self.username = ''
self.password = ''
ubox = wx.TextEntryDialog(None,"Please Enter Username","UserName",'username')
if ubox.ShowModal()==wx.ID_OK:
self.username = ubox.GetValue()
ubox.Destroy()
ubox = wx.TextEntryDialog(None,'Please Enter Password','Password','********',wx.TE_PASSWORD | wx.OK | wx.CANCEL)
if ubox.ShowModal()==wx.ID_OK:
self.password = ubox.GetValue()
self.param = ' -a ' + self.username + ':' + self.password + ' '
#print '\n\n' + self.param + '\n\n'
ubox.Destroy()
def browsefiles(self,evt):
filed = wx.FileDialog(None,"Choose a file",style=wx.FD_MULTIPLE)
filed.ShowModal()
a = filed.GetPaths()
# print a
if len(a) > 0:
self.args = a
# print len(self.args)
filed.Destroy()
class MyApp(wx.App):
def OnInit(self):
frame = MyFrame()
frame.Show()
return True
if __name__=='__main__':
app = MyApp(redirect=True)
app.MainLoop()
|
# for Python3 we need a fully qualified name import
from mappyscript._mappyscript import version, version_number, load, loads, dumps, create_request, load_map_from_params, Layer, convert_sld
import os
import pytest
import numpy as np
from imageio import imread
def compare_2_images(validator_path, output_path):
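    # Resolve both paths relative to this test module and assert the two
    # images match pixel-for-pixel.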
val_abs_path = os.path.join(os.path.dirname(__file__), validator_path)
out_abs_path = os.path.join(os.path.dirname(__file__), output_path)
val_img = imread(val_abs_path, pilmode='RGB')
out_img = imread(out_abs_path, pilmode='RGB')
assert np.all(np.equal(val_img, out_img))
def clean_test_results(output_file_no_ext):
os.remove("tests/" + output_file_no_ext + "_probs.jpg")
os.remove("tests/" + output_file_no_ext + "_seg.jpg")
os.remove("tests/" + output_file_no_ext + "_seg_blended.jpg")
os.remove("tests/" + output_file_no_ext + "_seg_read.jpg")
def test_main_flip_ade20k(cli_args_ade):
from pspnet import main
main(cli_args_ade)
compare_2_images("ade20k_test_probs.jpg", "validators/ade20k_test_probs.jpg")
compare_2_images("ade20k_test_seg.jpg", "validators/ade20k_test_seg.jpg")
compare_2_images("ade20k_test_seg_read.jpg", "validators/ade20k_test_seg_read.jpg")
clean_test_results("ade20k_test")
@pytest.mark.skip
def test_main_flip_cityscapes(cli_args_cityscapes):
"""
TODO: Add images
:param cli_args_cityscapes:
:return:
"""
from pspnet import main
main(cli_args_cityscapes)
compare_2_images("cityscapes_test_probs.jpg", "validators/cityscapes_test_probs.jpg")
compare_2_images("cityscapes_test_seg.jpg", "validators/cityscapes_test_seg.jpg")
compare_2_images("cityscapes_test_seg_read.jpg", "validators/cityscapes_test_seg_read.jpg")
clean_test_results("cityscapes_test")
@pytest.mark.skip
def test_main_flip_voc(cli_args_voc):
"""
TODO: Add images
:param cli_args_voc:
:return:
"""
from pspnet import main
main(cli_args_voc)
compare_2_images("pascal_voc_test_probs.jpg", "validators/pascal_voc_test_probs.jpg")
compare_2_images("pascal_voc_test_seg.jpg", "validators/pascal_voc_test_seg.jpg")
compare_2_images("pascal_voc_test_seg_read.jpg", "validators/pascal_voc | _test_seg_read.jpg")
clean_test_results("pascal_voc_test") |
from django.db import models
# from django.contrib.gis.geoip import GeoIP
#
# g = GeoIP()
# Create your models here.
class TempMail(models.Model):
mailfrom = models.EmailField()
mailsubj = models.CharField(max_length=20)
mailrcvd = models.DateTimeField()
    mailhdrs = models.CharField(max_length=1024)  # max_length is assumed here; CharField requires one
class SavedMail(models.Model):
mailrcvd = models.DateTimeField()
    mailhdrs = models.CharField(max_length=1024)  # max_length is assumed here; CharField requires one
organization = models.ForeignKey('Organization')
class Organization(models.Model):
    emailsuffix = models.CharField(max_length=255)
class Follower(models.Model):
    email = models.EmailField()
from django.contrib import admin
from api.models import *
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Sighting)
admin.site.register(Picture)
admin.site.register(UserComment)
admin.site.register(ExpertComment)
admin.site.register(SightingFAQ)
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from io import BytesIO
from devtools_testutils.aio import recorded_by_proxy_async
from azure.core.exceptions import ServiceRequestError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.v2_1.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models
from azure.ai.formrecognizer.aio import FormRecognizerClient
from azure.ai.formrecognizer import FormRecognizerApiVersion
from asynctestcase import AsyncFormRecognizerTest
from preparers import FormRecognizerPreparer
from preparers import GlobalClientPreparer as _GlobalClientPreparer
FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestIdDocumentsAsync(AsyncFormRecognizerTest):
def teardown(self):
self.sleep(4)
@pytest.mark.skip()
@FormRecognizerPreparer()
    @recorded_by_proxy_async
    async def test_identity_document_bad_endpoint(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
with open(self.identity_document_license_jpg, "rb") as fd:
my_file = fd.read()
with pytest.raises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key))
async with client:
poller = await client.begin_recognize_identity_documents(my_file)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_damaged_file_bytes_fails_autodetect_content_type(self, **kwargs):
client = kwargs.pop("client")
damaged_pdf = b"\x50\x44\x46\x55\x55\x55" # doesn't match any magic file numbers
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
damaged_pdf
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_damaged_file_bytes_io_fails_autodetect(self, **kwargs):
client = kwargs.pop("client")
damaged_pdf = BytesIO(b"\x50\x44\x46\x55\x55\x55") # doesn't match any magic file numbers
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
damaged_pdf
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_passing_bad_content_type_param_passed(self, **kwargs):
client = kwargs.pop("client")
with open(self.identity_document_license_jpg, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
my_file,
content_type="application/jpeg"
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_auto_detect_unsupported_stream_content(self, **kwargs):
client = kwargs.pop("client")
with open(self.unsupported_content_py, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
my_file
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy_async
async def test_identity_document_stream_transform_jpg(self, client):
responses = []
def callback(raw_response, _, headers):
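            # Keep both the deserialized service response and the transformed
            # prebuilt model so the test can compare them field by field.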
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_id_document = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_id_document)
with open(self.identity_document_license_jpg, "rb") as fd:
my_file = fd.read()
async with client:
poller = await client.begin_recognize_identity_documents(
identity_document=my_file,
include_field_elements=True,
cls=callback
)
result = await poller.result()
raw_response = responses[0]
returned_model = responses[1]
id_document = returned_model[0]
actual = raw_response.analyze_result.document_results[0].fields
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
self.assertFormFieldsTransformCorrect(id_document.fields, actual, read_results)
# check page range
assert id_document.page_range.first_page_number == document_results[0].page_range[0]
assert id_document.page_range.last_page_number == document_results[0].page_range[1]
# Check page metadata
self.assertFormPagesTransformCorrect(id_document.pages, read_results, page_results)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy_async
async def test_identity_document_jpg_include_field_elements(self, client):
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
async with client:
poller = await client.begin_recognize_identity_documents(id_document, include_field_elements=True)
result = await poller.result()
assert len(result) == 1
id_document = result[0]
self.assertFormPagesHasValues(id_document.pages)
for field in id_document.fields.values():
if field.name == "CountryRegion":
assert field.value == "USA"
continue
elif field.name == "Region":
assert field.value == "Washington"
else:
self.assertFieldElementsHasValues(field.value_data.field_elements, id_document.page_range.first_page_number)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_identity_document_continuation_token(self, **kwargs):
client = kwargs.pop("client")
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
async with client:
initial_poller = await client.begin_recognize_identity_documents(id_document)
cont_token = initial_poller.continuation_token()
poller = await client.begin_recognize_identity_documents(None, continuation_token=cont_token)
result = await poller.result()
assert result is not None
await initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
@FormRecognizerPreparer()
@FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
async def test_identity_document_v2(self, **kwargs):
client = kwargs.pop("client")
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
with pytest.raises(ValueError) as e:
async with client:
await client.begin_recognize_identity_documents(id_document)
assert "Method 'begin_recognize_identity_documents' is only available for API version V2_1 and up" in str(e.value)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy_async
async def test_pages_kwarg_specified(self, client):
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
async with client:
poller = await client.begin_recognize_identity_documents(id_document, pages=["1"])
assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
result = await poller.result()
assert result
self.browser_integration.port)
def search(self, text, type, adv=False):
"""Search on the MusicBrainz website."""
lookup = self.get_file_lookup()
getattr(lookup, type + "Search")(text, adv)
def browser_lookup(self, item):
"""Lookup the object's metadata on the MusicBrainz website."""
lookup = self.get_file_lookup()
metadata = item.metadata
albumid = metadata["musicbrainz_albumid"]
trackid = metadata["musicbrainz_trackid"]
# Only lookup via MB IDs if matched to a DataObject; otherwise ignore and use metadata details
if isinstance(item, Track) and trackid:
lookup.trackLookup(trackid)
elif isinstance(item, Album) and albumid:
lookup.albumLookup(albumid)
else:
lookup.tagLookup(
metadata["albumartist"] if item.is_album_like() else metadata["artist"],
metadata["album"],
metadata["title"],
metadata["tracknumber"],
'' if item.is_album_like() else str(metadata.length),
item.filename if isinstance(item, File) else '')
def get_files_from_objects(self, objects, save=False):
"""Return list of files from list of albums, clusters, tracks or files."""
files = set()
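        # a set, so a file reachable through several objects is returned only once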
for obj in objects:
files.update(obj.iterfiles(save))
return list(files)
def _file_saved(self, result=None, error=None):
if error is None:
file, old_filename, new_filename = result
del self.files[old_filename]
self.files[new_filename] = file
def save(self, objects):
"""Save the specified objects."""
files = self.get_files_from_objects(objects, save=True)
for file in files:
file.save(self._file_saved, self.tagger.config.setting)
def load_album(self, id, discid=None):
id = self.mbid_redirects.get(id, id)
album = self.albums.get(id)
if album:
return album
album = Album(id, discid=discid)
self.albums[id] = album
self.album_added.emit(album)
album.load()
return album
def load_nat(self, id, node=None):
self.create_nats()
nat = self.get_nat_by_id(id)
if nat:
return nat
nat = NonAlbumTrack(id)
self.nats.tracks.append(nat)
self.nats.update(True)
if node:
nat._parse_recording(node)
else:
nat.load()
return nat
def get_nat_by_id(self, id):
if self.nats is not None:
for nat in self.nats.tracks:
if nat.id == id:
return nat
def get_release_group_by_id(self, id):
return self.release_groups.setdefault(id, ReleaseGroup(id))
def remove_files(self, files, from_parent=True):
"""Remove files from the tagger."""
for file in files:
if self.files.has_key(file.filename):
file.clear_lookup_task()
self._acoustid.stop_analyze(file)
del self.files[file.filename]
file.remove(from_parent)
def remove_album(self, album):
"""Remove the specified album."""
self.log.debug("Removing %r", album)
album.stop_loading()
self.remove_files(self.get_files_from_objects([album]))
del self.albums[album.id]
if album.release_group:
album.release_group.remove_album(album.id)
if album == self.nats:
self.nats = None
self.album_removed.emit(album)
def remove_cluster(self, cluster):
"""Remove the specified cluster."""
if not cluster.special:
self.log.debug("Removing %r", cluster)
files = list(cluster.files)
cluster.files = []
cluster.clear_lookup_task()
self.remove_files(files, from_parent=False)
self.clusters.remove(cluster)
self.cluster_removed.emit(cluster)
def remove(self, objects):
"""Remove the specified objects."""
files = []
for obj in objects:
if isinstance(obj, File):
files.append(obj)
elif isinstance(obj, Track):
files.extend(obj.linked_files)
elif isinstance(obj, Album):
self.remove_album(obj)
elif isinstance(obj, Cluster):
self.remove_cluster(obj)
if files:
self.remove_files(files)
def _lookup_disc(self, disc, result=None, error=None):
self.restore_cursor()
if error is not None:
QtGui.QMessageBox.critical(self.window, _(u"CD Lookup Error"),
_(u"Error while reading CD:\n\n%s") % error)
else:
disc.lookup()
def lookup_cd(self, action):
"""Reads CD from the selected drive and tries to lookup the DiscID on MusicBrainz."""
if isinstance(action, QtGui.QAction):
device = unicode(action.text())
else:
device = self.config.setting["cd_lookup_device"].split(",", 1)[0]
disc = Disc()
self.set_wait_cursor()
self.other_queue.put((
partial(disc.read, encode_filename(device)),
partial(self._lookup_disc, disc),
QtCore.Qt.LowEventPriority))
@property
def use_acoustid(self):
return self.config.setting["fingerprinting_system"] == "acoustid"
def analyze(self, objs):
"""Analyze the file(s)."""
        files = self.get_files_from_objects(objs)
        for file in files:
file.set_pending()
if self.use_acoustid:
self._acoustid.analyze(file, partial(file._lookup_finished, 'acoustid'))
# =======================================================================
# Metadata-based lookups
# =======================================================================
def autotag(self, objects):
for obj in objects:
if obj.can_autotag():
obj.lookup_metadata()
# =======================================================================
# Clusters
# =======================================================================
def cluster(self, objs):
"""Group files with similar metadata to 'clusters'."""
self.log.debug("Clustering %r", objs)
if len(objs) <= 1 or self.unmatched_files in objs:
files = list(self.unmatched_files.files)
else:
files = self.get_files_from_objects(objs)
fcmp = lambda a, b: (
cmp(a.discnumber, b.discnumber) or
cmp(a.tracknumber, b.tracknumber) or
cmp(a.base_filename, b.base_filename))
for name, artist, files in Cluster.cluster(files, 1.0):
QtCore.QCoreApplication.processEvents()
cluster = self.load_cluster(name, artist)
for file in sorted(files, fcmp):
file.move(cluster)
def load_cluster(self, name, artist):
for cluster in self.clusters:
cm = cluster.metadata
if name == cm["album"] and artist == cm["artist"]:
return cluster
cluster = Cluster(name, artist)
self.clusters.append(cluster)
self.cluster_added.emit(cluster)
return cluster
# =======================================================================
# Utils
# =======================================================================
def set_wait_cursor(self):
"""Sets the waiting cursor."""
QtGui.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.WaitCursor))
def restore_cursor(self):
"""Restores the cursor set by ``set_wait_cursor``."""
QtGui.QApplication.restoreOverrideCursor()
def refresh(self, objs):
for obj in objs:
obj.load()
@classmethod
def instance(cls):
return cls.__instance
def num_files(self):
return len(self.files)
def help():
print """Usage: %s [OPTIONS] [FILE] [FILE] ...
Options:
  -d, --debug
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM9_if_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM9_if_IsolatedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM9_if_IsolatedLHS, self).__init__(name='HMM9_if_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM9_if')
# Set the node attributes
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
        @param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
        #===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
#!/usr/bin/env python
import argparse
import logging
import os
import shutil
import sys
import glob
# Location of saved templates
SAVE_DIR = os.environ.get("RECYCLE_TEMPLATES_DIR", "~/.recycle")
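# Python 2 compatibility: alias raw_input to input so the prompts below work
# unchanged on both interpreter versions.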
try:
input = raw_input
except NameError:
pass
def should_overwrite(typeOfThing, path):
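    # Prompt the user (y/n) whether the existing file or template at `path`
    # should be replaced; returns True if it may be overwritten.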
assert os.path.exists(path)
nameOfThing = get_name(path)
    logging.debug("{} already exists. Asking to overwrite...".format(path))
res = ""
while res != "y" and res != "n":
prompt = "{0} {1} already exists. Do you want to replace it? " \
"(y/n) ".format(typeOfThing, nameOfThing)
res = input(prompt)
res = res.lower()
if res == "y":
logging.debug("Overwrite approved. Deleting {}".format(path))
return True
else:
logging.debug("Overwrite denied.")
return False
def copy(contents, dest):
if not os.path.isdir(dest):
os.makedirs(dest)
for obj in contents:
name = os.path.basename(os.path.normpath(obj))
destName = os.path.join(dest, name)
if os.path.exists(destName):
if should_overwrite("File or directory", destName):
if os.path.isdir(destName):
shutil.rmtree(destName)
else:
os.remove(destName)
else:
continue
assert not os.path.exists(destName)
if os.path.isdir(obj):
shutil.copytree(obj, destName)
elif os.path.isfile(obj):
shutil.copy(obj, dest)
else:
raise IOError("Source doest not exist!")
def get_name(path):
return os.path.basename(os.path.normpath(path))
def get_save_path(templateName):
global SAVE_DIR
return os.path.join(SAVE_DIR, templateName)
def setup_logging():
global SAVE_DIR
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%m-%d %H:%M",
filename=os.path.join(SAVE_DIR, "recycle.log"),
filemode="w")
console = logging.StreamHandler()
# INFO or higher goes to console
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)-8s %(message)s")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
def init():
global SAVE_DIR
SAVE_DIR = os.path.expanduser(SAVE_DIR)
SAVE_DIR = os.path.expandvars(SAVE_DIR)
SAVE_DIR = os.path.abspath(SAVE_DIR)
if not os.path.isdir(SAVE_DIR):
os.makedirs(SAVE_DIR)
setup_logging()
logging.debug("Using Python version {}".format(sys.version))
def handle_new(name, files):
save_path = get_save_path(name)
fileList = []
for filename in files:
fileList += [os.path.abspath(f) for f in glob.glob(filename)]
# Remove duplicates
fileList = list(set(fileList))
    if len(fileList) == 0:
logging.error("No files found matching '{}'".format(files))
return
if os.path.isdir(save_path):
# Boilerplate with that name already exists
if should_overwrite("Template", save_path):
handle_delete(name)
else:
return
assert not os.path.isdir(save_path)
logging.debug("Creating new template '{}' from {}".format(name, files))
try:
copy(fileList, save_path)
except IOError as e:
logging.error(e.strerror)
assert os.path.isdir(save_path)
logging.debug("Boilerplate created!")
def handle_use(name):
save_path = get_save_path(name)
if os.path.isdir(save_path):
logging.debug("Using template '{}'".format(name))
contents = os.listdir(save_path)
contentPaths = [os.path.join(save_path, c) for c in contents]
try:
copy(contentPaths, os.getcwd())
except IOError as e:
logging.error("Your recycle directory doesn't seem to exist...")
else:
logging.error("No template with the name '{}' was found!".format(name))
def handle_list():
global SAVE_DIR
assert os.path.isdir(SAVE_DIR)
    names = next(os.walk(SAVE_DIR))[1]
    for name in names:
        # os.walk yields bare directory names, so no SAVE_DIR prefix stripping is needed
        print(name)
def handle_delete(name):
save_path = get_save_path(name)
if os.path.isdir(save_path):
shutil.rmtree(save_path)
else:
logging.error("No template with the name '{}' was found!".format(name))
assert not os.path.isdir(save_path)
def handle_location():
global SAVE_DIR
print(os.path.normpath(SAVE_DIR) + os.sep)
def parseargs():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
new_parser = subparsers.add_parser(
"new", help="Create a new template or overwrite an existing one")
new_parser.add_argument(
"name", type=str, help="The name under which to save this template")
new_parser.add_argument(
"files", type=str, nargs="+",
help="The file or directory to save as the template")
new_parser.set_defaults(mode="new")
use_parser = subparsers.add_parser(
"use", help="Insert existing template in the current directory")
use_parser.add_argument(
"name", type=str, help="The name of the template to use")
use_parser.set_defaults(mode="use")
list_parser = subparsers.add_parser(
"list", help="List the available template")
list_parser.set_defaults(mode="list")
delete_parser = subparsers.add_parser(
"delete", help="Delete a template")
delete_parser.add_argument(
"name", type=str, help="The name of the template to delete")
delete_parser.set_defaults(mode="delete")
location_parser = subparsers.add_parser(
"location", help="Print the current location of the templates directory")
location_parser.set_defaults(mode="location")
return parser.parse_args()
def main():
args = parseargs()
init()
    mode = getattr(args, "mode", None)  # no subcommand leaves args without a mode attribute
    if mode is None:
        logging.error("Mode must be provided. Use --help for more information.")
        return
    if mode == "new":
        handle_new(args.name, args.files)
    elif mode == "use":
        handle_use(args.name)
    elif mode == "list":
        handle_list()
    elif mode == "delete":
        handle_delete(args.name)
    elif mode == "location":
        handle_location()
else:
logging.error("Invalid mode")
if __name__ == "__main__":
main()
# -*- coding: utf-8 -*-
"""
Ozone Bricklet Plugin
Copyright (C) 2015 Olaf Lüke <olaf@tinkerforge.com>
ozone.py: Ozone Bricklet Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QVBoxLayout, QLabel, QHBoxLayout, QSpinBox
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings.bricklet_ozone import BrickletOzone
from brickv.plot_widget import PlotWidget
from brickv.async_call import async_call
from brickv.callback_emulator import CallbackEmulator
class OzoneConcentrationLabel(QLabel):
def setText(self, text):
text = "Ozone Concentration: " + text + " ppb (parts per billion)"
super(OzoneConcentrationLabel, self).setText(text)
class Ozone(PluginBase):
def __init__(self, *args):
PluginBase.__init__(self, BrickletOzone, *args)
self.ozone = self.device
self.cbe_ozone_concentration = CallbackEmulator(self.ozone.get_ozone_concentration,
self.cb_ozone_concentration,
self.increase_error_count)
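        # The emulator polls get_ozone_concentration at the period set in
        # start() and feeds each reading to cb_ozone_concentration.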
self.ozone_concentration_label = OzoneConcentrationLabel('Ozone Concentration: ')
self.current_value = None
plot_list = [['', Qt.red, self.get_current_value]]
self.plot_widget = PlotWidget('Ozone Concentration [ppb]', plot_list)
layout_h2 = QHBoxLayout()
layout_h2.addStretch()
layout_h2.addWidget(self.ozone_concentration_label)
layout_h2.addStretch()
layout = QVBoxLayout(self)
layout.addLayout(layout_h2)
layout.addWidget(self.plot_widget)
self.spin_average = QSpinBox()
self.spin_average.setMinimum(1)
self.spin_average.setMaximum(50)
self.spin_average.setSingleStep(1)
self.spin_average.setValue(50)
self.spin_average.editingFinished.connect(self.spin_average_finished)
layout_h1 = QHBoxLayout()
layout_h1.addWidget(QLabel('Length of moving average:'))
layout_h1.addWidget(self.spin_average)
layout_h1.addStretch()
layout.addLayout(layout_h1)
def get_moving_average_async(self, average):
self.spin_average.setValue(average)
def start(self):
async_call(self.ozone.get_moving_average, None, self.get_moving_average_async, self.increase_error_count)
async_call(self.ozone.get_ozone_concentration, None, self.cb_ozone_concentration, self.increase_error_count)
self.cbe_ozone_concentration.set_period(100)
self.plot_widget.stop = False
def stop(self):
self.cbe_ozone_concentration.set_period(0)
        self.plot_widget.stop = True
def destroy(self):
        pass
def get_url_part(self):
return 'ozone'
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletOzone.DEVICE_IDENTIFIER
def get_current_value(self):
return self.current_value
def cb_ozone_concentration(self, ozone_concentration):
self.current_value = ozone_concentration
self.ozone_concentration_label.setText(str(ozone_concentration))
def spin_average_finished(self):
self.ozone.set_moving_average(self.spin_average.value())
#!/usr/bin/python
import sys
import time
import datetime
import re
import ConfigParser
import os
from operator import attrgetter
scriptdir = os.path.abspath(os.path.dirname(sys.argv[0]))
conffile = scriptdir + "/ovirt-vm-rolling-snapshot.conf"
Config = ConfigParser.ConfigParser()
if not os.path.isfile(conffile):
print "Config file %s does not exists. Exiting." % conffile
sys.exit(1)
Config.read(conffile)
if len(Config.sections()) < 1:
print "Config file is not valid. Exiting."
sys.exit(1)
basetime = datetime.datetime.now()
for vmname in Config.sections():
starttime = time.time()
try:
etime_to_keep = int(Config.get(vmname, 'etime_to_keep'))
hourly_to_keep = int(Config.get(vmname, 'hourly_to_keep'))
daily_to_keep = int(Config.get(vmname, 'daily_to_keep'))
weekly_to_keep = int(Config.get(vmname, 'weekly_to_keep'))
monthly_to_keep = int(Config.get(vmname, 'monthly_to_keep'))
time_hours = "%02d" % int(Config.get(vmname, 'time_hours'))
time_minutes = "%02d" % int(Config.get(vmname, 'time_minutes'))
time_weekday = "%d" % int(Config.get(vmname, 'time_weekday'))
time_monthweek = int(Config.get(vmname, 'time_monthweek'))
if time_monthweek < 1 or time_monthweek > 5:
time_monthweek = 1
if time_weekday == "7":
time_weekday = "0"
last_to_keep = {"____": etime_to_keep, "H___": hourly_to_keep, "HD__": daily_to_keep, "HDW_": weekly_to_keep,
"HDWM": monthly_to_keep}
hpos = dpos = wpos = mpos = "_"
if basetime.strftime("%M") == time_minutes: # minutes is 00
hpos = "H"
if basetime.strftime("%H") == time_hours: # hour is 00
dpos = "D"
if basetime.strftime("%w") == time_weekday: # day of week is sunday
wpos = "W"
if (int(basetime.strftime("%d")) <= (7 * time_monthweek)) and (
int(basetime.strftime("%d")) > (7 * (time_monthweek - 1))): # is the first week of month
mpos = "M"
snap_time_id = hpos + dpos + wpos + mpos
deleteonly = ''
if len(sys.argv) > 1:
snap_time_id = sys.argv[1]
if not last_to_keep[snap_time_id]:
last_to_keep[snap_time_id] = 1
if len(sys.argv) > 2:
deleteonly = sys.argv[2]
if last_to_keep[snap_time_id]:
print
print "------------------------------------------------------------"
print "VM name: " + vmname
try:
ovirtsdk
except:
import ovirtsdk.api
from ovirtsdk.xml import params
api = ovirtsdk.api.API(
url=Config.get(vmname, 'server'),
username=Config.get(vmname, 'username'),
password=Config.get(vmname, 'password'),
insecure=True,
debug=False
)
vm = api.vms.get(vmname)
print "Begin backup of VM '%s' at %s" % (vmname, datetime.datetime.now().isoformat(" "))
print "VM status: %s" % str(vm.get_status().state)
if deleteonly == 'deleteonly':
print "Skipping snapshot creation."
else:
snap_description = "Rolling snapshot " + snap_time_id + " at " + datetime.datetime.now().isoformat(" ")
print "Creating Snapshot '" + snap_description + "'"
snapcreation = vm.snapshots.add(params.Snapshot(description=snap_description))
snaptoclone = ""
snap_status = ""
sys.stdout.write( "Snapshot in progress..." )
sys.stdout.flush()
while True:
snaptoclone = vm.snapshots.get(id=snapcreation.get_id())
snap_status = snaptoclone.get_snapshot_status()
if snap_status == "locked":
                        time.sleep(5)
sys.stdout.write('.')
sys.stdout.flush()
else:
                        print
break
for snapi in vm.get_snapshots().list():
snapi_id = snapi.get_id()
if vm.snapshots.get(id=snapi_id).description == snap_description:
snap_status = "ok"
break
else:
snap_status = "error"
if snap_status != "ok":
print "Snapshot creation ERROR!!!"
continue
print "Snapshot done"
time.sleep(1)
snapshots_param = params.Snapshots(snapshot=[params.Snapshot(id=snaptoclone.get_id())])
snaptodel = []
for snapi in vm.get_snapshots().list():
snapi_id = snapi.get_id()
snapi_descr = vm.snapshots.get(id=snapi_id).description
snapi_time_match = re.match('^Rolling snapshot ' + snap_time_id + ' at', snapi_descr)
if snapi_time_match:
snaptodel.append(snapi)
snaptodel = sorted(snaptodel, key=attrgetter('creation_time'))
if last_to_keep[snap_time_id] > 0:
del snaptodel[-last_to_keep[snap_time_id]:]
for snapitodel in snaptodel:
print "Deleting old snapshot '" + snapitodel.description + "'"
snapitodel.delete(async=False)
oldsndelstatus = sndelstatus = ''
while True:
try:
sndelstatus = vm.snapshots.get(id=snapitodel.get_id()).get_snapshot_status()
except Exception, e:
break
if sndelstatus == oldsndelstatus:
sys.stdout.write('.')
else:
if sndelstatus == 'ok':
break
sys.stdout.write( "Delete snapshot in progress..." )
oldsndelstatus = sndelstatus
sys.stdout.flush()
time.sleep(5)
print
if sndelstatus == 'ok':
print "Delete snapshot ERROR!!!"
else:
print "Delete snapshot done."
eltime = time.time() - starttime
print "Finished backup of VM '%s' at %s. %d seconds." % (vmname,
datetime.datetime.now().isoformat(" "),
eltime)
print
except Exception, e:
print e
print "Backup ERROR!!!"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Pytroll developers
# Author(s):
# Adam Dybbroe <Firstname.Lastname @ smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unittesting the helper functions for the AAPP-runner.
"""
import logging
import unittest
from datetime import datetime
from unittest.mock import patch
from aapp_runner.helper_functions import check_if_scene_is_unique
from aapp_runner.read_aapp_config import AappL1Config, AappRunnerConfig
from aapp_runner.tests.test_config import (TEST_YAML_CONTENT_OK,
create_config_from_yaml)
class TestProcessConfigChecking(unittest.TestCase):
"""Test various functions checking on | the (non-static) config during processing."""
def setUp(self):
self.config_complete = create_config_from_yaml(TEST_YAML_CONTENT_OK)
@patch('aapp_runner.read_aapp_config.load_config_from_file')
def test_check_if_scene_is_unique_return_value(self, config):
"""Test checking if the current scene is unique or if it has been processed earlier."""
        config.return_value = self.config_complete
myfilename = "/tmp/mytestfile"
aapp_run_config = AappRunnerConfig(myfilename, 'norrkoping', 'xl-band')
aapp_config = AappL1Config(aapp_run_config.config, 'xl-band')
aapp_config['platform_name'] = 'metop03'
aapp_config['collection_area_id'] = 'euron1'
aapp_config['starttime'] = datetime(2022, 1, 8, 12, 49, 50)
aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0, 26)
aapp_config.job_register = {}
result = check_if_scene_is_unique(aapp_config)
assert result
aapp_config.job_register = {'metop03': [(datetime(2022, 1, 8, 12, 49, 50),
datetime(2022, 1, 8, 13, 0, 26), 'euron1')]}
# An EARS scene (same platform and overlapping time interval and over
# the same area of interest) arrives shortly after:
aapp_config['platform_name'] = 'metop03'
aapp_config['collection_area_id'] = 'euron1'
aapp_config['starttime'] = datetime(2022, 1, 8, 12, 50)
aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0)
result = check_if_scene_is_unique(aapp_config)
assert not result
@patch('aapp_runner.read_aapp_config.load_config_from_file')
def test_check_if_scene_is_unique_logging(self, config):
"""Test the logging when checking if the current scene is unique."""
config.return_value = self.config_complete
myfilename = "/tmp/mytestfile"
aapp_run_config = AappRunnerConfig(myfilename, 'norrkoping', 'xl-band')
aapp_config = AappL1Config(aapp_run_config.config, 'xl-band')
aapp_config.job_register = {'metop03': [(datetime(2022, 1, 8, 12, 49, 50),
datetime(2022, 1, 8, 13, 0, 26), 'euron1')]}
# An EARS scene (same platform and overlapping time interval and over
# the same area of interest) arrives shortly after:
aapp_config['platform_name'] = 'metop03'
aapp_config['collection_area_id'] = 'euron1'
aapp_config['starttime'] = datetime(2022, 1, 8, 12, 50)
aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0)
expected_logging = ['INFO:aapp_runner.helper_functions:first message',
'INFO:aapp_runner.helper_functions:Processing of scene metop03 2022-01-08 12:49:50 2022-01-08 13:00:26 with overlapping time has been launched previously. Skip it!']
with self.assertLogs('aapp_runner.helper_functions', level='INFO') as cm:
logging.getLogger('aapp_runner.helper_functions').info('first message')
_ = check_if_scene_is_unique(aapp_config)
self.assertEqual(cm.output, expected_logging)
with self.assertLogs('aapp_runner.helper_functions', level='WARNING') as cm:
logging.getLogger('aapp_runner.helper_functions').warning('first message')
_ = check_if_scene_is_unique(aapp_config)
self.assertEqual(len(cm.output), 1)
# Scene is different (different satellite) from previous:
aapp_config['platform_name'] = 'metop01'
aapp_config['collection_area_id'] = 'euron1'
aapp_config['starttime'] = datetime(2022, 1, 8, 12, 50)
aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0)
with self.assertLogs('aapp_runner.helper_functions', level='INFO') as cm:
logging.getLogger('aapp_runner.helper_functions').info('first message')
result = check_if_scene_is_unique(aapp_config)
assert result
self.assertEqual(len(cm.output), 1)
from tests.test_helper import *
from braintree.resource import Resource
class TestResource(unittest.TestCase):
def test_verify_keys_allows_wildcard_keys(self):
signature = [
{"foo": [{"bar": ["__any_key__"]}]}
]
params = {
"foo[bar][lower]": "lowercase",
"foo[bar][UPPER]": "uppercase",
"foo[bar][123]": "numeric",
"foo[bar][under_scores]": "underscores",
"foo[bar][dash-es]": "dashes",
"foo[bar][ABC-abc_123]": "all together"
}
Resource.verify_keys(params, signature)
@raises(KeyError)
def test_verify_keys_escapes_brackets_in_signature(self):
signature = [
{"customer": [{"custom_fields": ["__any_key__"]}]}
]
params = {
"customer_id": "value",
}
Resource.verify_keys(params, signature)
def test_verify_keys_works_with_array_param(self):
signature = [
{"customer": ["one", "two"]}
]
params = {
"customer": {
"one": "foo"
}
}
Resource.verify_keys(params, signature)
@raises(KeyError)
def test_verify_keys_raises_on_bad_array_param(self):
signature = [
{"customer": ["one", "two"]}
]
params = {
"customer": {
"invalid": "foo"
}
}
Resource.verify_keys(params, signature)
    def test_verify_keys_works_with_arrays(self):
signature = [
{"add_ons": [{"update": ["existing_id", "quantity"]}]}
]
params = {
"add_ons": {
"update": [
{
"existing_id": "foo",
"quantity": 10 |
}
]
}
}
Resource.verify_keys(params, signature)
@raises(KeyError)
def test_verify_keys_raises_with_invalid_param_in_arrays(self):
signature = [
{"add_ons": [{"update": ["existing_id", "quantity"]}]}
]
params = {
"add_ons": {
"update": [
{
"invalid": "foo",
"quantity": 10
}
]
}
}
Resource.verify_keys(params, signature)
def test_verify_keys_allows_text(self):
text_string = u"text_string"
assert isinstance(text_string, TestHelper.text_type)
signature = [
{"customer": [{"custom_fields": [text_string]}]}
]
params = {
"customer": {
"custom_fields": {
text_string : text_string
}
}
}
Resource.verify_keys(params, signature)
def test_verify_keys_allows_raw_data(self):
raw_string = str.encode("raw_string")
assert isinstance(raw_string, TestHelper.raw_type)
signature = [
{"customer": [{"custom_fields": [raw_string]}]}
]
params = {
"customer": {
"custom_fields": {
raw_string : raw_string
}
}
}
Resource.verify_keys(params, signature)
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import sys
import argparse
import pkg_resources
from difflib import get_close_matches
from .iaas_client.actions import ActionManager as IaaSActionManager
from .qs_client.actions import ActionManager as QSActionManager
SERVICES = ('iaas', 'qs')
INDENT = ' ' * 2
NEWLINE = '\n' + INDENT
def exit_due_to_invalid_service(suggest_services=None):
usage = NEWLINE + '%(prog)s <service> <action> [parameters]\n\n' \
+ 'Here are valid services:\n\n' \
+ INDENT + NEWLINE.join(SERVICES)
if suggest_services:
usage += '\n\nInvalid service, maybe you meant:\n ' \
+ ','.join(suggest_services)
parser = argparse.ArgumentParser(
prog = 'qingcloud',
usage = usage,
)
parser.print_help()
sys.exit(-1)
def exit_due_to_invalid_action(service, suggest_actions=None):
usage = NEWLINE + '%(prog)s <action> [parameters]\n\n' \
+ 'Here are valid actions:\n\n' \
+ INDENT + NEWLINE.join(get_valid_actions(service))
if suggest_actions:
usage += '\n\nInvalid action, maybe you meant:\n ' \
+ NEWLINE.join(suggest_actions)
parser = argparse.ArgumentParser(
prog = 'qingcloud %s' % service,
usage = usage,
)
parser.print_help()
sys.exit(-1)
def get_valid_actions(service):
if service == 'iaas':
        return IaaSActionManager.get_valid_actions()
elif service == 'qs':
return QSActionManager.get_valid_actions()
def get_action(service, action):
if service == 'iaas':
        return IaaSActionManager.get_action(action)
elif service == 'qs':
return QSActionManager.get_action(action)
def check_argument(args):
if len(args) < 2:
exit_due_to_invalid_service()
if args[1].lower() in ('--version', '-v'):
version = pkg_resources.require("qingcloud-cli")[0].version
print('qingcloud-cli version %s' % version)
sys.exit(0)
service = args[1]
if service not in SERVICES:
suggest_services = get_close_matches(service, SERVICES)
exit_due_to_invalid_service(suggest_services)
if len(args) < 3:
exit_due_to_invalid_action(service)
valid_actions = get_valid_actions(service)
if args[2] not in valid_actions:
suggest_actions = get_close_matches(args[2], valid_actions)
exit_due_to_invalid_action(service, suggest_actions)
def main():
args = sys.argv
check_argument(args)
action = get_action(args[1], args[2])
action.main(args[3:])
# coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import traceback
from . import generic
from sickbeard import logger, tvcache, helpers
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
class GrabTheInfoProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, 'GrabTheInfo')
self.url_base = 'http://grabthe.info/'
self.urls = {'config_provider_home_uri': self.url_base,
'login': self.url_base + 'takelogin.php',
'cache': self.url_base + 'browse.php?%s',
'search': '&search=%s',
'get': self.url_base + '%s'}
self.categories = 'c56=1&c8=1&c61=1&c10=1&incldead=0&blah=0'
self.url = self.urls['config_provider_home_uri']
self.username, self.password, self.minseed, self.minleech = 4 * [None]
self.cache = GrabTheInfoCache(self)
def _do_login(self):
logged_in = lambda: 'uid' in self.session.cookies and 'pass' in self.session.cookies
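        # The tracker sets 'uid' and 'pass' session cookies once authenticated.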
if logged_in():
return True
if self._check_auth():
login_params = {'username': self.username, 'password': self.password}
response = helpers.getURL(self.urls['login'], post_data=login_params, session=self.session)
if response and logged_in():
return True
msg = u'Failed to authenticate with %s, abort provider'
if response and 'Username or password incorrect' in response:
msg = u'Invalid username or password for %s. Check settings'
logger.log(msg % self.name, logger.ERROR)
return False
def _do_search(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
if not self._do_login():
return results
items = {'Season': [], 'Episode': [], 'Cache': []}
rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download'}.items())
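        # case-insensitive patterns used to locate the details ('info') and
        # download ('get') anchors within each torrent table row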
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
search_url = self.urls['cache'] % self.categories
if 'cache' != mode.lower():
search_url += self.urls['search'] % search_string
html = self.get_url(search_url)
cnt = len(items[mode])
try:
if not html or self._has_no_results(html):
raise generic.HaltParseException
html = html.replace('<?xml version="1.0" encoding="iso-8859-1"?>', '')
html = re.sub(r'(</td>)[^<]*</td>', r'\1', html)
html = re.sub(r'(<a[^<]*)<a[^<]*?href=details[^<]*', r'\1', html)
with BS4Parser(html, 'html.parser') as soup:
shows_found = False
torrent_rows = soup.find_all('tr')
for index, row in enumerate(torrent_rows):
if 'type' == row.find_all('td')[0].get_text().strip().lower():
shows_found = index
break
if not shows_found or 2 > (len(torrent_rows) - shows_found):
raise generic.HaltParseException
for tr in torrent_rows[1 + shows_found:]:
try:
info = tr.find('a', href=rc['info'])
if None is info:
continue
title = (('title' in info.attrs.keys() and info['title']) or info.get_text()).strip()
download_url = tr.find('a', href=rc['get'])
if None is download_url:
continue
seeders, leechers = [int(tr.find_all('td')[x].get_text().strip()) for x in (-2, -1)]
if 'Cache' != mode and (seeders < self.minseed or leechers < self.minleech):
continue
except (AttributeError, TypeError, KeyError):
continue
if title:
items[mode].append((title, self.urls['get']
                                    % str(download_url['href'].lstrip('/')), seeders))
except generic.HaltParseException:
pass
except Exception:
logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
self._log_result(mode, len(items[mode]) - cnt, search_url)
# for each search mode sort all the items by seeders
            'Cache' != mode and items[mode].sort(key=lambda tup: tup[2], reverse=True)
results += items[mode]
return results
def find_propers(self, search_date=datetime.datetime.today()):
return self._find_propers(search_date)
def _get_episode_search_strings(self, ep_obj, add_string='', **kwargs):
return generic.TorrentProvider._get_episode_search_strings(self, ep_obj, add_string, sep_date='|', use_or=False)
class GrabTheInfoCache(tvcache.TVCache):
def __init__(self, this_provider):
tvcache.TVCache.__init__(self, this_provider)
self.minTime = 20 # cache update frequency
def _getRSSData(self):
return self.provider.get_cache_data()
provider = GrabTheInfoProvider()
###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('MyPing')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified themself as an advanced
    # user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('MyPing', True)
MyPing = conf.registerPlugin('MyPing')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(MyPing, 'someConfigVariableName',
# registry.Boolean(False, _("""Help for someConfigVariableName.""")))
conf.registerChannelValue(MyPing, 'enable',
registry.Boolean(False, """Should plugin work in this channel?"""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
import pf
from Var import var
import numpy,string
from Glitch import Glitch
"""A faster version of nextTok(), using memory allocated (once only)
using numpy, and using functions written in C. The slow, pure
python module is NexusToken.py. This version is about twice as fast.
Which one is used is under the control of var.nexus_doFastNextTok.
This one does not work for CStrings, so we need to revert to the old
way whenever CStrings are encountered."""
class NexusToken(object):
def __init__(self, max):
self.max = numpy.array([max], numpy.int32)
self.tokLen = numpy.array([0], numpy.int32)
self.tok = numpy.array(['x'] * int(self.max), 'c')
self.embeddedCommentLen = numpy.array([0], numpy.int32)
self.embeddedComment = numpy.array(['x'] * int(self.max), 'c')
self.savedCommentLen = numpy.array([0], numpy.int32)
self.filePtr = None
self.nexusToken = pf.newNexusToken(var._nexus_writeVisibleComments,
var._nexus_getP4CommandComments,
var._nexus_getWeightCommandComments,
var._nexus_getAllCommandComments,
var._nexus_getLineEndingsAsTokens,
self.max,
self.tokLen,
self.tok,
self.embeddedCommentLen,
self.embeddedComment,
self.savedCommentLen)
#self.previousTok = None
#self.previousEmbeddedComment = None
nt = NexusToken(300)
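# Module-level token buffer; checkLineLengths() below replaces it with a
# larger one if any line in the file exceeds the current maximum.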
def checkLineLengths(flob):
global nt
#print 'NexusToken2.checkLineLengths here.'
flob.seek(0,0)
longest = pf.nexusTokenCheckLineLengths(nt.nexusToken, flob)
flob.seek(0,0)
#print 'The longest line length is %i' % longest
if longest > nt.max:
nt = NexusToken(longest)
def nextTok(flob):
#print 'NexusToken2.nextTok() here. nt.nexusToken = %i, max=%s, tokLen=%s, type(tokLen)=%s' % (nt.nexusToken, nt.max, nt.tokLen[0], type(nt.tokLen))
#assert type(nt.tokLen) == type(numpy.array([0], numpy.int32))
#print "NexusToken2.nextTok(). nt.wordIsFinished[0]=%i, nt.tokLen=%i, previousTok=%s, previousComment=%s" % (nt.wordIsFinished[0], nt.tokLen[0], nt.previousTok, nt.previousEmbeddedComment)
#if nt.wordIsFinished[0]:
# assert nt.tokLen[0]
# ret = nt.tok[:int(nt.tokLen[0])].tostring()
# nt.tokLen[0] = 0
# nt.wordIsFinished[0] = 0
# #nt.previousTok = ret
# return ret
    #print ' x1 NexusToken2.nextTok() here. savedCommentLen=%i' % nt.savedCommentLen[0]
if nt.savedCommentLen[0]:
ret = nt.embeddedComment[:int(nt.savedCommentLen[0])].tostring()
nt.savedCommentLen[0] = 0
return ret
    pf.nextToken(nt.nexusToken, flob)
#print ' x2 tokLen = %i, embeddedCommentLen[0] = %i' % (nt.tokLen[0], nt.embeddedCommentLen[0])
if nt.embeddedCommentLen[0]:
ret = nt.embeddedComment[:int(nt.embeddedCommentLen[0])].tostring()
nt.embeddedCommentLen[0] = 0
#nt.previousEmbeddedComment = ret
return ret
else:
if nt.tokLen[0]:
ret = nt.tok[:int(nt.tokLen[0])].tostring()
nt.tokLen[0] = 0
#nt.previousTok = ret
return ret
else:
return None
def safeNextTok(flob, caller=None):
t = nextTok(flob)
if not t:
if caller:
gm = ["safeNextTok(), called from %s" % caller]
else:
gm = ["safeNextTok()"]
gm.append("Premature Death.")
gm.append("Ran out of understandable things to read in nexus file.")
raise Glitch, gm
else:
return t
def nexusSkipPastNextSemiColon(flob):
pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob)
def nexusSkipPastBlockEnd(flob):
"""Read up to and including a block 'end' or 'endblock'."""
# This should only ever be issued after a semi-colon
complaintHead = '\nNexus: nexusSkipPastBlockEnd()'
if hasattr(flob, 'name'):
complaintHead += " file: %s" % flob.name
while 1:
tok = nextTok(flob)
if tok:
lowTok = string.lower(tok)
if lowTok == 'end' or lowTok == 'endblock':
tok2 = nextTok(flob)
if not tok2 or tok2 != ';':
gm = [complaintHead]
gm.append(" Expecting a semicolon after %s" % tok)
if not tok2:
gm.append("Got nothing.")
else:
gm.append("Got '%s'" % tok2)
raise Glitch, gm
return
elif lowTok == ';': # for pathological cases where the last command is a ';' by itself.
continue
else:
pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob)
else:
break
gm = [complaintHead]
gm.append("Failed to find either 'end' or 'endblock'")
gm.append("Premature end of file?")
raise Glitch, gm
import sqlite3
from sklearn import linear_model
import numpy as np
import pandas as pd
import datetime
import sys
conn = sqlite3.connect(sys.argv[1])
c = conn.cursor()
c.execute("select _id, name from tracks")
rows = c.fetchall()
track_names = pd.DataFrame([{'track_name': row[1]} for row in rows])
track_ids = [int(row[0]) for row in rows]
track_cnt = len(track_ids)
print "Found {0} tracks.".format(track_cnt)
c.execute("select * from ticks")
last_tick = c.fetchall()[-1]
last_day = datetime.date(last_tick[2], last_tick[3], last_tick[4])
def window(day, n=20):
"return a matrix of the last `n` days before day `day`"
tick_date = "date(year || '-' || substr('0' || month, -2, 2) || " + \
"'-' || substr('0' || day, -2, 2))"
max_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day)
min_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day-datetime.timedelta(n))
c.execute("select * from ticks where {d} <= {max_date} and {d} >= {min_date}".\
format(d=tick_date, max_date=max_date, min_date=min_date))
# ticktrix is the matrix containing the ticks
ticktrix = np.zeros((n, track_cnt))
for row in c.fetchall():
print row
try:
row_date = datetime.date(row[2], row[3], row[4])
        except ValueError:
            print "Error constructing date from", row
            continue  # row_date would be unbound below; skip rows with invalid dates
x = -(row_date - day).days
y = track_ids.index(int(row[1]))
if x < n:
ticktrix[x, y] = 1
return ticktrix
last_day -= datetime.timedelta(1)
print "Fitting for d | ay:", last_day
my_window = window(last_day)
target_data = my_window[0,:].T
training_data = my_window[1:,:].T
print "Target:", target_data.shape
print target_data
print "Training:", training_data.shape
print training_data
reg = linear_model.LinearRegression()
reg.fit(training_data, target_data)
print "Coefficents:", reg.coef_.shape
print reg.coef_
print "Applied to training data:"
print np.dot(training_data, reg.coef_)
print "Forecast"
#print np.dot(my_window[:19,:].T, reg.coef_)
#print track_names
df = pd.DataFrame()
df['track'] = track_names
df['prob'] = pd.Series(np.dot(my_window[:19,:].T, reg.coef_) * 100.0)
print df
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0002_advertiser_logo'),
]
operations = [
migrations.RemoveField(
model_name='advertiser',
name='logo',
),
]
# -*- coding: utf-8 -*-
###############################################################################
#
# UpdateSigningCertificate
# Changes the status of the specified signing certificate from active to disabled, or vice versa. This action can be used to disable a user's signing certificate as part of a certificate rotation workflow.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateSigningCertificate(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateSigningCertificate Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateSigningCertificate, self).__init__(temboo_session, '/Library/Amazon/IAM/UpdateSigningCertificate')
def new_input_set(self):
return UpdateSigningCertificateInputSet()
def _make_result_set(self, result, path):
return UpdateSigningCertificateResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateSigningCertificateChoreographyExecution(session, exec_id, path)
class UpdateSigningCertificateInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateSigningCertificate
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(UpdateSigningCertificateInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(UpdateSigningCertificateInputSet, self)._set_input('AWSSecretKeyId', value)
def set_CertificateId(self, value):
"""
Set the value of the CertificateId input for this Choreo. ((required, string) The ID of the signing certificate you want to update.)
"""
super(UpdateSigningCertificateInputSet, self)._set_input('CertificateId', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(UpdateSigningCertificateInputSet, self)._set_input('ResponseFormat', value)
def set_Status(self, value):
"""
Set the value of the Status input for this Choreo. ((required, string) The status you want to assign to the certificate. Active means the certificate can be used for API calls to AWS, while Inactive means the certificate cannot be used.)
"""
super(UpdateSigningCertificateInputSet, self)._set_input('Status', value)
def set_UserName(self, value):
"""
Set the value of the UserName input for this Choreo. ((optional, string) Name of the user the signing certificate belongs to.)
"""
super(UpdateSigningCertificateInputSet, self)._set_input('UserName', value)
class UpdateSigningCertificateResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateSigningCertificate Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
"""
return self._output.get('Response', None)
class UpdateSigningCertificateChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateSigningCertificateResultSet(response, path)
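# (Added usage sketch) How this Choreo is typically driven; the session
# credentials and input values below are placeholders, not real values:
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY')
#   choreo = UpdateSigningCertificate(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AWSAccessKeyId('AKIA...')   # placeholder
#   inputs.set_AWSSecretKeyId('SECRET')    # placeholder
#   inputs.set_CertificateId('CERT_ID')    # placeholder
#   inputs.set_Status('Inactive')          # 'Active' or 'Inactive'
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())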
from __future__ import print_function, unicode_literals
import inspect
import six
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass
from rest_framework import serializers
from .. import fields
from ..utils import (
initialize_class_using_reference_object,
reduce_attr_dict_from_base_classes,
)
from .fields import ISO8601DateTimeField
SERIALIZER_FORM_FIELD_MAPPING = {
fields.BooleanField: forms.BooleanField,
fields.CharField: forms.CharField,
fields.ChoiceField: forms.ChoiceField,
fields.DateTimeField: ISO8601DateTimeField,
fields.EmailField: forms.EmailField,
fields.IntegerField: forms.IntegerField,
serializers.BooleanField: forms.BooleanField,
serializers.CharField: forms.CharField,
serializers.ChoiceField: forms.ChoiceField,
serializers.DateTimeField: ISO8601DateTimeField,
serializers.EmailField: forms.EmailField,
serializers.IntegerField: forms.IntegerField,
}
class SerializerFormOptions(object):
def __init__(self, options=None, name=None):
self.serializer = getattr(options, 'serializer', None)
self.fields = getattr(options, 'fields', [])
self.exclude = getattr(options, 'exclude', [])
self.field_mapping = getattr(options, 'field_mapping', {})
assert self.serializer is not None, (
'{}.Meta.serializer must be provided'
''.format(name)
)
assert issubclass(self.serializer, serializers.BaseSerializer), (
'{}.Meta.serializer must be a subclass of DRF serializer'
''.format(name)
)
class SerializerFormMeta(DeclarativeFieldsMetaclass):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, SerializerForm)]
except NameError:
# We are defining SerializerForm itself
parents = None
meta = attrs.pop('Meta', None)
if not parents or attrs.pop('_is_base', False):
return super(SerializerFormMeta, cls).__new__(cls, name, bases, attrs)
attrs['_meta'] = options = SerializerFormOptions(meta, name=name)
new_attrs = cls.get_form_fields_from_serializer(bases, options)
# attrs should take priority in case a specific field is overwritten
new_attrs.update(attrs)
return super(SerializerFormMeta, cls).__new__(cls, name, bases, new_attrs)
@classmethod
def get_field_mapping(cls, bases, options):
mapping = reduce_attr_dict_from_base_classes(
bases,
lambda i: getattr(getattr(i, '_meta', None), 'field_mapping', {}),
SERIALIZER_FORM_FIELD_MAPPING
)
mapping.update(options.field_mapping)
return mapping
@classmethod
def get_form_fields_from_serializer(cls, bases, options):
fields = {}
mapping = cls.get_field_mapping(bases, options)
for name, field in options.serializer._declared_fields.items():
if field.read_only:
continue
if name not in options.fields or name in options.exclude:
continue
form_field_class = mapping.get(type(field))
if not form_field_class:
raise TypeError(
'{} is not mapped to appropriate form field class. '
'Please add it to the mapping via `field_mapping` '
'Meta attribute.'
''.format(type(field))
)
fields[name] = initialize_class_using_reference_object(field, form_field_class)
return fields
class SerializerFormBase(forms.Form):
def __init__(self, *args, **kwargs):
super(SerializerFormBase, self).__init__(*args, **kwargs)
# instantiated during validation
self.serializer = None
def get_serializer_context(self):
return {}
def get_serializer_data(self):
data = self.initial.copy()
data.update(self.cleaned_data or {})
return data
def get_serializer(self):
return self._meta.serializer(
data=self.get_serializer_data(),
context=self.get_serializer_context()
)
def _clean_form(self):
super(SerializerFormBase, self)._clean_form()
self.serializer = self.get_serializer()
if not self.serializer.is_valid():
self._errors.update(self.serializer.errors)
else:
self.cleaned_data = self.serializer.validated_data
class SerializerForm(six.with_metaclass(SerializerFormMeta, SerializerFormBase)):
_is_base = True
def form_from_serializer(serializer, **kwargs):
assert inspect.isclass(serializer) and issubclass(serializer, serializers.BaseSerializer), (
'Can only create forms from DRF Serializers'
)
kwargs.update({'serializer': serializer})
meta = type(str('Meta'), (object,), kwargs)
return type(str('{}Form'.format(serializer.__name__)), (SerializerForm,), {'Meta': meta})
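# (Added usage sketch) `form_from_serializer` builds a Django form class whose
# fields mirror the serializer's writable fields; the serializer and data
# below are hypothetical, for illustration only:
#
#   from rest_framework import serializers
#
#   class SignupSerializer(serializers.Serializer):
#       email = serializers.EmailField()
#       age = serializers.IntegerField(required=False)
#
#   SignupForm = form_from_serializer(SignupSerializer, fields=['email', 'age'])
#   form = SignupForm(data={'email': 'a@example.com', 'age': 30})
#   form.is_valid()  # runs form validation, then serializer validation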
import pytest
from diofant import Integer, SympifyError
from diofant.core.operations import AssocOp, LatticeOp
__all__ = ()
class MyMul(AssocOp):
identity = Integer(1)
def test_flatten():
assert MyMul(2, MyMul(4, 3)) == MyMul(2, 4, 3)
class Join(LatticeOp):
"""Simplest possible Lattice class."""
zero = Integer(0)
identity = Integer(1)
def test_lattice_simple():
assert Join(Join(2, 3), 4) == Join(2, Join(3, 4))
assert Join(2, 3) == Join(3, 2)
assert Join(0, 2) == 0
assert Join(1, 2) == 2
assert Join(2, 2) == 2
assert Join(Join(2, 3), 4) == Join(2, 3, 4)
assert Join() == 1
assert Join(4) == 4
assert Join(1, 4, 2, 3, 1, 3, 2) == Join(2, 3, 4)
def test_lattice_shortcircuit():
pytest.raises(SympifyError, lambda: Join(object))
assert Join(0, object) == 0
def test_lattice_print():
assert str(Join(5, 4, 3, 2)) == 'Join(2, 3, 4, 5)'
def test_lattice_make_args():
assert Join.make_args(0) == {0}
assert Join.make_args(1) == {1}
assert Join.make_args(Join(2, 3, 4)) == {Integer(2), Integer(3), Integer(4)}
default_app_config = 'daiquiri.query.apps.QueryConfig'
{
'resultType': # category name
{
'failed': 29, # category value and total number found of that value
'failure-ignored': 948,
'no-comparison': 4502,
'succeeded': 38609,
},
'builder':
{
'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug': 1286,
'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Release': 1134,
...
},
... # other categories from CATEGORIES_TO_SUMMARIZE
}, # end of 'categories' dictionary
'testData': # list of test results, with a dictionary for each
[
{
'resultType': 'failed',
'builder': 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug',
'test': 'bigmatrix',
'config': '8888',
'expectedHashType': 'bitmap-64bitMD5',
'expectedHashDigest': '10894408024079689926',
'actualHashType': 'bitmap-64bitMD5',
'actualHashDigest': '2409857384569',
'bugs': [123, 456],
'ignore-failure': false,
'reviewed-by-human': true,
},
...
], # end of 'testData' list
}
"""
return self._results[type]
@staticmethod
def _ignore_builder(builder):
"""Returns True if we should ignore expectations and actuals for a builder.
This allows us to ignore builders for which we don't maintain expectations
(trybots, Valgrind, ASAN, TSAN), and avoid problems like
https://code.google.com/p/skia/issues/detail?id=2036 ('rebaseline_server
produces error when trying to add baselines for ASAN/TSAN builders')
Args:
builder: name of this builder, as a string
Returns:
True if we should ignore expectations and actuals for this builder.
"""
return (builder.endswith('-Trybot') or
('Valgrind' in builder) or
('TSAN' in builder) or
('ASAN' in builder))
@staticmethod
def _read_dicts_from_root(root, pattern='*.json'):
"""Read all JSON dictionaries within a directory tree.
Args:
root: path to root of directory tree
pattern: which files to read within root (fnmatch-style pattern)
Returns:
A meta-dictionary containing all the JSON dictionaries found within
the directory tree, keyed by the builder name of each dictionary.
Raises:
IOError if root does not refer to an existing directory
"""
if not os.path.isdir(root):
raise IOError('no directory found at path %s' % root)
meta_dict = {}
for dirpath, dirnames, filenames in os.walk(root):
for matching_filename in fnmatch.filter(filenames, pattern):
builder = os.path.basename(dirpath)
if Results._ignore_builder(builder):
continue
fullpath = os.path.join(dirpath, matching_filename)
meta_dict[builder] = gm_json.LoadFromFile(fullpath)
return meta_dict
@staticmethod
def _write_dicts_to_root(meta_dict, root, pattern='*.json'):
"""Write all per-builder dictionaries within meta_dict to files under
the root path.
Security note: this will only write to files that already exist within
the root path (as found by os.walk() within root), so we don't need to
worry about malformed content writing to disk outside of root.
However, the data written to those files is not double-checked, so it
could contain poisonous data.
Args:
meta_dict: a builder-keyed meta-dictionary containing all the JSON
dictionaries we want to write out
root: path to root of directory tree within which to write files
pattern: which files to write within root (fnmatch-style pattern)
Raises:
IOError if root does not refer to an existing directory
KeyError if the set of per-builder dictionaries written out was
different than expected
"""
if not os.path.isdir(root):
raise IOError('no directory found at path %s' % root)
actual_builders_written = []
for dirpath, dirnames, filenames in os.walk(root):
for matching_filename in fnmatch.filter(filenames, pattern):
builder = os.path.basename(dirpath)
if Results._ignore_builder(builder):
continue
per_builder_dict = meta_dict.get(builder)
if per_builder_dict is not None:
fullpath = os.path.join(dirpath, matching_filename)
gm_json.WriteToFile(per_builder_dict, fullpath)
actual_builders_written.append(builder)
# Check: did we write out the set of per-builder dictionaries we
# expected to?
expected_builders_written = sorted(meta_dict.keys())
actual_builders_written.sort()
if expected_builders_written != actual_builders_written:
raise KeyError(
'expected to write dicts for builders %s, but actually wrote them '
'for builders %s' % (
expected_builders_written, actual_builders_written))
def _generate_pixel_diffs_if_needed(self, test, expected_image, actual_image):
"""If expected_image and actual_image both exist but are different,
add the image pair to self._image_diff_db and generate pixel diffs.
Args:
test: string; name of test
expected_image: (hashType, hashDigest) tuple describing the expected image
actual_image: (hashType, hashDigest) tuple describing the actual image
"""
if expected_image == actual_image:
return
(expected_hashtype, expected_hashdigest) = expected_image
(actual_hashtype, actual_hashdigest) = actual_image
if None in [expected_hashtype, expected_hashdigest,
actual_hashtype, actual_hashdigest]:
return
expected_url = gm_json.CreateGmActualUrl(
test_name=test, hash_type=expected_hashtype,
hash_digest=expected_hashdigest)
actual_url = gm_json.CreateGmActualUrl(
test_name=test, hash_type=actual_hashtype,
hash_digest=actual_hashdigest)
self._image_diff_db.add_image_pair(
expected_image_locator=expected_hashdigest,
expected_image_url=expected_url,
actual_image_locator=actual_hashdigest,
actual_image_url=actual_url)
def _load_actual_and_expected(self):
"""Loads the results of all tests, across all builders (based on the
files within self._actuals_root and self._expected_root),
and stores them in self._results.
"""
logging.info('Reading actual-results JSON files from %s...' %
self._actuals_root)
actual_builder_dicts = Results._read_dicts_from_root(self._actuals_root)
logging.info('Reading expected-results JSON files from %s...' %
self._expected_root)
expected_builder_dicts = Results._read_dicts_from_root(self._expected_root)
categories_all = {}
categories_failures = {}
Results._ensure_included_in_category_dict(categories_all,
'resultType', [
gm_json.JSONKEY_ACTUALRESULTS_FAILED,
gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED,
gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON,
gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED,
])
Results._ensure_included_in_category_dict(categories_failures,
'resultType', [
gm_json.JSONKEY_ACTUALRESULTS_FAILED,
gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED,
gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON,
])
data_all = []
data_failures = []
builders = sorted(actual_builder_dicts.keys())
num_builders = len(builders)
builder_num = 0
for builder in builders:
builder_num += 1
logging.info('Generating pixel diffs for builder #%d of %d, "%s"...' %
(builder_num, num_builders, builder))
actual_results_for_this_builder = (
actual_builder_dicts[builder][gm_json.JSONKEY_ACTUALRESULTS])
for result_type in sorted(actual_results_for_this_builder.keys()):
results_of_this_type = actual_results_for_this_builder[result_type]
if not results_of_this_type:
continue
for image_n
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
Gaffer.Metadata.registerNodeDescription(
GafferScene.Transform,
"""Modifies the transforms of all locations matched by the filter.""",
"space",
"""The space in which the transform is applied.""",
"transform",
"""The transform to be applied.""",
)
GafferUI.PlugValueWidget.registerCreator(
GafferScene.Transform,
"space",
GafferUI.EnumPlugValueWidget,
labelsAndValues = (
( "World", GafferScene.Transform.Space.World ),
( "Object", GafferScene.Transform.Space.Object ),
)
)
xpress or
# implied warranties, including, but not limited to, the implied
# warranties of merchantability and fitness for a particular purpose
# are disclaimed. In no event shall the author be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
def print_tree(tree, terminals, indent=0):
"""Print a parse tree to stdout."""
prefix = " "*indent
if tree[0] in terminals:
print prefix + repr(tree)
else:
print prefix + unicode(tree[0])
for x in tree[1:]:
print_tree(x, terminals, indent+1)
class Parser(object):
"""LR(1) parser class template.
This class is only used to store source code snippets for the
generated parser. Code is taken out via code inspection and
pasted into the output file.
"""
class ParseErrors(Exception):
"""Exception class to represent a collection of parse errors.
Instances of this class have two attributes, `errors` and `tree`.
`errors` is a list of tuples, each describing one error.
#@ IF error_stacks
Each tuple consists of the first input token which could not
be processed, the list of grammar symbols which were allowed
at this point, and a list of partial parse trees which
represent the input parsed so far.
#@ ELSE
Each tuple consists of the first input token which could not
be processed and the list of grammar symbols which were allowed
at this point.
#@ ENDIF
`tree` is a "repaired" parse tree which might be used for further
error checking, or `None` if no repair was possible.
"""
def __init__(self, errors, tree):
msg = "%d parse errors"%len(errors)
Exception.__init__(self, msg)
self.errors = errors
self.tree = tree
def __init__(self, max_err=None, errcorr_pre=4, errcorr_post=4):
"""Create a new parser instance.
The constructor arguments are all optional, they control the
handling of parse errors: `max_err` can be given to bound the
number of errors reported during one run of the parser.
`errcorr_pre` controls how many tokens before an invalid token
the parser considers when trying to repair the input.
`errcorr_post` controls how far beyond an invalid token the
parser reads when evaluating the quality of an attempted
repair.
"""
self.max_err = max_err
self.m = errcorr_pre
self.n = errcorr_post
@staticmethod
def leaves(tree):
"""Iterate over the leaves of a parse tree.
This function can be used to reconstruct the input from a
parse tree.
"""
if tree[0] in Parser.terminals:
yield tree
else:
for x in tree[1:]:
for t in Parser.leaves(x):
yield t
def _parse(self, tokens, stack, state):
"""Internal function to construct a parse tree.
'tokens' is the input token stream, 'stack' is the initial stack
and 'state' is the initial state of the automaton.
Returns a 4-tuple (done, count, state, error). 'done' is a
boolean indicating whether parsing is completed, 'count' is the
number of successfully shifted tokens, and 'error' is None on
success or else the first token which could not be parsed.
"""
read_next = True
count = 0
while state != self._halting_state:
if read_next:
try:
lookahead = tokens.next()
except StopIteration:
return (False,count,state,None)
read_next = False
token = lookahead[0]
#@ IF parser_debugprint
debug = [ ]
for s in stack:
debug.extend([str(s[0]), repr(s[1][0])])
debug.append(str(state))
print " ".join(debug)+" [%s]"%repr(token)
#@ ENDIF parser_debugprint
if (state,token) in self._shift:
#@ IF parser_debugprint
print "shift %s"%repr(token)
#@ ENDIF
stack.append((state,lookahead))
state = self._shift[(state,token)]
read_next = True
count += 1
elif (state,token) in self._reduce:
X,n = self._reduce[(state,token)]
if n > 0:
state = stack[-n][0]
#@ IF transparent_tokens
tree = [ X ]
for s in stack[-n:]:
if s[1][0] in self._transparent:
tree.extend(s[1][1:])
else:
tree.append(s[1])
tree = tuple(tree)
#@ ELSE
tree = (X,) + tuple(s[1] for s in stack[-n:])
#@ ENDIF
#@ IF parser_debugprint
debug = [ s[1][0] for s in stack[-n:] ]
#@ ENDIF
del stack[-n:]
else:
tree = (X,)
#@ IF parser_debugprint
debug = [ ]
#@ ENDIF
#@ IF parser_debugprint
print "reduce %s -> %s"%(repr(debug),repr(X))
#@ ENDIF
stack.append((state,tree))
state = self._goto[(state,X)]
else:
#@ IF parser_debugprint
print "parse error"
#@ ENDIF
return (False,count,state,lookahead)
return (True,count,state,None)
def _try_parse(self, tokens, stack, state):
count = 0
while state != self._halting_state and count < len(tokens):
token = tokens[count][0]
if (state,token) in self._shift:
stack.append(state)
state = self._shift[(state,token)]
count += 1
elif (state,token) in self._reduce:
X,n = self._reduce[(state,token)]
if n > 0:
state = stack[-n]
del stack[-n:]
stack.append(state)
state = self._goto[(state,X)]
else:
break
return count
def parse(self, tokens):
"""Parse the tokens from `tokens` and construct a parse tree.
`tokens` must be an iterable over tuples. The first element
of each tuple must be a terminal symbol of the grammar which
is used for parsing. All other elements of the tuple are just
copied into the constructed parse tree.
If `tokens` is invalid, a ParseErrors exception is raised.
Otherwise the function returns the parse tree.
"""
errors = []
tokens = chain(tokens, [(self.EOF,)])
stack = []
state = 0
while True:
done,_,state,lookahead = self._parse(tokens, stack, state)
if done:
break
expect = [ t for s,t in self._reduce.keys()+self._shift.keys()
if s == state ]
#@ IF error_stacks
errors.append((lookahead, expect, [ s[1] for s in stack ]))
#@ ELSE
errors.append((lookahead, expect))
#@ ENDIF
if self.max_err is not None and len(errors) >= self.max_err:
raise self.ParseErrors(errors, None)
#@ IF parser_debugprint
print "backtrack for error recovery"
#@ ENDIF
queue = []
def split_input(m, stack, lookahead, queue):
"""Trivial Interfaces and Adaptation from PyProtocols.
This package is a subset of the files from Phillip J. Eby's
PyProtocols package. They are only included here to help remove dependencies
on external packages from the Traits package. The code has been reorganized to
address circular imports that were discovered when explicit relative imports
were added.
"""
# -*- coding: utf-8 -*-
#
# Minimum settings needed to run the googlytics test suite
#
# googlytics options are often overridden during tests
GOOGLE_ANALYTICS_KEY = 'U-TEST-XXX'
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'googlytics_test.sqlite3'
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'googlytics',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'googlytics.context_processors.googlytics',
)
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import os
import unittest
import six
from airflow.models import TaskInstance, DAG, DagRun
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.timezone import datetime
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.db import create_session
from airflow.utils.state import State
DEFAULT_DATE = datetime(2016, 1, 1)
TASK_LOGGER = 'airflow.task'
FILE_TASK_HANDLER = 'file.task'
class TestFileTaskLogHandler(unittest.TestCase):
def cleanUp(self):
with create_session() as session:
session.query(DagRun).delete()
session.query(TaskInstance).delete()
def setUp(self):
super(TestFileTaskLogHandler, self).setUp()
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
logging.root.disabled = False
self.cleanUp()
# We use file task handler by default.
def tearDown(self):
self.cleanUp()
super(TestFileTaskLogHandler, self).tearDown()
def test_default_task_logging_setup(self):
# file task handler is used by default.
logger = logging.getLogger(TASK_LOGGER)
handlers = logger.handlers
self.assertEqual(len(handlers), 1)
handler = handlers[0]
self.assertEqual(handler.name, FILE_TASK_HANDLER)
def test_file_task_handler(self):
def task_callable(ti, **kwargs):
ti.log.info("test")
dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
task = PythonOperator(
task_id='task_for_testing_file_log_handler',
dag=dag,
python_callable=task_callable,
provide_context=True
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
logger = ti.log
ti.log.disabled = False
file_handler = next((handler for handler in logger.handlers
if handler.name == FILE_TASK_HANDLER), None)
self.assertIsNotNone(file_handler)
set_context(logger, ti)
self.assertIsNotNone(file_handler.handler)
# We expect set_context generates a file locally.
log_filename = file_handler.handler.baseFilename
self.assertTrue(os.path.isfile(log_filename))
self.assertTrue(log_filename.endswith("1.log"), log_filename)
ti.run(ignore_ti_state=True)
file_handler.flush()
file_handler.close()
self.assertTrue(hasattr(file_handler, 'read'))
# Return value of read must be a list.
logs = file_handler.read(ti)
self.assertTrue(isinstance(logs, list))
self.assertEqual(len(logs), 1)
target_re = r'\n\[[^\]]+\] {test_log_handlers.py:\d+} INFO - test\n'
# We should expect our log line from the callable above to appear in
# the logs we read back
six.assertRegex(
self,
logs[0],
target_re,
"Logs were " + str(logs)
)
# Remove the generated tmp log file.
os.remove(log_filename)
def test_file_task_handler_running(self):
def task_callable(ti, **kwargs):
ti.log.info("test")
dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
task = PythonOperator(
task_id='task_for_testing_file_log_handler',
dag=dag,
python_callable=task_callable,
provide_context=True
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.try_number = 2
ti.state = State.RUNNING
logger = ti.log
ti.log.disabled = False
file_handler = next((handler for handler in logger.handlers
if handler.name == FILE_TASK_HANDLER), None)
self.assertIsNotNone(file_handler)
set_context(logger, ti)
self.assertIsNotNone(file_handler.handler)
# We expect set_context generates a file locally.
log_filename = file_handler.handler.baseFilename
self.assertTrue(os.path.isfile(log_filename))
self.assertTrue(log_filename.endswith("2.log"), log_filename)
logger.info("Test")
# Return value of read must be a list.
logs = file_handler.read(ti)
self.assertTrue(isinstance(logs, list))
# Logs for running tasks should show up too.
self.assertEqual(len(logs), 2)
# Remove the generated tmp log file.
os.remove(log_filename)
class TestFilenameRendering(unittest.TestCase):
def setUp(self):
dag = DAG('dag_for_testing_filename_rendering', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='task_for_testing_filename_rendering', dag=dag)
self.ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
def test_python_formatting(self):
expected_filename = 'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log' % DEFAULT_DATE.isoformat()
fth = FileTaskHandler('', '{dag_id}/{task_id}/{execution_date}/{try_number}.log')
rendered_filename = fth._render_filename(self.ti, 42)
self.assertEqual(expected_filename, rendered_filename)
def test_jinja_rendering(self):
expected_filename = 'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log' % DEFAULT_DATE.isoformat()
fth = FileTaskHandler('', '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log')
rendered_filename = fth._render_filename(self.ti, 42)
self.assertEqual(expected_filename, rendered_filename)
#!/usr/bin/env python
# Written by Filippo Bonazzi <f.bonazzi@davide.it> 2016
#
# Convert an integer from its decimal representation into its hexadecimal
# representation.
# TODO: add argparse
import sys
import math
s = "".join(sys.argv[1].split())
for c in s:
if c not in "1234567890":
print("Bad string \"{}\"".format(s))
sys.exit(1)
a = 0
for i in range(0, len(s)):
a += int(s[len(s) - i - 1]) * int(math.pow(10, i))
print("{0:#x}".format(a))
"""
Application-class that implements pyFoamChangeGGIBoundary.py
Modification of GGI and cyclicGGI interface parameters in
constant/polymesh/boundary file.
Author:
Martin Beaudoin, Hydro-Quebec, 2009. All rights reserved
"""
from PyFoam.Applications.PyFoamApplication import PyFoamApplication
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from PyFoam.ThirdParty.six import print_
from os import path
import sys
import re
class ChangeGGIBoundary(PyFoamApplication):
def __init__(self,args=None):
description="""\
Change GGI boundary condition parameters
"""
PyFoamApplication.__init__(self,
args=args,
description=description,
usage="%prog <caseDirectory> ggiPatchName",
interspersed=True,
changeVersion=False,
nr=2)
def addOptions(self):
self.parser.add_option("--shadowPatch",
action="store",
dest="shadowPatch",
default=None,
help='Name of the shadowPatch')
self.parser.add_option("--shadowName",
action="store",
dest="shadowName",
default=None,
help='Name of the shadowPatch. Deprecated. Use --shadowPatch instead')
self.parser.add_option("--zone",
action="store",
dest="zone",
default=None,
help='Name of the zone for the GGI patch')
self.parser.add_option("--patchZoneName",
action="store",
dest="patchZoneName",
default=None,
help='Name of the zone for the GGI patch. Deprecated. Use --zone instead')
self.parser.add_option("--bridgeOverlap",
action="store",
dest="bridgeOverlap",
default=None,
help='bridgeOverlap flag (on/off)')
self.parser.add_option("--bridgeOverlapFlag",
action="store",
dest="bridgeOverlapFlag",
default=None,
help='bridgeOverlap flag (on/off). Deprecated. Use --bridgeOverlap instead')
self.parser.add_option("--rotationAxis",
action="store",
dest="rotationAxis",
default=None,
help='rotation axis for cyclicGgi')
self.parser.add_option("--rotationAngle",
action="store",
dest="rotationAngle",
default=None,
help='rotation axis angle for cyclicGgi')
self.parser.add_option("--separationOffset",
action="store",
dest="separationOffse | t",
default=None,
help='separation offset for cyclicGgi')
self.parser.add_option("--test",
action="store_true",
default=False,
dest="test",
help="Only print the new boundary file")
def run(self):
fName=self.parser.getArgs()[0]
bName=self.parser.getArgs()[1]
boundary=ParsedParameterFile(path.join(".",fName,"constant","polyMesh","boundary"),debug=False,boundaryDict=True)
bnd=boundary.content
if type(bnd)!=list:
self.error("Problem with boundary file (not a list)")
found=False
for val in bnd:
if val==bName:
found=True
elif found:
bcType=val["type"]
if re.match("cyclicGgi", bcType)!= None or re.match("ggi", bcType)!= None:
if self.parser.getOptions().shadowPatch!=None:
shadowPatch=self.parser.getOptions().shadowPatch
val["shadowPatch"]=shadowPatch
if shadowPatch not in bnd:
self.error("\n Option --shadowPatch for patch:",bName,": there is no patch called",shadowPatch,"\n")
if self.parser.getOptions().zone!=None:
val["zone"]=self.parser.getOptions().zone
if self.parser.getOptions().bridgeOverlap!=None:
val["bridgeOverlap"]=self.parser.getOptions().bridgeOverlap
if val["type"]=="cyclicGgi":
if self.parser.getOptions().rotationAxis!=None:
val["rotationAxis"]=self.parser.getOptions().rotationAxis
if self.parser.getOptions().rotationAngle!=None:
val["rotationAngle"]=self.parser.getOptions().rotationAngle
if self.parser.getOptions().separationOffset!=None:
val["separationOffset"]=self.parser.getOptions().separationOffset
# Deprecated
if self.parser.getOptions().shadowName!=None:
self.warning("\n PatchName:",bName,": Option --shadowName is deprecated. Use --shadowPatch instead\n")
shadowName=self.parser.getOptions().shadowName
val["shadowPatch"]=shadowName
if shadowName not in bnd:
self.error("\n Option --shadowName for patch:",bName,": there is no patch called",shadowName,"\n")
# Deprecated
if self.parser.getOptions().patchZoneName!=None:
self.warning("\n PatchName:",bName,": Option --patchZoneName is deprecated. Use --zone instead\n")
val["zone"]=self.parser.getOptions().patchZoneName
# Deprecated
if self.parser.getOptions().bridgeOverlapFlag!=None:
self.warning("\n PatchName:",bName,": Option --bridgeOverlapFlag is deprecated. Use --bridgeOverlap instead\n")
val["bridgeOverlap"]=self.parser.getOptions().bridgeOverlapFlag
else:
print_("Unsupported GGI type '",bcType,"' for patch",bName)
break
if not found:
self.error("Boundary",bName,"not found in",bnd[::2])
if self.parser.getOptions().test:
print_(boundary)
else:
boundary.writeFile()
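# (Added usage sketch) The application runs on construction when given an
# argv-style list; the case directory and patch names below are hypothetical:
#
#   ChangeGGIBoundary(args=["--shadowPatch=downstreamPatch",
#                           "--bridgeOverlap=off",
#                           "myCase", "upstreamPatch"])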
(-?\d+|-?\d+\.(\d+))\]", re.I
)
floatMatch = floatRE.match(self.replacement)
self._floatMatch = floatMatch
if self._stringMatch is not None:
stringMatch = self._stringMatch
else:
stringRE = re.compile(r"string\((\d+)\)", re.I)
stringMatch = stringRE.match(self.replacement)
self._stringMatch = stringMatch
if self._hexMatch is not None:
hexMatch = self._hexMatch
else:
hexRE = re.compile(r"hex\((\d+)\)", re.I)
hexMatch = hexRE.match(self.replacement)
self._hexMatch = hexMatch
if self._listMatch is not None:
listMatch = self._listMatch
else:
listRE = re.compile(r"list(\[[^\]]+\])", re.I)
listMatch = listRE.match(self.replacement)
self._listMatch = listMatch
# Valid replacements: ipv4 | ipv6 | integer[<start>:<end>] | string(<i>)
if self.replacement.lower() == "ipv4":
x = 0
replacement = ""
while x < 4:
replacement += str(random.randint(0, 255)) + "."
x += 1
replacement = replacement.strip(".")
return replacement
elif self.replacement.lower() == "ipv6":
x = 0
replacement = ""
while x < 8:
replacement += hex(random.randint(0, 65535))[2:] + ":"
x += 1
replacement = replacement.strip(":")
return replacement
elif self.replacement.lower() == "mac":
x = 0
replacement = ""
# Give me 6 blocks of 2 hex
while x < 6:
y = 0
while y < 2:
replacement += hex(random.randint(0, 15))[2:]
y += 1
replacement += ":"
x += 1
replacement = replacement.strip(":")
return replacement
elif self.replacement.lower() == "guid":
return str(uuid.uuid4())
elif integerMatch:
startInt = int(integerMatch.group(1))
endInt = int(integerMatch.group(2))
if endInt >= startInt:
replacementInt = random.randint(startInt, endInt)
if self.replacementType == "rated":
rateFactor = 1.0
if type(s.hourOfDayRate) == dict:
try:
rateFactor *= s.hourOfDayRate[str(s.now().hour)]
except KeyError:
import traceback
stack = traceback.format_exc()
logger.error(
"Hour of day rate failed for token %s. Stacktrace %s"
% stack
)
if type(s.dayOfWeekRate) == dict:
try:
weekday = datetime.date.weekday(s.now())
if weekday == 6:
weekday = 0
else:
weekday += 1
rateFactor *= s.dayOfWeekRate[str(weekday)]
except KeyError:
import traceback
stack = traceback.format_exc()
logger.error(
"Day of week rate failed. Stacktrace %s" % stack
)
replacementInt = int(round(replacementInt * rateFactor, 0))
replacement = str(replacementInt)
return replacement
else:
logger.error(
"Start integer %s greater than end integer %s; will not replace"
% (startInt, endInt)
)
return old
elif floatMatch:
try:
startFloat = float(floatMatch.group(1))
endFloat = float(floatMatch.group(3))
significance = 0
if floatMatch.group(2) is not None:
significance = len(floatMatch.group(2))
if endFloat >= startFloat:
floatret = round(
random.uniform(startFloat, endFloat), significance
)
if self.replacementType == "rated":
rateFactor = 1.0
now = s.now()
if type(s.hourOfDayRate) == dict:
try:
rateFactor *= s.hourOfDayRate[str(now.hour)]
except KeyError:
import traceback
stack = traceback.format_exc()
logger.error(
"Hour of day rate failed for token %s. Stacktrace %s"
% stack
)
if type(s.dayOfWeekRate) == dict:
try:
weekday = datetime.date.weekday(now)
if weekday == 6:
weekday = 0
else:
weekday += 1
rateFactor *= s.dayOfWeekRate[str(weekday)]
except KeyError:
import traceback
stack = traceback.format_exc()
logger.error(
"Day of week rate failed. Stacktrace %s"
% stack
)
floatret = round(floatret * rateFactor, significance)
floatret = str(floatret)
return floatret
else:
logger.error(
"Start float %s greater than | end float %s; will not replace"
% (startFloat, endFloat)
)
return old
except ValueError:
logger.error(
"Could not parse float[%s:%s]"
% (floatMatch.group(1), floatMatch.group(4))
)
return old
elif stringMatch:
strLength = int(stringMatch.group(1))
if strLength == 0:
return ""
elif strLength > 0:
replacement = ""
while len(replacement) < strLength:
# Generate a random ASCII between dec 33->126
replacement += chr(random.randint(33, 126))
# Practice safe strings
replacement = re.sub(
"%[0-9a-fA-F]+",
"",
six.moves.urllib.parse.quote(replacement),
)
return replacement
else:
logger.error(
"Length specifier %s for string replacement must be greater than 0; will not replace"
% (strLength)
)
return old
elif hexMatch:
strLength = int(hexMatch.gro
def get_user_id(self):
xpub_hot = self.wallet.master_public_keys["x1/"]
xpub_cold = self.wallet.master_public_keys["x2/"]
long_id = self.make_long_id(xpub_hot, xpub_cold)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(self, xpub, s):
_, _, _, c, cK = deserialize_xkey(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
xpub2 = ("0488B21E" + "00" + "00000000" + "00000000").decode("hex") + c2 + cK2
return EncodeBase58Check(xpub2)
def make_billing_address(self, num):
long_id, short_id = self.get_user_id()
xpub = self.make_xpub(billing_xpub, long_id)
_, _, _, c, cK = deserialize_xkey(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
address = public_key_to_bc_address( cK )
return address
def create_extended_seed(self, wallet, window):
seed = wallet.make_seed()
if not window.show_seed(seed, None):
return
if not window.verify_seed(seed, None, self.seed_func):
return
password = window.password_dialog()
wallet.storage.put('seed_version', wallet.seed_version, True)
wallet.storage.put('use_encryption', password is not None, True)
words = seed.split()
n = len(words)/2
wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
wallet.add_cosigner_xpub(' '.join(words[n:]), 'x2/')
msg = [
_('Your wallet file is:') + " %s"%os.path.abspath(wallet.storage.path),
_('You need to be online in order to complete the creation of your wallet.'),
_('If you generated your seed on an offline computer, click on "%s" to close this window, move your wallet file to an online computer and reopen it with Electrum.') % _('Close'),
_('If you are online, click on "%s" to continue.') % _('Next')
]
return window.question('\n\n'.join(msg), no_label=_('Close'), yes_label=_('Next'))
def show_disclaimer(self, wallet, window):
msg = [
_("Two-factor authentication is a service provided by TrustedCoin.") + ' ',
_("It uses a multi-signature wallet, where you own 2 of 3 keys.") + ' ',
_("The third key is stored on a remote server that signs transactions on your behalf.") + ' ',
_("To use this service, you will need a smartphone with Google Authenticator.") + '\n\n',
_("A small fee will be charged on each transaction that uses the remote server.") + ' ',
_("You may check and modify your billing preferences once the installation is complete.") + '\n\n',
_("Note that your coins are not locked in this service.") + ' ',
_("You may withdraw your funds at any time and at no cost, without the remote server, by using the 'restore wallet' option with your wallet seed.") + '\n\n',
_('The next step will generate the seed of your wallet.') + ' ',
_('This seed will NOT be saved in your computer, and it must be stored on paper.') + ' ',
_('To be safe from malware, you may want to do this on an offline computer, and move your wallet later to an online computer.')
]
icon = QPixmap(':icons/trustedcoin.png')
if not window.question(''.join(msg), icon=icon):
return False
self.wallet = wallet
self.set_enabled(True)
return True
def restore_third_key(self, wallet):
long_user_id, short_id = self.get_user_id()
xpub3 = self.make_xpub(signing_xpub, long_user_id)
wallet.add_master_public_key('x3/', xpub3)
@hook
def do_clear(self):
self.is_billing = False
@hook
def load_wallet(self, wallet):
self.trustedcoin_button = StatusBarButton( QIcon(":icons/trustedcoin.png"), _("Network"), self.settings_dialog)
self.window.statusBar().addPermanentWidget(self.trustedcoin_button)
self.xpub = self.wallet.master_public_keys.get('x1/')
self.user_id = self.get_user_id()[1]
t = threading.Thread(target=self.request_billing_info)
t.setDaemon(True)
t.start()
@hook
def close_wallet(self):
self.window.statusBar().removeWidget(self.trustedcoin_button)
@hook
def get_wizard_action(self, window, wallet, action):
if hasattr(self, action):
return getattr(self, action)
@hook
def installwizard_restore(self, window, storage):
if storage.get('wallet_type') != '2fa':
return
seed = window.enter_seed_dialog("Enter your seed", None, func=self.seed_func)
if not seed:
return
wallet = Wallet_2fa(storage)
self.wallet = wallet
password = window.password_dialog()
wallet.add_seed(seed, password)
words = seed.split()
n = len(words)/2
wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
wallet.add_cosigner_seed(' '.join(words[n:]), 'x2/', password)
self.restore_third_key(wallet)
wallet.create_main_account(password)
# disable plugin
self.set_enabled(False)
return wallet
def create_remote_key(self, wallet, window):
self.wallet = wallet
self.window = window
if wallet.storage.get('wallet_type') != '2fa':
raise Exception('create_remote_key: wallet type is not 2fa')
email = self.accept_terms_of_use(window)
if not email:
return
xpub_hot = wallet.master_public_keys["x1/"]
xpub_cold = wallet.master_public_keys["x2/"]
# Generate third key deterministically.
long_user_id, self.user_id = self.get_user_id()
xpub3 = self.make_xpub(signing_xpub, long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub_hot, xpub_cold, email)
except socket.error:
self.window.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
raise e
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
self.window.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
try:
assert _id == self.user_id, ("user id error", _id, self.user_id)
assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
except Exception as e:
self.window.show_message(str(e))
return
if not self.setup_google_auth(self.window, self.user_id, otp_secret):
return
self.wallet.add_master_public_key('x3/', xpub3)
return True
def need_server(self, tx):
from electrum.account import BIP32_Account
# Detect if the server is needed
long_id, short_id = self.get_user_id()
xpub3 = self.wallet.master_public_keys['x3/']
for x in tx.inputs_to_sign():
if x[0:2] == 'ff':
xpub, sequence = BIP32_Account.parse_xpubkey(x)
if xpub == xpub3:
return True
return False
@hook
def send_tx(self, tx):
self.print_error("twofactor:send_tx")
if self.wallet.storage.get('wallet_type') != '2fa':
return
if not self.need_server(tx):
self.print_error("twofactor: xpub3 not needed")
self.auth_code = None
return
self.auth_code = self.auth_dialog()
@hook
def before_send(self):
# request billing info before forming the transaction
self.billing_info = None
self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info)
self.waiting_dialog.start()
self.waiting_dialog.wait()
if self.billing_info is None:
self.window.show_message('Could not contact server')
return True
return False
@hook
def extra_f
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-02 18:22
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('description', models.TextField(blank=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'abstract': False,
'verbose_name': 'user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
from lightbulb.api.api_native import LightBulb
import base64
lightbulbapp = LightBulb()
path = "/test/env/bin/lightbulb" #Path to binary
configuration_A = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'}
configuration_B = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'}
handlerconfig_A = {'WSPORT': '5000','WBPORT': '5080', 'BROWSERPARSE': 'True', 'DELAY': '50', 'HOST': 'localhost'}
handlerconfig_B = {'URL': 'http://127.0.0.1/~fishingspot/securitycheck/index.php', 'BLOCK':'Impact', 'REQUEST_TYPE':'GET','PARAM':'input','BYPASS':'None', 'PROXY_SCHEME': 'None', 'PROXY_HOST': 'None', 'PROXY_PORT': 'None', 'PROXY_USERNAME': 'None', 'PROXY_PASSWORD': 'None','USER_AGENT': "Mozilla/5.0", 'REFERER': "http://google.com"}
stats = lightbulbapp.start_sfadiff_algorithm(
path,
configuration_A,
configuration_B,
handlerconfig_A,
handlerconfig_B,
"BrowserHandler",
"HTTPHandler")
print stats
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from PySide import QtGui, QtCore
from traits.trait_types import Event
from traitsui.api import View, UItem
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.editors.api import TableEditor
from traitsui.handler import Controller
from traitsui.qt4.editor import Editor
from traitsui.qt4.key_event_to_name import key_event_to_name
from traitsui.table_column import ObjectColumn
# ============= standard library imports ========================
# ============= local library imports ==========================
# from traitsui.basic_editor_factory import BasicEditorFactory
from pychron.envisage.key_bindings import keybinding_exists
class KeyBindingsEditor(Controller):
def traits_view(self):
cols = [
ObjectColumn(name="binding", editor=KeyBindingEditor()),
ObjectColumn(name="description", editable=False, width=400),
]
v = View(
UItem("bindings", editor=TableEditor(columns=cols)),
width=500,
height=600,
title="Edit Key Bindings",
kind="livemodal",
buttons=["OK", "Cancel"],
resizable=True,
)
return v
class KeyBindingControl(QtGui.QLabel):
def keyPressEvent(self, event):
"""Handle keyboard keys being pressed."""
# Ignore presses of the control and shift keys.
if event.key() not in (QtCore.Qt.Key_Control, QtCore.Qt.Key_Shift):
self.editor.key = event
class _KeyBindingEditor(Editor):
key = Event
# clear = Event
# refresh_needed = Event
# dump_needed = Event
def dispose(self):
# override Editor.dispose. don't break reference to control
if self.ui is None:
return
name = self.extended_name
if name != "None":
self.context_object.on_trait_change(self._update_editor, name, remove=True)
if self._user_from is not None:
for name, handler in self._user_from:
self.on_trait_change(handler, name, remove=True)
if self._user_to is not None:
for object, name, handler in self._user_to:
object.on_trait_change(handler, name, remove=True)
# self.object = self.ui = self.item = self.factory = self.control = \
# self.label_control = self.old_value = self._context_object = None
def init(self, parent):
self.control = self._create_control()
# self.sync_value(self.factory.refresh_needed, 'refresh_needed', mode='to')
# self.sync_value(self.factory.refresh_needed, 'dump_needed', mode='to')
def _create_control(self):
ctrl = KeyBindingControl()
ctrl.editor = self
return ctrl
def update_editor(self):
"""Updates the editor when the object trait changes externally to the
editor.
"""
if self.control:
self.control.setText(self.value)
def _key_changed(self, event):
key_name = key_event_to_name(event)
key_name = key_name.replace("-", "+")
desc = keybinding_exists(key_name)
if desc:
if (
QtGui.QMessageBox.question(
self.control,
"Duplicate Key Definition",
"'%s' has already been assigned to '%s'.\n"
"Do you wish to continue?" % (key_name, desc),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No,
)
!= QtGui.QMessageBox.Yes
):
return
# else:
# clear_keybinding(desc)
# self.refresh_needed = True
self.value = key_name
self.control.setText(key_name)
class KeyBindingEditor(BasicEditorFactory):
klass = _KeyBindingEditor
# refresh_needed = Str
# ============= EOF =============================================
#!/usr/bin/python2.7
import sys
import csv
import yaml
import codecs
TO_BE_TRANSLATED_MARK = "***TO BE TRANSLATED***"
def collect(result, node, prefix=None):
for key,value in node.items():
new_prefix = (key if prefix == None else prefix + "." + key)
if isinstance(value, dict):
collect(result, value, new_prefix)
else:
result[new_prefix] = value
def collect_old_csv(filename):
result = {}
reader = csv.reader(open(filename))
for row in reader:
if TO_BE_TRANSLATED_MARK not in row[1]:
result[row[0]] = row[1].decode("utf-8")
return result
def flatten(namespace=None,old_csv=None):
namespace = "" if namespace == None else namespace + "."
en_src = yaml.load(open("%sen.yml" % namespace))
ja_src = yaml.load(open("%sja.yml" % namespace))
en = {}
collect(en, en_src["en"])
ja = {}
collect(ja, ja_src["ja"])
ja_old = collect_old_csv(old_csv) if old_csv else {}
writer = csv.writer(sys.stdout)
for key,value in sorted(en.items()):
val = TO_BE_TRANSLATED_MARK + value
if key in ja: val = ja[key]
elif key in ja_old: val = ja_old[key]
writer.writerow([key, val.encode("UTF-8")])
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: yaml2csv.py namespace('server'|'client') [old-translated-csv-file]"
sys.exit(1)
flatten(sys.argv[1], None if len(sys.argv) < 3 else sys.argv[2])
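# (Added usage sketch) Typical invocation, assuming server.en.yml and
# server.ja.yml exist in the working directory:
#
#   python yaml2csv.py server old_server.csv > server_ja.csv
#
# Keys missing a Japanese translation are emitted with the
# ***TO BE TRANSLATED*** marker prefixed to the English text.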
es are currently unreachable.
After you have received a ConsistencyLostError, you can either
wait for a sufficiently up-to-date replica to become reachable in which
case the session can be continued or you can reset the session by calling
the reset() method. If you have called the reset() method,
a new session is started with the next invocation to a CRDT replica.
Notes:
The CRDT state is kept entirely on non-lite (data) members. If there
aren't any and the methods here are invoked on a lite member, they will
fail with a NoDataMemberInClusterError.
"""
_EMPTY_ADDRESS_LIST = []
def __init__(self, service_name, name, context):
super(PNCounter, self).__init__(service_name, name, context)
self._observed_clock = VectorClock()
self._max_replica_count = 0
self._current_target_replica_address = None
def get(self):
"""Returns the current value of the counter.
Returns:
hazelcast.future.Future[int]: The current value of the counter.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_get_codec)
def get_and_add(self, delta):
"""Adds the given value to the current value and returns the previous value.
Args:
delta (int): The value to add.
Returns:
hazelcast.future.Future[int]: The previous value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=True)
def add_and_get(self, delta):
"""Adds the given value to the current value and returns the updated value.
Args:
delta (int): The value to add.
Returns:
hazelcast.future.Future[int]: The updated value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False)
def get_and_subtract(self, delta):
"""Subtracts the given value from the current value and returns the previous value.
Args:
delta (int): The value to subtract.
Returns:
hazelcast.future.Future[int]: The previous value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=-1 * delta, get_before_update=True)
def subtract_and_get(self, delta):
"""Subtracts the given value from the current value and returns the updated value.
Args:
delta (int): The value to subtract.
Returns:
hazelcast.future.Future[int]: The updated value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(
pn_counter_add_codec, delta=-1 * delta, get_before_update=False
)
def get_and_decrement(self):
"""Decrements the counter value by one and returns the previous value.
Returns:
hazelcast.future.Future[int]: The previous value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=-1, get_before_update=True)
def decrement_and_get(self):
"""Decrements the counter value by one and returns the updated value.
Returns:
hazelcast.future.Future[int]: The updated value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=-1, get_before_update=False)
def get_and_increment(self):
"""Increments the counter value by one and returns the previous value.
Returns:
hazelcast.future.Future[int]: The previous value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=1, get_before_update=True)
def increment_and_get(self):
"""Increments the counter value by one and returns the updated value.
Returns:
hazelcast.future.Future[int]: The updated value.
Raises:
NoDataMemberInClusterError: if the cluster does not contain any data members.
UnsupportedOperationError: if the cluster version is less than 3.10.
ConsistencyLostError: if the session guarantees have been lost.
"""
return self._invoke_internal(pn_counter_add_codec, delta=1, get_before_update=False)
def reset(self):
"""Resets the observed state by this PN counter.
This method may be used after a method invocation has thrown a ``ConsistencyLostError``
to reset the proxy and to be able to start a new session.
"""
self._observed_clock = VectorClock()
def _invoke_internal(self, codec, **kwargs):
delegated_future = Future()
self._set_result_or_error(
delegated_future, PNCounter._EMPTY_ADDRESS_LIST, None, codec, **kwargs
)
return delegated_future
def _set_result_or_error(
self, delegated_future, excluded_addresses, last_error, codec, **kwargs
):
target = self._get_crdt_operation_target(excluded_addresses)
if not target:
if last_error:
delegated_future.set_exception(last_error)
return
delegated_future.set_exception(
NoDataMemberInClusterError(
"Cannot invoke operations on a CRDT because "
"the cluster does not contain any data members"
)
)
return
request = codec.encode_request(
name=self.name,
replica_timestamps=self._observed_clock.entry_set(),
target_replica_uuid=target.uuid,
**kwargs
)
future = self._invoke_on_target(request, target.uuid, codec.decode_response)
checker_func = functools.partial(
self._check_invocation_result,
delegated_future=delegated_future,
excluded_addresses=excluded_addresses,
target=target,
codec=codec,
**kwargs
)
future.add_done_callback(checker_func)
def _check_invocation_result(
self, future, delegated_future, excluded_addresses, target, codec, **kwargs
):
try:
result = future.result()
self._update_observed_replica_timestamp(result["replica_timestamps"])
delegated_future.set_result(result["value"])
except Exception as ex:
_logger.exception(
"Exception occurred while invoking operation on target %s, "
"choosing different target",
target,
)
if excluded_addresses == PNCounter._EMPTY_ADDRESS_LIST:
excluded_addresses = []
excluded_addresses.append(target)
self._set_result_or_error(delegated_future, excluded_addresses, ex, codec, **kwargs)
def _get_crdt_operation_target(self, excluded_addresses):
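# A minimal usage sketch (an addition, not part of the original module). It
# assumes a running Hazelcast cluster with at least one data member; the
# blocking() wrapper makes proxy calls return values directly instead of
# Futures.
import hazelcast

client = hazelcast.HazelcastClient()
counter = client.get_pn_counter("my-counter").blocking()
counter.add_and_get(5)                   # counter is now 5
previous = counter.get_and_decrement()   # returns 5, counter is now 4
print(counter.get())                     # prints 4
client.shutdown()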
import os
import unittest
import moderngl
import pycodestyle
class TestCase(unittest.TestCase):
def test_style(self):
config_file = os.path.join(os.path.dirname(__file__), '..', 'tox.ini')
style = pycodestyle.StyleGuide(config_file=config_file, ignore='E402')
check = style.check_files([
os.path.join(os.path.dirname(__file__), '../moderngl/__init__.py'),
os.path.join(os.path.dirname(__file__), '../moderngl/__main__.py'),
])
self.assertEqual(check.total_errors, 0)
if __name__ == '__main__':
unittest.main()
password = input(color.BLEU + "password > " + color.DARKROUGE + "new password > " + color.ENDC)
error = Sub_Menu.PassHandle(crypt, password)
print (color.VERT + "[+]" + color.ENDC + " changing the password to " + color.VERT + "'" + password + "'" + color.ENDC)
time.sleep(1)
return password
elif PasswordChoice == "5":
print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
time.sleep(0.3)
return password
else:
print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")
else:
print(color.ROUGE + "[*]" + color.ENDC + " please select a security type if you want to choose a password.")
time.sleep(1.5)
return password
#takes the security type and password as parameters. If a new password is
#chosen, the old password is reset.
def securityMenu(crypt, password):
while True:
security_text = color.BLEU + color.BOLD + """
-WPA2 """ + color.ENDC + """is the most advanced wifi security protocol currently used by most
routers by default. The passphrase must have a minimum of 8 characters.""" + color.BLEU + color.BOLD + """\n
-WPA""" + color.ENDC + """ wpa is older and less secure than wpa2. It uses an older
encryption (TKIP). Like wpa2 you need to enter at least 8 characters. """ + color.BLEU + color.BOLD + """\n
-WEP""" + color.ENDC + """ wep is deprecated and can be very easily cracked. Your wep key must
be at least 10 characters long and only contain hexadecimal characters."""
print(security_text)
print ("\n - the current security of the access point is " + color.VERT + "'" + crypt + "'" + color.ENDC)
print("")
print("%53s" % ("current options" + color.ENDC))
print("%61s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
print("%38s" % ("(1) WPA2."))
print("%44s" % ("(2) WPA (TKIP)."))
print("%47s" % ("(3) WEP (64 bits)."))
print("%45s" % ("(4) no security."))
print("%44s" % ("(5) main menu.\n"))
while True:
NameChoice = input(color.BLEU + "security > " + color.ENDC)
pwd = ""
if NameChoice == "1":
Sec = "WPA2"
crypt, password = Sub_Menu.AskPassword(Sec, pwd)
return crypt, password
elif NameChoice == "2":
Sec = "WPA"
crypt, password = Sub_Menu.AskPassword(Sec, pwd)
return crypt, password
elif NameChoice == "3":
Sec = "WEP"
crypt, password = Sub_Menu.AskPassword(Sec, pwd)
return crypt, password
elif NameChoice == "4":
print (color.VERT + "[+]" + color.ENDC + " deleting the " + color.VERT + crypt + color.ENDC + " security.")
time.sleep(1)
crypt = "N/A"
password = "N/A"
return crypt, password
elif NameChoice == "5":
print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
time.sleep(0.3)
return crypt, password
else:
print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")
#gives the option to turn the dhcp server on or off. It also gives the
#option to change the dhcp pool address.
def dhcpMenu(dhcp):
while True:
#putting some information for the dhcp in variable
couleur = color.Color_check(dhcp)
dhcpPool = "10.0.0.10-250"
dhcpLease = "12h"
# show the appropriate option in the menu
if dhcp == "N/A":
dhcpOPTION = "(1) set dhcp server to" + color.VERT + " 'on'" + color.ENDC
else:
dhcpOPTION = "%47s" % " (1) set dhcp server to" + color.ROUGE + " 'off'" + color.ENDC
print ("""\n the dhcp server should always be on. If the dhcp is set to 'N/A' the client
will need to have is adresse, gateway and dns set manualy.\n""")
print (color.BOLD + " dhcp status: " + color.ENDC + couleur + "'" + dhcp + "'" + color.ENDC)
print (color.BOLD + " dhcp pool: " + color.ENDC + color.BLEU + dhcpPool + color.ENDC)
print (color.BOLD + " dhcp lease: " + color.ENDC + color.BLEU + dhcpLease + color.ENDC)
print("")
print("%49s" % ("current options" + color.ENDC))
print("%57s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
print("%61s" % ( dhcpOPTION))
print("%40s" % ("(5) main menu.\n"))
while True:
DhcpChoice = input(color.BLEU + "dhcp > " + color.ENDC)
#check the last dhcp value and decide whether to turn it on or off
if DhcpChoice == "1":
if dhcp == "N/A":
dhcp = "ON"
else:
dhcp = "N/A"
print (color.VERT + "[+]" + color.ENDC + " changing dhcp status to " + color.VERT + "'" + dhcp + "'" + color.ENDC)
time.sleep(1)
return dhcp
#go back to the main menu if this option is chosen
elif DhcpChoice == "5":
print(color.VERT + "[+]" + color.ENDC + " going back to main menu.")
time.sleep(0.3)
return dhcp
else:
print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!")
#show the menu for choosing dns options. The dns object can be changed to on or N/A.
# I am planning to let the user put their dns redirect entry directly
# in the program and in the config file.
def dnsMenu(dns):
while True:
couleur = color.Color_check(dns)
# show the appropriate option in the menu
if dns == "N/A":
dnsOPTION = "(1) set dns server to" + color.VERT + " 'on' " + color.ENDC
else:
dnsOPTION = "(1) set dns server to" + color.ROUGE + " 'off'" + color.ENDC
print ("""\n if dns fowarding is set to 'on' dnsmasq will start the dns server and
start fowarding all the request to the google dns server. When the dns
server is active its possible to redirect the client to the ip adresse
of your choice """)
print (color.BOLD + "\n dns status:" + color.ENDC + couleur + " '" + dns + "'" + color.ENDC)
print("%51s" % ("current options" + color.ENDC))
print("%59s" % (color.DARKCYAN + "-----------------------" + color.ENDC))
print("%63s" % (dnsOPTION))
print("%47s" % ("(2) redirect client."))
print("%46s" % ("(3) cleaning entry."))
print("%42s" % ("(5) main menu.\n"))
while True:
DnsChoice = input(color.BLEU + "dns > " + color.ENDC)
if DnsChoice == "1":
if dns == "N/A":
dns = "ON"
else:
dns = "N/A"
print (color.VERT + "[+]" + color.ENDC + " changing dns status to " + color.VERT + "'" + dns + "'" + color.ENDC)
time.sleep(1)
return dns
if DnsChoice == "2":
while True:
# read the dnsmasq.host file and print the message.
print(Sub_Menu.dns_message)
entry_number = read_dnsmasq_host()
# give the user the choice to add a new entry.
print(color.DARKYELLOW + "\ndo you want to write an entry in the file? (y/n)" + color.ENDC)
choice = input(color.BLEU + "dns > " + color.ENDC)
# if choice is yes, we ask the user to type the new entry
"""
====================================================
Shuffle channels' data in the time domain and plot.
====================================================
"""
# Author: Eberhard Eich
# Praveen Sripad
#
# License: BSD (3-clause)
import numpy as np
import os.path as op
import mne
from jumeg.jumeg_utils import (get_files_from_list, time_shuffle_slices,
channel_indices_from_list)
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = str(data_path + '/MEG/sample/sample_audvis_raw.fif')
# shuffle all MEG channels that begin with number 11
shflchanlist = ['MEG 11..']
# shuffle the whole length of the data
tmin, tmax = 0., None
# apply the shuffling
# time_shuffle_slices(raw_fname, shufflechans=shflchanlist, tmin=tmin, tmax=tmax)
plot_things = True
if plot_things:
permname = op.join(op.dirname(raw_fname),
op.basename(raw_fname).split('-')[0]) + ',tperm-raw.fif'
rawraw = mne.io.Raw(raw_fname,preload=True)
shflpick = channel_indices_from_list(rawraw.info['ch_names'][:],
shflchanlist)
procdperm = mne.io.Raw(permname, preload=True)
figraw = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(1,0,0), picks=shflpick)
axisraw = figraw.gca()
axisraw.set_ylim([-300., -250.])
# procdnr.plot_psd(fmin=0.,fmax=300., color=(0,0,1), picks=shflpick)
figshfl = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(1,0,0), picks=shflpick)
axisshfl = figshfl.gca()
axisshfl.set_ylim([-300., -250.])
megpick = mne.pick_types(rawraw.info, meg=True, ref_meg=False, eeg=False, eog=False, stim=False)
figraw1 = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,0,1), picks=megpick)
axisraw1 = figraw1.gca()
axisraw1.set_ylim([-300., -250.])
figshfl1 = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,0,1), picks=megpick)
axisshfl1 = figshfl1.gca()
axisshfl1.set_ylim([-300., -250.])
megnochgpick = np.setdiff1d(megpick, shflpick)
figraw2 = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,1,0), picks=megnochgpick)
axisraw2 = figraw2.gca()
axisraw2.set_ylim([-300., -250.])
figshfl2 = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,1,0), picks=megnochgpick)
axisshfl2 = figshfl2.gca()
axisshfl2.set_ylim([-300., -250.])
# -*- coding: utf-8 -*-
# entry.py, part for evparse : EisF Video Parse, evdh Video Parse.
# entry: evparse/lib/hunantv
# version 0.1.0.0 test201505151816
# author sceext <sceext@foxmail.com> 2009EisF2015, 2015.05.
# copyright 2015 sceext
#
# This is FREE SOFTWARE, released under GNU GPLv3+
# please see README.md and LICENSE for more information.
#
# evparse : EisF Video Parse, evdh Video Parse.
# Copyright (C) 2015 sceext <sceext@foxmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# import
import re
from .. import error
from . import get_base_info
from . import get_video_info
# global vars
# version of this extractor
THIS_EXTRACTOR_VERSION = 'evparse lib/hunantv version 0.1.0.0 test201505151816'
# http://www.hunantv.com/v/2/150668/f/1518250.html#
# http://www.hunantv.com/v/2/51717/f/692063.html#
# http://www.hunantv.com/v/2/107768/f/1517224.html#
RE_SUPPORT_URL = r'^http://www\.hunantv\.com/v/2/[0-9]+/f/[0-9]+\.html'
RE_VID = r'http://www\.hunantv\.com/v/2/[0-9]+/f/([0-9]+)\.html'
# global config obj
etc = {} # NOTE should be set
etc['flag_debug'] = False
etc['hd_min'] = 0
etc['hd_max'] = 0
# functions
def set_config(config):
# just copy it
etc['flag_debug'] = config['flag_debug']
etc['hd_min'] = config['hd_min']
etc['hd_max'] = config['hd_max']
# get vid
def get_vid(url_to):
vid_info = {}
vid_info['url'] = url_to
# get vid
vids = re.findall(RE_VID, url_to)
vid_info['vid'] = vids[0]
# done
return vid_info
def parse(url_to): # this site entry main entry function
# first re-check the url, to see if it is supported by this extractor
if not re.match(RE_SUPPORT_URL, url_to):
raise error.NotSupportURLError('not support this url', url_to)
# create evinfo
evinfo = {}
evinfo['info'] = {}
evinfo['video'] = []
# add some base info
evinfo['info']['url'] = url_to
evinfo['info']['site'] = 'hunantv'
# get vid
vid_info = get_vid(url_to)
# DEBUG info
if etc['flag_debug']:
print('lib.hunantv: DEBUG: got vid \"' + vid_info['vid'] + '\" ')
# get base, more info
info, more = get_base_info.get_info(vid_info, flag_debug=etc['flag_debug'])
# add more info
evinfo['info']['title'] = more['title']
evinfo['info']['title_sub'] = more['sub_title']
evinfo['info']['title_short'] = more['short_title']
evinfo['info']['title_no'] = more['no']
# get video info
evinfo['video'] = get_video_info.get_info(info, hd_min=etc['hd_min'], hd_max=etc['hd_max'], flag_debug=etc['flag_debug'])
# done
return evinfo
# end entry.py
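# A minimal usage sketch (an addition, not part of the original file). It
# assumes the package layout named in the header, so the module is importable
# as evparse.lib.hunantv, and uses one of the sample URLs listed above;
# parse() performs network requests via get_base_info/get_video_info.
#
# from evparse.lib import hunantv
# hunantv.set_config({'flag_debug': True, 'hd_min': 0, 'hd_max': 0})
# evinfo = hunantv.parse('http://www.hunantv.com/v/2/51717/f/692063.html')
# print(evinfo['info']['title'], len(evinfo['video']))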
. If the range is not known yet, -1 is returned"""
return self.m
def get_ratio(self):
"""Return ratio c between keyset and the size of the memory"""
return self.ratio
def set_ratio(self,ratio):
"""sets the ration and therefore size of the data structure of the PHF"""
self.ratio = ratio
def set_limit(self, limit):
"""Sets the size of the memory bank for one hash function. This function can be used instead of the set ratio. BDZ computes three hash functions with nonoverlapping outputs. Outputs of these hash functions are used as a pointers to the memory. If user know amount of the memory, he may set the limit as 1/3 of the available memory. The ration and other parameters are computed when the key set is given. The limit value always take precedents before the ratio. To stop using limit value, limit should be set to the negative value."""
self.limit = limit;
def get_iteration_limit(self):
"""The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation"""
return self.iteration_limit
def set_iteration_limit(self,iteration_limit):
"""The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation"""
self.iteration_limit = iteration_limit
def get_order(self):
"""This function return the number of uniform hash function used to create hypergraph"""
return self.function_number
def set_order(self,number):
"""This function sets the number of hash function used for the creation of the hypergraph. It can not be changed after generation of the PHF"""
self.function_number = number
def set_keys(self, key_set):
"""This is a perfect hash function. For the construction of the PHF, the set of keys has to be known. This function gives set of keys to the function, so generate_seed can build correct function"""
self.key_set = key_set
self.known_keys = True
if self.limit > 0:
#The limit is set, recompute ratio for the given limit
self.ratio = (3.0*self.limit)/len(key_set)
def is_key_set(self):
"""This function return info | rmation, if the set of keys is prepared for the generation of the PHF"""
return self.known_keys
def _found_graph(self):
"""This is internal function. It generate random hypergraph according to the specification in the bdz class. It returns a queue of the edge and changes internal datastructure of BDZ class. Returned edges are ordered in such way, that they can be used for the construction of the PHF"""
#First step is to initialize seed
self.seed = dict()
#Second step is to generate the random hash functions
hashes = list()
for i in range(0,self.function_number):
x = jenkins_wrapper()
x.generate_seed()
# x = h3_hash()
# x.set_bitsize(16)
# x.set_input_size(len(self.key_set[0]))
# x.generate_seed()
hashes.append(x)
self.seed["hashes"] = hashes
#setting m
self.m = int(math.ceil(self.ratio * len(self.key_set)))
limit = int(math.ceil(float(self.m) /self.function_number))
self.m = 3*limit
#print("XXXXXXXXXXXXXXX",limit, self.m)
#Generation of hypergraph
hyper = graph()
hyper.set_order(self.function_number)
hyper.add_vertices(self.m)
#Generation of the edges of the hypergraph
for x in self.key_set:
values = list()
for i in self.seed["hashes"]:
#print("test",i.hash(x)%limit,limit*len(values))
vertex = (i.hash(x) % limit) + limit*len(values)
values.append(vertex)
#Add this edge into the hypergraph
e = hyper.add_edge(values)
# print(e.get_vertices())
#Add edge to the vertices
for v in values:
hyper.get_vertex(v).add_edge(e)
#Generate queue for the edge evaluation
queue_list = []
queue = deque()
#Boolean vector of the used edges
used = [False] * hyper.get_edge_number()
#First remove edges that have at least one vertex with degree 1
for i in range(0,hyper.get_edge_number()):
vert = hyper.get_edge(i).get_vertices()
#print([hyper.get_vertex(x).get_degree() for x in vert])
Deg = [hyper.get_vertex(x).get_degree() == 1 for x in vert]
if sum(Deg) > 0 and used[i] == False:
#This edge has at least one vertex with degree 1
used[i] = True
queue_list.append(i)
queue.append(i)
#Removing edges that have unique vertex (on the stack)
#adding a new edges with unique vertex into stack
while(len(queue)>0):
edge = queue.popleft()
#remove edge from the graph (only from vertex and decrease degree)
for v in hyper.get_edge(edge).get_vertices():
hyper.get_vertex(v).get_edges().remove(hyper.get_edge(edge))
deg = hyper.get_vertex(v).get_degree() - 1
#print("KVIK",deg)
hyper.get_vertex(v).set_degree(deg)
#if degree decrease to 1, the remaining edge should be added
#into the queue
if(deg == 1):
#Found the edge position
e1 = hyper.get_vertex(v).get_edges()[0]
position = hyper.get_edge_position(e1)
#If it is not in the queue, put it there
if used[position] == False:
queue.append(position)
queue_list.append(position)
used[position] = True
self.hyper = hyper
return queue_list
def _found_g(self,v,ed,vi):
"""This function computes value of the g array for given vertex. It uses plus operation."""
s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()]
sum1 = sum(s) - s[vi]
self.g[v] = (vi-sum1)%len(s)
return True
def _found_g2(self,v,ed,vi):
"""This function computes value of the g array for given vertex by the use of the xor function. Assumes two bit representation of the g array"""
s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()]
sum1 = s[0];
for index in range(1,len(self.hyper.get_edge(ed).get_vertices())):
sum1 = sum1^s[index]
sum1 = sum1^s[vi]
self.g[v] = (vi^sum1)&3 #3 is the 11 in binary, therefore it clear all the higher bits to zero
return True
def generate_seed(self):
"""This function generates the PHF function according to the BDZ algorithm"""
if not self.known_keys:
raise NoData("The key set is unknown")
size = 0
iteration = 0
while(size != len(self.key_set) and self.iteration_limit > iteration):
queue = self._found_graph()
size = len(queue)
iteration = iteration+1
if(len(queue) != len(self.key_set)):
return False
self.g = [3] * self.m
marked_vertices = [False] *self.m
while(len(queue) > 0):
ed = queue.pop()
worked = False
for vi in range(0,len(self.hyper.get_edge(ed).get_vertices())):
v = self.hyper.get_edge(ed).get_vertices()[vi]
if(marked_vertices[v] == False and worked == False):
worked = self._found_g2(v,ed,vi)
marked_vertices[v] = True
# print(self.g)
# print(self.g)
# print(len(queue))
# print(len(self.key_set))
def hash(self, key):
limit = int(self.m /self.function_number)
# print(limit)
hashes = [x.hash(key)%limit for x in self.seed["hashes"]]
h1 = [hashes[x]+x*limit for x in range(0,len(hashes))]
g_val = [self.g[x] for x in h1]
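# A minimal usage sketch (an addition, not part of the original file). It
# assumes the surrounding class is named bdz, as the docstrings suggest, and
# that hash() returns a slot index for the key:
#
# phf = bdz()
# phf.set_ratio(1.3)                # memory roughly 1.3x the number of keys
# phf.set_keys(["apple", "pear", "plum"])
# if phf.generate_seed() is not False:   # False means all attempts failed
#     slots = [phf.hash(k) for k in ["apple", "pear", "plum"]]
#     assert len(set(slots)) == len(slots)  # perfect: no collisions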
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 20:47:53 2017
@author: fernando
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
df = pd.read_csv("/home/fernando/CoursePythonDS/DAT210x/Module3/Datasets/wheat.data")
print df.describe()
df[df.groove>5].asymmetry.plot.hist(alpha=0.3, normed=True)
df[df.groove<=5].asymmetry.plot.hist(alpha=0.5, normed=True) |
plt.show() |
from pypers.core.step import Step
from pypers.steps.mothur import Mothur
import os
import json
import re
import glob
class MothurSummarySeqs(Mothur):
"""
Summarizes the quality of sequences in an unaligned or aligned fasta-formatted sequence file.
"""
spec = {
'name' : 'MothurSummarySeqs',
'version' : '20150512',
'descr' : [
'Summarizes the quality of sequences in an unaligned or aligned fasta-formatted sequence file'
],
'url' : 'www.mothur.org/wiki/Summary.seqs',
'args' : {
'inputs' : [
{
'name' : 'input_fasta',
'type' : 'file',
'iterable' : True,
'descr' : 'input fasta filename'
},
{
'name' : 'input_names',
'type' : 'file',
'iterable' : True,
'required' : False,
'descr' : 'input names filename'
},
{
'name' : 'input_counts',
'type' : 'file',
'iterable' : True,
'required' : False,
'descr' : 'input counts filename'
}
],
'outputs' : [
{
'name' : 'output_summary',
'type' : 'file',
'value' : '*.summary',
'descr': 'output summary filename'
},
{
'name' : 'output_log',
'type' : 'file',
'value' : '*.log.txt',
'descr': 'output summary logfile with tile summary table'
}
]
},
'requirements' : {
'cpus' : '8'
}
}
def process(self):
"""
Create the necessary input file links and run mothur command
"""
if type(self.input_fasta) != list:
self.input_fasta = [self.input_fasta]
if type(self.input_names) != list:
self.input_names = [self.input_names]
if type(self.input_counts) != list:
self.input_counts = [self.input_counts]
for idx, input_fasta in enumerate(self.input_fasta):
self.mk_links([input_fasta],self.output_dir)
input_fasta = os.path.join(self.output_dir,os.path.basename(input_fasta))
extra_params={'fasta':input_fasta}
if self.input_names[idx]:
input_names = os.path.join(self.output_dir,os.path.basename(self.input_names[idx]))
self.mk_links([self.input_names[idx]],self.output_dir)
extra_params['name'] = input_names
if self.input_counts[idx]:
input_counts = os.path.join(self.output_dir,os.path.basename(self.input_counts[idx]))
self.mk_links([self.input_counts[idx]],self.output_dir)
extra_params['count'] = input_counts
self.run_cmd('summary.seqs',extra_params)
import pytest
import datetime
import os
from helpers import ensure_dir
def pytest_configure(config):
if not hasattr(config, 'input'):
current_day = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.datetime.now())
ensure_dir(os.path.join(os.path.dirname(__file__), 'input', current_day))
result_dir = os.path.join(os.path.dirname(__file__), 'results', current_day)
ensure_dir(result_dir)
result_dir_test_run = result_dir
ensure_dir(os.path.join(result_dir_test_run, 'screenshots'))
ensure_dir(os.path.join(result_dir_test_run, 'logcat'))
config.screen_shot_dir = os.path.join(result_dir_test_run, 'screenshots')
config.logcat_dir = os.path.join(result_dir_test_run, 'logcat')
class DeviceLogger:
def __init__(self, logcat_dir, screenshot_dir):
self.screenshot_dir = screenshot_dir
self.logcat_dir = logcat_dir
@pytest.fixture(scope='function')
def device_logger(request):
logcat_dir = request.config.logcat_dir
screenshot_dir = request.config.screen_shot_dir
return DeviceLogger(logcat_dir, screenshot_dir)
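# A minimal usage sketch (a hypothetical test, not part of the original
# conftest): a test module can request the device_logger fixture and use the
# per-run directories prepared in pytest_configure.
#
# import os
#
# def test_log_dirs_exist(device_logger):
#     # The screenshot and logcat directories were created by ensure_dir above.
#     assert os.path.isdir(device_logger.screenshot_dir)
#     assert os.path.isdir(device_logger.logcat_dir)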
# import asyncio
#
# async def compute(x, y):
# print("Compute %s + %s ..." % (x, y))
# await asyncio.sleep(1.0)
# return x + y
#
# async def print_sum(x, y):
# for i in range(10):
# result = await compute(x, y)
# print("%s + %s = %s" % (x | , y, result))
#
# loop = asyncio.get_event_loop()
# loop.run_until_complete(print_sum(1,2))
# asyncio.ensure_future(print_sum(1, 2))
# asyncio.ensure_future(print_sum(3, 4))
# asyncio.ensure_future(print_sum(5, 6))
# loop.run_forever()
import asyncio
async def display_date(who, num):
i = 0
while True:
if i > num:
return
print('{}: Before loop {}'.format(who, i))
await asyncio.sleep(1)
i += 1
loop = asyncio.get_event_loop()
asyncio.ensure_future(display_date('AAA', 4))
asyncio.ensure_future(display_date('BBB', 6))
loop.run_forever()
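# An alternative sketch (an addition, not in the original file): instead of
# run_forever(), which never returns, gather the coroutines and exit once
# both have finished.
#
# loop.run_until_complete(asyncio.gather(
#     display_date('AAA', 4),
#     display_date('BBB', 6),
# ))
# loop.close()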
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_atmosphere_user_manager_update'),
]
operations = [
    migrations.AlterField(
        model_name='allocationstrategy',
        name='refresh_behaviors',
        field=models.ManyToManyField(to='core.RefreshBehavior', blank=True),
    ),
    migrations.AlterField(
        model_name='allocationstrategy',
        name='rules_behaviors',
        field=models.ManyToManyField(to='core.RulesBehavior', blank=True),
    ),
    migrations.AlterField(
        model_name='machinerequest',
        name='new_machine_licenses',
        field=models.ManyToManyField(to='core.License', blank=True),
    ),
    migrations.AlterField(
        model_name='project',
        name='applications',
        field=models.ManyToManyField(related_name='projects', to='core.Application', blank=True),
    ),
    migrations.AlterField(
        model_name='project',
        name='instances',
        field=models.ManyToManyField(related_name='projects', to='core.Instance', blank=True),
    ),
    migrations.AlterField(
        model_name='project',
        name='volumes',
        field=models.ManyToManyField(related_name='projects', to='core.Volume', blank=True),
    ),
    migrations.AlterField(
        model_name='providermachine',
        name='licenses',
        field=models.ManyToManyField(to='core.License', blank=True),
    ),
]
from setuptools import setup
version = '1.4'
testing_extras = ['nose', 'coverage']
docs_extras = ['Sphinx']
setup(
name='WebOb',
version=version,
description="WSGI request and response object",
long_description="""\
WebOb provides wrappers around the WSGI request environment, and an
object to help create WSGI responses.
The objects map much of the specified behavior of HTTP, including
header parsing and accessors for other standard parts of the
environment.
You may install the `in-development version of WebOb
<https://github.com/Pylons/webob/zipball/master#egg=WebOb-dev>`_ with
``pip install WebOb==dev`` (or ``easy_install WebOb==dev``).
* `WebOb reference <http://docs.webob.org/en/latest/reference.html>`_
* `Bug tracker <https://github.com/Pylons/webob/issues>`_
* `Browse source code <https://github.com/Pylons/webob>`_
* `Mailing list <http://bit.ly/paste-users>`_
* `Release news <http://docs.webob.org/en/latest/news.html>`_
* `Detailed changelog <https://github.com/Pylons/webob/commits/master>`_
""",
classifiers=[
"Development Status :: 6 - Mature",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy" | ,
],
keywords='wsgi request web http',
author='Ian Bicking',
author_email='ianb@colorstudy.com',
maintainer='Pylons Project',
url='http://webob.org/',
license='MIT',
packages=['webob'],
zip_safe=True,
test_suite='nose.collector',
tests_require=['nose'],
extras_require = {
'testing':testing_extras,
'docs':docs_extras,
},
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import os
import codecs
from setuptools import setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding='utf-8').read()
setup(
name='pytest-typehints',
version='0.1.0',
author='Edward Dunn Ekelund',
author_email='edward.ekelund@gmail.com',
maintainer='Edward Dunn Ekelund',
maintainer_email='edward.ekelund@gmail.com',
license='BSD-3',
url='https://github.com/eddie-dunn/pytest-typehints',
description='Pytest plugin that checks for type hinting',
long_description=read('README.rst'),
py_modules=['pytest_typehints'],
install_requires=['pytest>=2.9.2'],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'License :: OSI Approved :: BSD License',
],
entry_points={
'pytest11': [
'typehints = pytest_typehints',
],
},
)
"""
Copyright (C) 2014 Vahid Rafiei (@vahid_r)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
from testbuilder.utils import get_version
class TestUtilsModuleFunctions(unittest.TestCase):
""" This is a test skeleton for module-level functions at the utils module"""
def test_version(self):
self.assertEqual("0.9", get_version(), "The current version should be 0.9")
if __name__ == "__main__":
unittest.main()
return result
def read_timestamp(self):
"""Read and AMQP timestamp, which is a 64-bit integer representing
seconds since the Unix epoch in 1-second resolution.
Return as a Python datetime.datetime object,
expressed as UTC.
"""
return datetime.utcfromtimestamp(self.read_longlong())
class AMQPWriter(object):
"""Convert higher-level AMQP types to bytestreams."""
def __init__(self, dest=None):
"""dest may be a file-type object (with a write() method). If None
then a BytesIO is created, and the contents can be accessed with
this class's getvalue() method."""
self.out = BytesIO() if dest is None else dest
self.bits = []
self.bitcount = 0
def _flushbits(self):
if self.bits:
out = self.out
for b in self.bits:
out.write(pack('B', b))
self.bits = []
self.bitcount = 0
def close(self):
"""Pass through if possible to any file-like destinations."""
try:
self.out.close()
except AttributeError:
pass
def flush(self):
"""Pass through if possible to any file-like destinations."""
try:
self.out.flush()
except AttributeError:
pass
def getvalue(self):
"""Get what's been encoded so far if we're working with a BytesIO."""
self._flushbits()
return self.out.getvalue()
def write(self, s):
"""Write a plain Python string with no special encoding in Python 2.x,
or bytes in Python 3.x"""
self._flushbits()
self.out.write(s)
def write_bit(self, b):
"""Write a boolean value."""
b = 1 if b else 0
shift = self.bitcount % 8
if shift == 0:
self.bits.append(0)
self.bits[-1] |= (b << shift)
self.bitcount += 1
def write_octet(self, n):
"""Write an integer as an unsigned 8-bit value."""
if n < 0 or n > 255:
raise FrameSyntaxError(
'Octet {0!r} out of range 0..255'.format(n))
self._flushbits()
self.out.write(pack('B', n))
def write_short(self, n):
"""Write an integer as an unsigned 16-bit value."""
if n < 0 or n > 65535:
raise FrameSyntaxError(
'Octet {0!r} out of range 0..65535'.format(n))
self._flushbits()
self.out.write(pack('>H', int(n)))
def write_long(self, n):
"""Write an integer as an unsigned2 32-bit value."""
if n < 0 or n >= 4294967296:
raise FrameSyntaxError(
'Octet {0!r} out of range 0..2**32-1'.format(n))
self._flushbits()
self.out.write(pack('>I', n))
def write_longlong(self, n):
"""Write an integer as an unsigned 64-bit value."""
if n < 0 or n >= 18446744073709551616:
raise FrameSyntaxError(
'Octet {0!r} out of range 0..2**64-1'.format(n))
self._flushbits()
self.out.write(pack('>Q', n))
def write_shortstr(self, s):
"""Write a string up to 255 bytes long (after any encoding).
If passed a unicode string, encode with UTF-8.
"""
self._flushbits()
if isinstance(s, string):
s = s.encode('utf-8')
if len(s) > 255:
raise FrameSyntaxError(
'Shortstring overflow ({0} > 255)'.format(len(s)))
self.write_octet(len(s))
self.out.write(s)
def write_longstr(self, s):
"""Write a string up to 2**32 bytes long after encoding.
If passed a unicode string, encode as UTF-8.
"""
self._flushbits()
if isinstance(s, string):
s = s.encode('utf-8')
self.write_long(len(s))
self.out.write(s)
def write_table(self, d):
"""Write out a Python dictionary made of up string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints."""
self._flushbits()
table_data = AMQPWriter()
for k, v in items(d):
table_data.write_shortstr(k)
table_data.write_item(v, k)
table_data = table_data.getvalue()
self.write_long(len(table_data))
self.out.write(table_data)
def write_item(self, v, k=None):
if isinstance(v, (string_t, bytes)):
if isinstance(v, string):
v = v.encode('utf-8')
self.write(b'S')
self.write_longstr(v)
elif isinstance(v, bool):
self.write(pack('>cB', b't', int(v)))
elif isinstance(v, float):
self.write(pack('>cd', b'd', v))
elif isinstance(v, int_types):
self.write(pack('>ci', b'I', v))
elif isinstance(v, Decimal):
self.write(b'D')
sign, digits, exponent = v.as_tuple()
v = 0
for d in digits:
v = (v * 10) + d
if sign:
v = -v
self.write_octet(-exponent)
self.write(pack('>i', v))
elif isinstance(v, datetime):
self.write(b'T')
self.write_timestamp(v)
elif isinstance(v, dict):
self.write(b'F')
self.write_table(v)
elif isinstance(v, (list, tuple)):
self.write(b'A')
self.write_array(v)
elif v is None:
self.write(b'V')
else:
err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k
else ILLEGAL_TABLE_TYPE.format(type(v), v))
raise FrameSyntaxError(err)
def write_array(self, a):
array_data = AMQPWriter()
for v in a:
array_data.write_item(v)
array_data = array_data.getvalue()
self.write_long(len(array_data))
self.out.write(array_data)
def write_timestamp(self, v):
"""Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix epoch."""
self.out.write(pack('>Q', long_t(calendar.timegm(v.utctimetuple()))))
class GenericContent(object):
"""Abstract base class for AMQP content.
Subclasses should override the PROPERTIES attribute.
"""
PROPERTIES = [('dummy', 'shortstr')]
def __init__(self, **props):
"""Save the properties appropriate to this AMQP content type
in a 'properties' dictionary."""
d = {}
for propname, _ in self.PROPERTIES:
if propname in props:
d[propname] = props[propname]
# FIXME: should we ignore unknown properties?
self.properties = d
def __eq__(self, other):
"""Check if this object has the same properties as another
content object."""
try:
return self.properties == other.properties
except AttributeError:
return NotImplemented
def __getattr__(self, name):
"""Look for additional properties in the 'properties'
dictionary, and if present - the 'delivery_info'
dictionary."""
if name == '__setstate__':
# Allows pickling/unpickling to work
raise AttributeError('__setstate__')
if name in self.properties:
return self.properties[name]
if 'delivery_info' in self.__dict__ \
and name in self.delivery_info:
return self.delivery_info[name]
raise AttributeError(name)
def _load_properties(self, raw_bytes):
"""Given the raw bytes containing the property-flags and property-list
from a content-frame-header, parse and insert into a dictionary
stored in this object as an attribute named 'properties'."""
r = AMQPReader(raw_bytes)
#
# Read 16-bit shorts until we get one with a low bit set to zero
#
flags = []
while 1:
flag_bits = r.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
d = {}
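# A minimal usage sketch (an addition, not part of the original module):
# serialize a small AMQP table with the AMQPWriter defined above and read
# back the raw bytes.
#
# writer = AMQPWriter()
# writer.write_table({'retries': 3, 'durable': True, 'queue': 'jobs'})
# raw = writer.getvalue()  # 32-bit length prefix + packed key/value items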
elif origin in ["programmer", "developer", "source code"]:
devcomments = super().getnotes("developer")
autocomments = self.getautomaticcomments()
if devcomments == autocomments or autocomments.find(devcomments) >= 0:
devcomments = ""
elif devcomments.find(autocomments) >= 0:
autocomments = devcomments
devcomments = ""
return autocomments
else:
return super().getnotes(origin)
def markfuzzy(self, value=True):
super().markfuzzy(value)
for unit in self.units[1:]:
unit.markfuzzy(value)
def marktranslated(self):
super().marktranslated()
for unit in self.units[1:]:
unit.marktranslated()
def setid(self, id):
super().setid(id)
if len(self.units) > 1:
for i in range(len(self.units)):
self.units[i].setid("%s[%d]" % (id, i))
def getlocations(self):
"""Returns all the references (source locations)"""
groups = self.getcontextgroups("po-reference")
references = []
for group in groups:
sourcefile = ""
linenumber = ""
for (type, text) in group:
if type == "sourcefile":
sourcefile = text
elif type == "linenumber":
linenumber = text
assert sourcefile
if linenumber:
sourcefile = sourcefile + ":" + linenumber
references.append(sourcefile)
return references
def getautomaticcomments(self):
"""Returns the automatic comments (x-po-autocomment), which corresponds
to the #. style po comments.
"""
def hasautocomment(grp):
return grp[0] == "x-po-autocomment"
groups = self.getcontextgroups("po-entry")
comments = []
for group in groups:
commentpairs = filter(hasautocomment, group)
for (type, text) in commentpairs:
comments.append(text)
return "\n".join(comments)
def gettranslatorcomments(self):
"""Returns the translator comments (x-po-trancomment), which
corresponds to the # style po comments.
"""
def hastrancomment(grp):
return grp[0] == "x-po-trancomment"
groups = self.getcontextgroups("po-entry")
comments = []
for group in groups:
commentpairs = filter(hastrancomment, group)
for (type, text) in commentpairs:
comments.append(text)
return "\n".join(comments)
def isheader(self):
return "gettext-domain-header" in (self.getrestype() or "")
def istranslatable(self):
return super().istranslatable() and not self.isheader()
@classmethod
def createfromxmlElement(cls, element, namespace=None):
if element.tag.endswith("trans-unit"):
object = cls(None, empty=True)
object.xmlelement = element
object.namespace = namespace
return object
assert element.tag.endswith("group")
group = cls(None, empty=True)
group.xmlelement = element
group.namespace = namespace
units = list(element.iterdescendants(group.namespaced("trans-unit")))
for unit in units:
subunit = xliff.xliffunit.createfromxmlElement(unit)
subunit.namespace = namespace
group.units.append(subunit)
return group
def hasplural(self):
return self.xmlelement.tag == self.namespaced("group")
class PoXliffFile(xliff.xlifffile, poheader.poheader):
"""a file for the po variant of Xliff files"""
UnitClass = PoXliffUnit
def __init__(self, *args, **kwargs):
if "sourcelanguage" not in kwargs:
kwargs["sourcelanguage"] = "en-US"
xliff.xlifffile.__init__(self, *args, **kwargs)
def createfilenode(self, filename, sourcelanguage="en-US", datatype="po"):
# Let's ignore the sourcelanguage parameter opting for the internal
# one. PO files will probably be one language
return super().createfilenode(
filename, sourcelanguage=self.sourcelanguage, datatype="po"
)
def _insert_header(self, header):
header.xmlelement.set("restype", "x-gettext-domain-header")
header.xmlelement.set("approved", "no")
setXMLspace(header.xmlelement, "preserve")
self.addunit(header)
def addheaderunit(self, target, filename):
unit = self.addsourceunit(target, filename, True)
unit.target = target
unit.xmlelement.set("restype", "x-gettext-domain-header")
unit.xmlelement.set("approved", "no")
setXMLspace(unit.xmlelement, "preserve")
return unit
def addplural(self, source, target, filename, createifmissing=False):
"""This method should now be unnecessary, but is left for reference"""
assert isinstance(source, multistring)
if not isinstance(target, multistring):
target = multistring(target)
sourcel = len(source.strings)
targetl = len(target.strings)
if sourcel < targetl:
sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
targets = target.strings
else:
sources = source.strings
targets = target.strings
self._messagenum += 1
pluralnum = 0
group = self.creategroup(filename, True, restype="x-gettext-plural")
for (src, tgt) in zip(sources, targets):
unit = self.UnitClass(src)
unit.target = tgt
unit.setid("%d[%d]" % (self._messagenum, pluralnum))
pluralnum += 1
group.append(unit.xmlelement)
self.units.append(unit)
if pluralnum < sourcel:
for string in sources[pluralnum:]:
unit = self.UnitClass(string)
unit.xmlelement.set("translate", "no")
unit.setid("%d[%d]" % (self._messagenum, pluralnum))
pluralnum += 1
group.append(unit.xmlelement)
self.units.append(unit)
return self.units[-pluralnum]
def parse(self, xml):
"""Populates this object from the given xml string"""
# TODO: Make more robust
def ispluralgroup(node):
"""determines whether the xml node refers to a getttext plural"""
return node.get("restype") == "x-gettext-plurals"
def isnonpluralunit(node):
"""determindes whether the xml node contains a plural like id.
We want to filter out all the plural nodes, except the very first
one in each group.
"""
return re.match(r".+\[[123456]\]$", node.get("id") or "") is None
def pluralunits(pluralgroups):
for pluralgroup in pluralgroups:
yield self.UnitClass.createfromxmlElement(
pluralgroup, namespace=self.namespace
)
self.filename = getattr(xml, "name", "")
if hasattr(xml, "read"):
xml.seek(0)
xmlsrc = xml.read()
xml = xmlsrc
parser = etree.XMLParser(resolve_entities=False)
self.document = etree.fromstring(xml, parser).getroottree()
self.initbody()
root_node = self.document.getroot()
assert root_node.tag == self.namespaced(self.rootNode)
groups = root_node.iterdescendants(self.namespaced("group"))
pluralgroups = filter(ispluralgroup, groups)
termEntries = root_node.iterdescendants(
self.namespaced(self.UnitClass.rootNode)
)
singularunits = list(filter(isnonpluralunit, termEntries))
if len(singularunits) == 0:
return
pluralunit_iter = pluralunits(pluralgroups)
nextplural = next(pluralunit_iter, None)
for entry in singularunits:
term = self.UnitClass.createfromxmlElement(entry, namespace=self.namespace)
if nextplural and str(term.getid()) == ("%s[0]" % nextplural.getid()):
# encoding: utf-8
from bs4 import BeautifulSoup
from okscraper.base import BaseScraper
from okscraper.sources import UrlSource, ScraperSource
from okscraper.storages import ListStorage, DictStorage
from lobbyists.models import LobbyistHistory, Lobbyist, LobbyistData, LobbyistRepresent, LobbyistRepresentData
from persons.models import Person
from django.core.exceptions import ObjectDoesNotExist
from datetime import datetime
from lobbyist_represent import LobbyistRepresentScraper
class LobbyistScraperDictStorage(DictStorage):
"""
This storage first determines whether a new Lobbyist object needs to be created:
it searches for a Lobbyist object with the same source_id and first / last name.
If such an object exists, it uses that object; otherwise it creates a new Lobbyist.
It then updates lobbyist.data:
it gets the last LobbyistData object for this lobbyist and compares it to the current data.
If it matches, that object is reused and a new object is not created;
otherwise a new LobbyistData object is created and appended to lobbyist.data.
This storage returns the lobbyist object
"""
_commitInterval = -1
def _get_data_keys(self):
return ['first_name', 'family_name', 'profession', 'corporation_name', 'corporation_id', 'faction_member', 'faction_name', 'permit_type']
def _get_represents_data(self, source_id):
return LobbyistRepresentScraper().scrape(source_id)
def _get_latest_lobbyist_data(self, lobbyist):
return lobbyist.latest_data
def _get_last_lobbyist_data(self, lobbyist, data):
try:
last_lobbyist_data = self._get_latest_lobbyist_data(lobbyist)
except ObjectDoesNotExist:
last_lobbyist_data = None
if last_lobbyist_data is not None:
for key in self._get_data_keys():
if data[key] != getattr(last_lobbyist_data, key):
last_lobbyist_data = None
break
if last_lobbyist_data is not None:
represent_ids = sorted(data['represents'], key=lambda represent: represent.id)
last_represent_ids = sorted(last_lobbyist_data.represents.all(), key=lambda represent: represent.id)
if represent_ids != last_represent_ids:
last_lobbyist_data = None
return last_lobbyist_data
def commit(self):
super(LobbyistScraperDictStorage, self).commit()
data = self._data
source_id = data['id']
data['represents'] = self._get_represents_data(source_id)
full_name = '%s %s' % (data['first_name'], data['family_name'])
q = Lobbyist.objects.filter(source_id=source_id, person__name=full_name)
if q.count() > 0:
lobbyist = q[0]
else:
lobbyist = Lobbyist.objects.create(person=Person.objects.create(name=full_name), source_id=source_id)
self._data = lobbyist
last_lobbyist_data = self._get_last_lobbyist_data(lobbyist, data)
if last_lobbyist_data is None:
kwargs = {}
for key in self._get_data_keys():
kwargs[key] = data[key]
kwargs['source_id'] = source_id
lobbyist_data = LobbyistData.objects.create(**kwargs)
for represent in data['represents']:
lobbyist_data.represents.add(represent)
lobbyist_data.scrape_time = datetime.now()
lobbyist_data.save()
lobbyist.data.add(lobbyist_data)
else:
lobbyist.data.add(last_lobbyist_data)
lobbyist.save()
class LobbyistScraper(BaseScraper):
"""
This scraper gets a lobbyist id and then queries the knesset api for that lobbyist's data
"""
def __init__(self):
super(LobbyistScraper, self).__init__()
self.source = UrlSource('http://online.knesset.gov.il/WsinternetSps/KnessetDataService/LobbyistData.svc/View_lobbyist(<<id>>)')
self.storage = LobbyistScraperDictStorage()
def _storeLobbyistDataFromSoup(self, soup):
lobbyist_id = soup.find('d:lobbyist_id').text.strip()
self._getLogger().info('got lobbyist id "%s"', lobbyist_id)
lobbyist = {
'id': lobbyist_id,
'first_name': soup.find('d:first_name').text.strip(),
'family_name': soup.find('d:family_name').text.strip(),
'profession': soup.find('d:profession').text.strip(),
'corporation_name': soup.find('d:corporation_name').text.strip(),
'corporation_id': soup.find('d:corporation_id').text.strip(),
'faction_member': soup.find('d:faction_member').text.strip(),
'faction_name': soup.find('d:faction_name').text.strip(),
'permit_type': soup.find('d:lobyst_permit_type').text.strip(),
}
self.storage.storeDict(lobbyist)
self._getLogger().debug(lobbyist)
def _scrape(self, lobbyist_id):
html = self.source.fetch(lobbyist_id)
soup = BeautifulSoup(html)
return self._storeLobbyistDataFromSoup(soup)
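# A minimal usage sketch (an addition, not part of the original module):
# scrape() is inherited from okscraper's BaseScraper and drives _scrape();
# the storage commit above returns the resulting Lobbyist object. The id
# below is a made-up example.
#
# scraper = LobbyistScraper()
# lobbyist = scraper.scrape(215)
# print(lobbyist.person.name)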