prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#!/usr/bin/env python
import logging
from ncclient import manager
from ncclient.xml_ import *
def connect(host, port, user, password):
    """Open a NETCONF session to a Junos device and log inventory info.

    Connects with the Junos device handler, requests brief software
    information and the extensive chassis inventory, and logs both
    RPC replies at INFO level.

    :param host: hostname or IP of the device
    :param port: NETCONF port (usually 830)
    :param user: SSH username
    :param password: SSH password
    """
    conn = manager.connect(host=host,
                           port=port,
                           username=user,
                           password=password,
                           timeout=60,
                           device_params={'name': 'junos'},
                           hostkey_verify=False)
    try:
        # Junos-specific RPCs exposed by the 'junos' device handler.
        result = conn.get_software_information('brief', test='me')
        logging.info(result)
        result = conn.get_chassis_inventory('extensive')
        logging.info(result)
    finally:
        # Always release the NETCONF session (was leaked before).
        conn.close_session()
if __name__ == '__main__':
    import sys  # used for sys.stdout below; missing from module imports

    # Fixed corrupted '%(a | sctime)s' token -> '%(asctime)s'.
    LOG_FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)
    connect('router', 830, 'netconf', 'juniper!')
|
='group_new'),
url(r'^groups/$', groups.groups, name='groups'),
url(r'^events/request/$', events.event_request, name='event_request'),
url(r'^events/(?P<id>\d+)/$', events.event_edit, name='event_edit'),
url(r'^events/(?P<id>\d+)/privacy-vidly-mismatch/$',
events.event_privacy_vidly_mismatch,
name='event_privacy_vidly_mismatch'),
url(r'^events/(?P<id>\d+)/assignment/$',
events.event_assignment,
name='event_assignment'),
url(r'^events/(?P<id>\d+)/transcript/$',
events.event_transcript,
name='event_transcript'),
url(r'^events/(?P<id>\d+)/upload/$',
events.event_upload,
name='event_upload'),
url(r'^events/(?P<id>\d+)/vidly-submissions/$',
events.event_vidly_submissions,
name='event_vidly_submissions'),
url(r'^events/(?P<id>\d+)/vidly-submissions/submission'
r'/(?P<submission_id>\d+)/$',
events.event_vidly_submission,
name='event_vidly_submission'),
url(r'^events/(?P<id>\d+)/comments/$',
events.event_comments,
name='event_comments'),
url(r'^events/(?P<id>\d+)/comments/configuration/$',
events.event_discussion,
name='event_discussion'),
url(r'^events/(?P<id>\d+)/stop-live/$', events.event_stop_live,
name='stop_live_event'),
url(r'^events/(?P<id>\d+)/delete/$', events.event_delete,
name='event_delete'),
url(r'^events/(?P<id>\d+)/survey/$', events.event_survey,
name='event_survey'),
url(r'^events/(?P<id>\d+)/tweets/$', events.event_tweets,
name='event_tweets'),
url(r'^events/(?P<id>\d+)/tweets/new/$', events.new_event_tweet,
name='new_event_tweet'),
url(r'^events/all/tweets/$', events.all_event_tweets,
name='all_event_tweets'),
url(r'^events/archive/(?P<id>\d+)/$', events.event_archive,
name='event_archive'),
url(r'^events/archive/(?P<id>\d+)/auto/$',
events.event_archive_auto,
name='event_archive_auto'),
url(r'^events/(?P<id>\d+)/archive-time/$', events.event_archive_time,
name='event_archive_time'),
url(r'^events/fetch/duration/(?P<id>\d+)/$',
events.event_fetch_duration,
name='event_fetch_duration'),
url(r'^events/fetch/screencaptures/(?P<id>\d+)/$',
events.event_fetch_screencaptures,
name='event_fetch_screencaptures'),
url(r'^events/duplicate/(?P<duplicate_id>\d+)/$', events.event_request,
name='event_duplicate'),
url(r'^events/vidlyurltoshortcode/(?P<id>\d+)/',
events.vidly_url_to_shortcode,
name='vidly_url_to_shortcode'),
url(r'^events/hits/$', events.event_hit_stats, name='event_hit_stats'),
url(r'^events/assignments/$',
events.event_assignments,
name='event_assignments'),
url(r'^events/assignments.ics$',
events.event_assignments_ical,
na | me='event_assignments_ical'),
url(r'^events/$', events.events, name='events'),
url(r'^events/data/$', events.events_data, name='events_data'),
url(r'^events/redirect_thumbnail/(?P<id>\d+)/$',
events.redirect_event_thumbnail,
name='redirect_event_thumbnail'),
url(r'^surveys/$', | surveys.surveys_, name='surveys'),
url(r'^surveys/new/$', surveys.survey_new, name='survey_new'),
url(r'^surveys/(?P<id>\d+)/$', surveys.survey_edit, name='survey_edit'),
url(r'^surveys/(?P<id>\d+)/delete/$', surveys.survey_delete,
name='survey_delete'),
url(r'^surveys/(?P<id>\d+)/questions/$', surveys.survey_questions,
name='survey_questions'),
url(r'^surveys/(?P<id>\d+)/question/(?P<question_id>\d+)/$',
surveys.survey_question_edit,
name='survey_question_edit'),
url(r'^surveys/(?P<id>\d+)/question/(?P<question_id>\d+)/delete/$',
surveys.survey_question_delete,
name='survey_question_delete'),
url(r'^surveys/(?P<id>\d+)/question/new/$',
surveys.survey_question_new,
name='survey_question_new'),
url(r'^comments/$', comments.all_comments, name='all_comments'),
url(r'^comments/(?P<id>\d+)/$',
comments.comment_edit,
name='comment_edit'),
url(r'^events-autocomplete/$', events.event_autocomplete,
name='event_autocomplete'),
url(r'^channels/new/$', channels.channel_new, name='channel_new'),
url(r'^channels/(?P<id>\d+)/$', channels.channel_edit,
name='channel_edit'),
url(r'^channels/remove/(?P<id>\d+)/$', channels.channel_remove,
name='channel_remove'),
url(r'^channels/$', channels.channels, name='channels'),
url(r'^templates/env-autofill/$', templates.template_env_autofill,
name='template_env_autofill'),
url(r'^templates/new/$', templates.template_new, name='template_new'),
url(r'^templates/(?P<id>\d+)/$', templates.template_edit,
name='template_edit'),
url(r'^templates/(?P<id>\d+)/migrate/$', templates.template_migrate,
name='template_migrate'),
url(r'^templates/remove/(?P<id>\d+)/$', templates.template_remove,
name='template_remove'),
url(r'^templates/$', templates.templates, name='templates'),
url(r'^tags/$', tags.tags, name='tags'),
url(r'^tags/data/$', tags.tags_data, name='tags_data'),
url(r'^tags/(?P<id>\d+)/$', tags.tag_edit, name='tag_edit'),
url(r'^tags/remove/(?P<id>\d+)/$', tags.tag_remove, name='tag_remove'),
url(r'^tags/merge/(?P<id>\d+)/$', tags.tag_merge, name='tag_merge'),
url(r'^locations/new/$', locations.location_new, name='location_new'),
url(r'^locations/(?P<id>\d+)/$', locations.location_edit,
name='location_edit'),
url(r'^locations/remove/(?P<id>\d+)/$', locations.location_remove,
name='location_remove'),
url(r'^locations/tz/$', locations.location_timezone,
name='location_timezone'),
url(r'^locations/$', locations.locations, name='locations'),
url(r'^regions/new/$', regions.region_new, name='region_new'),
url(r'^regions/(?P<id>\d+)/$', regions.region_edit,
name='region_edit'),
url(r'^regions/remove/(?P<id>\d+)/$', regions.region_remove,
name='region_remove'),
url(r'^regions/$', regions.regions, name='regions'),
url(r'^topics/new/$', topics.topic_new, name='topic_new'),
url(r'^topics/(?P<id>\d+)/$', topics.topic_edit,
name='topic_edit'),
url(r'^topics/remove/(?P<id>\d+)/$', topics.topic_remove,
name='topic_remove'),
url(r'^topics/$', topics.topics, name='topics'),
url(r'^approvals/$', approvals.approvals, name='approvals'),
url(r'^approvals/reconsider/$', approvals.approval_reconsider,
name='approval_reconsider'),
url(r'^approvals/(?P<id>\d+)/$', approvals.approval_review,
name='approval_review'),
url(r'^pages/$', staticpages.staticpages, name='staticpages'),
url(r'^pages/new/$', staticpages.staticpage_new, name='staticpage_new'),
url(r'^pages/(?P<id>\d+)/$', staticpages.staticpage_edit,
name='staticpage_edit'),
url(r'^pages/remove/(?P<id>\d+)/$', staticpages.staticpage_remove,
name='staticpage_remove'),
url(r'^suggestions/$', suggestions.suggestions, name='suggestions'),
url(r'^suggestions/(?P<id>\d+)/$', suggestions.suggestion_review,
name='suggestion_review'),
url(r'^vidly/$', vidly_media.vidly_media,
name='vidly_media'),
url(r'^vidly/timings/$', vidly_media.vidly_media_timings,
name='vidly_media_timings'),
url(r'^vidly/timings/data/$', vidly_media.vidly_media_timings_data,
name='vidly_media_timings_data'),
url(r'^vidly/webhook/$', vidly_media.vidly_media_webhook,
name='vidly_media_webhook'),
url(r'^vidly/status/$', vidly_media.vidly_media_status,
name='vidly_media_status'),
url(r'^vidly/info/$', vidly_media.vidly_media_info,
name='vidly_media_info'),
url(r'^vidly/resubmit/$', vidly_media.vidly_media_resubmit,
name='vidly_media_resubmit'),
url(r'^urltransforms/$', url_transforms.url_transforms,
name='url_transforms'),
url(r'^urltransforms/new/$', url_transforms.url_match_new,
name='url_match_new'),
url(r'^urltransforms/run/$', url_transforms.u |
from email.utils import formatdate
from typing import Optional, Type, TypeVar
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
ConnectionRefusedError,
DNSLookupError,
TCPTimedOutError,
TimeoutError,
)
from twisted.web.client import ResponseFailed
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http.request import Request
from scrapy.http.response import Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector
from scrapy.utils.misc import load_object
HttpCacheMiddlewareTV = TypeVar("HttpCacheMiddlewareTV", bound="HttpCacheMiddleware")
class HttpCacheMiddleware:
    """Downloader middleware that serves requests from an HTTP cache and
    stores downloaded responses back into it.

    Both the caching *policy* (what is cacheable / fresh / valid) and the
    *storage* backend are pluggable, loaded from the ``HTTPCACHE_POLICY``
    and ``HTTPCACHE_STORAGE`` settings.
    """

    # Network-level failures for which a previously cached (possibly stale)
    # response may be served as a fallback — see process_exception().
    DOWNLOAD_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError)

    def __init__(self, settings: Settings, stats: StatsCollector) -> None:
        """Build policy/storage from settings; NotConfigured disables us."""
        if not settings.getbool('HTTPCACHE_ENABLED'):
            raise NotConfigured
        self.policy = load_object(settings['HTTPCACHE_POLICY'])(settings)
        self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
        self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
        self.stats = stats

    @classmethod
    def from_crawler(cls: Type[HttpCacheMiddlewareTV], crawler: Crawler) -> HttpCacheMiddlewareTV:
        """Standard Scrapy factory: wire storage open/close to spider signals."""
        o = cls(crawler.settings, crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
        return o

    def spider_opened(self, spider: Spider) -> None:
        self.storage.open_spider(spider)

    def spider_closed(self, spider: Spider) -> None:
        self.storage.close_spider(spider)

    def process_request(self, request: Request, spider: Spider) -> Optional[Response]:
        """Return a fresh cached response, or None to continue the download."""
        if request.meta.get('dont_cache', False):
            return None

        # Skip uncacheable requests
        if not self.policy.should_cache_request(request):
            request.meta['_dont_cache'] = True  # flag as uncacheable
            return None

        # Look for cached response and check if expired
        cachedresponse = self.storage.retrieve_response(spider, request)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/miss', spider=spider)
            if self.ignore_missing:
                self.stats.inc_value('httpcache/ignore', spider=spider)
                raise IgnoreRequest("Ignored request not in cache: %s" % request)
            return None  # first time request

        # Return cached response only if not expired
        cachedresponse.flags.append('cached')
        if self.policy.is_cached_response_fresh(cachedresponse, request):
            self.stats.inc_value('httpcache/hit', spider=spider)
            return cachedresponse

        # Keep a reference to cached response to avoid a second cache lookup on
        # process_response hook
        request.meta['cached_response'] = cachedresponse

        return None

    def process_response(self, request: Request, response: Response, spider: Spider) -> Response:
        """Cache/validate the downloaded response; may return the cached one."""
        if request.meta.get('dont_cache', False):
            return response

        # Skip cached responses and uncacheable requests
        if 'cached' in response.flags or '_dont_cache' in request.meta:
            request.meta.pop('_dont_cache', None)
            return response

        # RFC2616 requires origin server to set Date header,
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
        if 'Date' not in response.headers:
            response.headers['Date'] = formatdate(usegmt=True)

        # Do not validate first-hand responses
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/firsthand', spider=spider)
            self._cache_response(spider, response, request, cachedresponse)
            return response

        if self.policy.is_cached_response_valid(cachedresponse, response, request):
            self.stats.inc_value('httpcache/revalidate', spider=spider)
            return cachedresponse

        self.stats.inc_value('httpcache/invalidate', spider=spider)
        self._cache_response(spider, response, request, cachedresponse)
        return response

    def process_exception(
        self, request: Request, exception: Exception, spider: Spider
    ) -> Optional[Response]:
        """On a recoverable network failure, fall back to the cached response."""
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is not None and isinstance(exception, self.DOWNLOAD_EXCEPTIONS):
            self.stats.inc_value('httpcache/errorrecovery', spider=spider)
            return cachedresponse
        return None

    def _cache_response(
        self, spider: Spider, response: Response, request: Request, cachedresponse: Optional[Response]
    ) -> None:
        # Store only responses the policy accepts; account for both outcomes.
        if self.policy.should_cache_response(response, request):
            self.stats.inc_value('httpcache/store', spider=spider)
            self.storage.store_response(spider, request, response)
        else:
            self.stats.inc_value('httpcache/uncacheable', spider=spider)
|
def greeting(msg):
    """Print the greeting message followed by a newline."""
    print(msg)
def valediction(msg):
    """Print *msg* reversed, followed by a blank line."""
    for char in reversed(msg):
        print(char, end="")
    # print("\n") emits the argument newline plus its own -> blank line.
    print("\n")
|
# -*- coding: utf-8 -*-
# pylint: disable=C0321,E1120,E1123,W0223
"""
Inbound webhook handlers.
"""
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django_fixmystreet.fixmystreet.models import FMSUser, Report, ReportAttachment, ReportComment, ReportEventLog
from django_fixmystreet.fixmystreet.utils import check_responsible_permission, check_contractor_permission
class NotLinkedWithThirdPartyError(Exception):
    """The report is not linked to any third party."""


class ThirdPartyNotAuthorizedError(Exception):
    """The acting third party has no authorization for this report."""


class InvalidReportStatusError(Exception):
    """The report is not in a state that allows this action."""


class BadRequestError(Exception):
    """The webhook payload is missing required data."""
class ReportAcceptInWebhookMixin(object):
    """Mixin implementing the ``accept`` action for report webhooks.

    The host class must provide ``_validate`` (via super), ``_add_comment``
    and the ``_data`` payload dict.
    """

    ACTION_MESSAGE = u""

    def run(self):
        """Validate the payload, then record an acceptance comment."""
        self._validate()
        self._add_comment({
            "action_msg": self.ACTION_MESSAGE,
            "reference_id": self._data["reference_id"],
            "comment": self._data["comment"],
        })

    def _validate(self):
        super(ReportAcceptInWebhookMixin, self)._validate()
        # The external system must echo back its own reference id.
        if not self._data.get("reference_id"):
            raise BadRequestError(u"'data.referenceId' is required.")
class ReportRejectInWebhookMixin(object):
    """Mixin implementing the ``reject`` action for report webhooks.

    The host class must provide ``_validate`` (via super), ``_add_comment``
    and the ``_data`` payload dict.  (Fixed extraction-corrupted ``def run``
    and ``raise`` lines.)
    """

    ACTION_MESSAGE = u""

    def run(self):
        """Validate the payload, then record a rejection comment."""
        self._validate()
        context = {
            "action_msg": self.ACTION_MESSAGE,
            "comment": self._data["comment"],
        }
        self._add_comment(context)

    def _validate(self):
        super(ReportRejectInWebhookMixin, self)._validate()
        # Required fields: a rejection must carry an explanatory comment.
        if not self._data.get("comment"):
            raise BadRequestError(u"'data.comment' is required.")
class ReportCloseInWebhookMixin(object):
    """Mixin implementing the ``close`` action for report webhooks.

    The host class must provide ``_validate`` (via super), ``_add_comment``
    and the ``_data`` payload dict.
    """

    ACTION_MESSAGE = u""

    def run(self):
        """Validate the payload, then record a closing comment."""
        self._validate()
        payload = self._data
        self._add_comment({
            "action_msg": self.ACTION_MESSAGE,
            "reference_id": payload["reference_id"],
            "comment": payload["comment"],
        })

    def _validate(self):
        super(ReportCloseInWebhookMixin, self)._validate()
        # The external system must echo back its own reference id.
        if not self._data.get("reference_id"):
            raise BadRequestError(u"'data.referenceId' is required.")
class AbstractBaseInWebhook(object):
    """
    Abstract inbound webhook handler. Every inbound webhook must derive from this class.
    Class naming convention: ``<Resource><Hook><Action>InWebhook``.
    """

    def __init__(self, meta, data, user=None):
        # Stash the raw payload pieces for subclasses to consume.
        self._meta, self._data, self._user = meta, data, user

    def run(self):
        """Execute the webhook action; concrete subclasses must override."""
        raise NotImplementedError()
class AbstractReportInWebhook(AbstractBaseInWebhook):
    """Abstract inbound webhook handler for ``report.*.*``."""

    def __init__(self, meta, data, user=None):
        super(AbstractReportInWebhook, self).__init__(meta, data, user=user)
        # NOTE(review): raises Report.DoesNotExist for unknown ids — the
        # presence check on meta["id"] only happens later in _validate().
        self._report = Report.objects.get(pk=meta["id"])
        self._third_party = None  # set by concrete subclasses

    def _add_comment(self, context):
        """Render the webhook comment template and attach it to the report."""
        # ACTION_MESSAGE templates use a {third_party} placeholder.
        context["action_msg"] = context["action_msg"].format(third_party=self._third_party.name)
        formatted_comment = render_to_string("webhooks/report_comment.txt", context)
        fms_user = FMSUser.objects.get(pk=self._user.id)
        comment = ReportComment(
            report=self._report, text=formatted_comment, type=ReportAttachment.DOCUMENTATION, created_by=fms_user
        )
        comment.save()

    def _user_has_permission(self):
        # Concrete subclasses decide which permission check applies.
        raise NotImplementedError()

    def _validate(self):
        # Ordered checks: linkage, report state, authorization, payload shape.
        if self._third_party is None:
            raise NotLinkedWithThirdPartyError(u"Report not linked with a third-party.")
        if not self._report.is_in_progress():
            raise InvalidReportStatusError(u"Report not in a valid state.")
        if not self._user_has_permission():
            raise ThirdPartyNotAuthorizedError(u"No authorization for this report.")
        # Required fields.
        if not self._meta.get("id"):
            raise BadRequestError(u"'meta.id' is required.")
class AbstractReportAssignmentInWebhook(AbstractReportInWebhook):
    """Abstract inbound webhook handler for ``report.assignment.*``."""

    def __init__(self, meta, data, user=None):
        super(AbstractReportAssignmentInWebhook, self).__init__(meta, data, user=user)
        # For assignment hooks the third party is the report's contractor.
        self._third_party = self._report.contractor

    def _user_has_permission(self):
        # Contractor-scoped permission check (see fixmystreet.utils).
        return check_contractor_permission(self._user, self._report)
class ReportAssignmentAcceptInWebhook(ReportAcceptInWebhookMixin, AbstractReportAssignmentInWebhook):
    """Inbound webhook handler for ``report.assignment.accept``."""
    # {third_party} is substituted with the contractor name in _add_comment().
    ACTION_MESSAGE = _(u"Report assignment was accepted by {third_party}.")


class ReportAssignmentRejectInWebhook(ReportRejectInWebhookMixin, AbstractReportAssignmentInWebhook):
    """Inbound webhook handler for ``report.assignment.reject``."""
    ACTION_MESSAGE = _(u"Report assignment was rejected by {third_party}.")


class ReportAssignmentCloseInWebhook(ReportCloseInWebhookMixin, AbstractReportAssignmentInWebhook):
    """Inbound webhook handler for ``report.assignment.close``."""
    ACTION_MESSAGE = _(u"Report assignment was closed by {third_party}.")
class AbstractReportTransferInWebhook(AbstractReportInWebhook):
    """Abstract inbound webhook handler for ``report.transfer.*``."""

    def __init__(self, meta, data, user=None):
        super(AbstractReportTransferInWebhook, self).__init__(meta, data, user=user)
        # For transfer hooks the third party is the responsible department.
        self._third_party = self._report.responsible_department

    def _user_has_permission(self):
        # Responsible-department permission check (see fixmystreet.utils).
        return check_responsible_permission(self._user, self._report)
class ReportTransferAcceptInWebhook(ReportAcceptInWebhookMixin, AbstractReportTransferInWebhook):
    """Inbound webhook handler for ``report.transfer.accept``."""
    # {third_party} is substituted with the department name in _add_comment().
    ACTION_MESSAGE = _(u"Report transfer was accepted by {third_party}.")
class ReportTransferRejectInWebhook(ReportRejectInWebhookMixin, AbstractReportTransferInWebhook):
    """Inbound webhook handler for ``report.transfer.reject``."""
    ACTION_MESSAGE = _(u"Report transfer was rejected by {third_party}.")

    def run(self):
        """Record the rejection comment, then roll the report back to the
        previously assigned department and entity."""
        super(ReportTransferRejectInWebhook, self).run()
        # Latest MANAGER_ASSIGNED event for the current responsible entity
        # carries the previous department in ``related_old``.
        self._report.responsible_department = ReportEventLog.objects.filter(
            report=self._report,
            organisation=self._report.responsible_entity,
            event_type=ReportEventLog.MANAGER_ASSIGNED
        ).latest("event_at").related_old
        self._report.responsible_entity = self._report.responsible_department.dependency
        self._report.status = Report.MANAGER_ASSIGNED
        self._report.save()
class ReportTransferCloseInWebhook(ReportCloseInWebhookMixin, AbstractReportTransferInWebhook):
    """Inbound webhook handler for ``report.transfer.close``."""
    ACTION_MESSAGE = _(u"Report transfer was closed by {third_party}.")

    def run(self):
        """Record the close comment, then mark the report itself as closed."""
        super(ReportTransferCloseInWebhook, self).run()
        self._report.close()
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from vnc_api.vnc_api import *
class EncapsulationProvision(object):
    """Provision (add or delete) the global vrouter encapsulation priority
    list through the Contrail VNC API.  Python 2 CLI utility."""

    def __init__(self, args_str=None):
        # Parse CLI/ini arguments first; they drive everything below.
        self._args = None
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self._parse_args(args_str)

        self._vnc_lib = VncApi(
            self._args.admin_user, self._args.admin_password,
            self._args.admin_tenant_name,
            self._args.api_server_ip,
            self._args.api_server_port, '/')
        encap_obj=EncapsulationPrioritiesType(encapsulation=self._args.encap_priority.split(","))
        try:
            current_config=self._vnc_lib.global_vrouter_config_read(
                                fq_name=['default-global-system-config',
                                         'default-global-vrouter-config'])
        except Exception as e:
            # No existing global-vrouter-config: create one on "add".
            # NOTE(review): for other opers the code falls through with
            # current_config unset — presumably unreachable; confirm.
            if self._args.oper == "add":
                conf_obj=GlobalVrouterConfig(encapsulation_priorities=encap_obj,vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode)
                result=self._vnc_lib.global_vrouter_config_create(conf_obj)
                print 'Created.UUID is %s'%(result)
                return

        # Preserve existing link-local services while updating encapsulation.
        current_linklocal=current_config.get_linklocal_services()
        encapsulation_priorities=encap_obj
        vxlan_network_identifier_mode=current_config.get_vxlan_network_identifier_mode()
        if self._args.oper != "add":
            # "delete": clear the priority list but keep link-local services.
            encap_obj=EncapsulationPrioritiesType(encapsulation=[])
            conf_obj=GlobalVrouterConfig(linklocal_services=current_linklocal,
                                         encapsulation_priorities=encap_obj)
        else :
            conf_obj=GlobalVrouterConfig(linklocal_services=current_linklocal,
                                         encapsulation_priorities=encapsulation_priorities,
                                         vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode)
        result=self._vnc_lib.global_vrouter_config_update(conf_obj)
        print 'Updated.%s'%(result)
    # end __init__

    def _parse_args(self, args_str):
        '''
        Eg. python provision_encap.py
        --api_server_ip 127.0.0.1
        --api_server_port 8082
        --encap_priority "MPLSoUDP,MPLSoGRE,VXLAN"
        --vxlan_vn_id_mode "automatic"
        --oper <add | delete>
        '''
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c", "--conf_file",
                                 help="Specify config file", metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        # Hard-coded fallbacks, overridable by ini file then CLI.
        defaults = {
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
            'oper': 'add',
            'encap_priority': 'MPLSoUDP,MPLSoGRE,VXLAN',
            'vxlan_vn_id_mode' : 'automatic'
        }
        ksopts = {
            'admin_user': 'user1',
            'admin_password': 'password1',
            'admin_tenant_name': 'admin'
        }

        if args.conf_file:
            # Values from the ini file override the hard-coded defaults.
            config = ConfigParser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))
            if 'KEYSTONE' in config.sections():
                ksopts.update(dict(config.items("KEYSTONE")))

        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        defaults.update(ksopts)
        parser.set_defaults(**defaults)

        parser.add_argument(
            "--api_server_ip", help="IP address of api server")
        parser.add_argument("--api_server_port", help="Port of api server")
        parser.add_argument(
            "--encap_priority", help="List of Encapsulation priority")
        parser.add_argument(
            "--vxlan_vn_id_mode", help="Virtual Network id type to be used")
        parser.add_argument(
            "--oper", default='add',help="Provision operation to be done(add or delete)")
        parser.add_argument(
            "--admin_user", help="Name of keystone admin user")
        parser.add_argument(
            "--admin_password", help="Password of keystone admin user")

        self._args = parser.parse_args(remaining_argv)
        if not self._args.encap_priority:
            parser.error('encap_priority is required')
    # end _parse_args
# end class EncapsulationProvision
def main(args_str=None):
    """CLI entry point: run the encapsulation provisioning.

    *args_str* defaults to the process argv when None (handled by
    EncapsulationProvision itself).
    """
    EncapsulationProvision(args_str)


if __name__ == "__main__":
    main()
| |
ify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
class EC2ConnectionTest(unittest.TestCase):
ec2 = True
    @attr('notdefault')
    def test_launch_permissions(self):
        """Round-trip add/remove of AMI launch permissions against live EC2.

        Tagged 'notdefault' so it only runs when explicitly requested
        (it depends on a hard-coded private account id).
        """
        # this is my user_id, if you want to run these tests you should
        # replace this with yours or they won't work
        user_id = '963068290131'
        print '--- running EC2Connection tests ---'
        c = EC2Connection()
        # get list of private AMI's
        rs = c.get_all_images(owners=[user_id])
        assert len(rs) > 0
        # now pick the first one
        image = rs[0]
        # temporarily make this image runnable by everyone
        status = image.set_launch_permissions(group_names=['all'])
        assert status
        d = image.get_launch_permissions()
        assert 'groups' in d
        assert len(d['groups']) > 0
        # now remove that permission
        status = image.remove_launch_permissions(group_names=['all'])
        assert status
        # EC2 permission changes are eventually consistent; give it a moment.
        time.sleep(10)
        d = image.get_launch_permissions()
        assert 'groups' not in d
def test_1_basic(self):
# create 2 new security groups
c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
g | roup1 = c.create_security_group(group1_name, group_desc)
time.sleep(2)
group2 | _name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group2 = c.create_security_group(group2_name, group_desc)
# now get a listing of all security groups and look for our new one
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group1_name:
found = True
assert found
# now pass arg to filter results to only our new group
rs = c.get_all_security_groups([group1_name])
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
# now try specifying a specific port
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group2_name:
found = True
assert not found
group = group1
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
for image in rs:
if image.location == img_loc:
break
reservation = image.run(security_groups=[group.name])
instance = reservation.instances[0]
while instance.state != 'running':
print '\tinstance is %s' % instance.state
time.sleep(30)
instance.update()
# instance in now running, try to telnet to port 80
t = telnetlib.Telnet()
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now open up port 80 and try again, it should work
group.authorize('tcp', 80, 80, '0.0.0.0/0')
t.open(instance.dns_name, 80)
t.close()
# now revoke authorization and try again
group.revoke('tcp', 80, 80, '0.0.0.0/0')
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now kill the instance and delete the security group
instance.terminate()
# check that state and previous_state have updated
assert instance.state == 'shutting-down'
assert instance.state_code == 32
assert instance.previous_state == 'running'
assert instance.previous_state_code == 16
# unfortunately, I can't delete the sg within this script
#sg.delete()
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
assert status
# now get a listing of all key pairs and look for our new one
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert found
# now pass arg to filter results to only our new key pair
rs = c.get_all_key_pairs([key_name])
assert len(rs) == 1
key_pair = rs[0]
# now delete the key pair
status = c.delete_key_pair(key_name)
# now make sure it's really gone
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert not found
# short test around Paid AMI capability
demo_paid_ami_id = 'ami-bd9d78d4'
demo_paid_ami_product_code = 'A79EC0DB'
l = c.get_all_images([demo_paid_ami_id])
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
print '--- tests completed ---'
def test_dry_run(self):
c = EC2Connection()
dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
try:
rs = c.get_all_images(dry_run=True)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small',
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
# Need an actual instance for the rest of this...
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small'
)
time.sleep(120)
try:
rs = c.stop_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
try:
rs |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions Copyright © 2013 Hortonworks, Inc.
import logging
import os
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from hadoop import cluster
from hadoop.fs.hadoopfs import Hdfs
from liboozie.conf import REMOTE_DEPLOYMENT_DIR
from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
    """Install the Oozie examples: copy sample workflows and data into HDFS
    and load the example jobs fixture."""

    def handle_noargs(self, **options):
        fs = cluster.get_hdfs()
        remote_dir = create_directories(fs)

        # Copy examples binaries
        for name in os.listdir(LOCAL_SAMPLE_DIR.get()):
            local_dir = fs.join(LOCAL_SAMPLE_DIR.get(), name)
            remote_data_dir = fs.join(remote_dir, name)
            LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % {
                'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
            # Fixed extraction-corrupted 'local_dir' argument on this call.
            fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)

        # Copy sample data
        local_dir = LOCAL_SAMPLE_DATA_DIR.get()
        remote_data_dir = fs.join(remote_dir, 'data')
        LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % {
            'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
        fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)

        # Load jobs
        sample, created = User.objects.get_or_create(username='sample')
        management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2)
        from oozie.models import Job
        # Fixture jobs are owned by the magic id 1100713; reassign to 'sample'.
        Job.objects.filter(owner__id=1100713).update(owner=sample)  # 11OOZIE
def create_directories(fs):
  """Create the remote home, deployment and sample directories if missing.

  All filesystem calls run as the 'hdfs' superuser via fs.do_as_user.
  Returns the remote sample directory path examples should be copied into.
  """
  # If needed, create the remote home, deployment and data directories
  directories = (REMOTE_DEPLOYMENT_DIR.get(), REMOTE_SAMPLE_DIR.get())
  for directory in directories:
    if not fs.do_as_user("hdfs", fs.exists, directory):
      remote_home_dir = Hdfs.join('/user', "hdfs")
      if directory.startswith(remote_home_dir):
        # Home is 755
        fs.do_as_user("hdfs", fs.create_home_dir, remote_home_dir)
      # Shared by all the users. 0o777 == decimal 511 (the old literal);
      # the octal form makes the permission bits explicit.
      fs.do_as_user("hdfs", fs.mkdir, directory, 0o777)
      fs.do_as_user("hdfs", fs.chmod, directory, 0o777) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
  return REMOTE_SAMPLE_DIR.get()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

# Manifest for the Hardware Proxy enabling framework; actual device drivers
# ship in separate modules. Marked not installable on this branch.
{
    'name': 'Hardware Proxy',
    'category': 'IOT',
    'sequence': 6,
    'summary': 'Connect the Web Client to Hardware Peripherals',
    'website': 'https://www.odoo.com/page/iot',
    'description': """
Hardware Proxy
==============

This module allows you to remotely use peripherals connected to this server.

This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
    'installable': False,
}
|
from bokeh.plotting import figure, show

# sample data to plot
xs = [1, 2, 3, 4, 5]
ys = [6, 7, 2, 4, 5]

# build the figure with a title and labelled axes
plot = figure(title="Simple line example", x_axis_label="x", y_axis_label="y")

# draw the data as a single 2px line with a legend entry
plot.line(xs, ys, legend_label="Temp.", line_width=2)

# render the plot
show(plot)
|
dule_location, output_key,
name):
"""Mock waveform-to-embedding computation."""
del audio_samples, sample_rate, module_location, output_key, name
return np.zeros(custom_call_shape, dtype=np.float32)
# Construct the tf.train.Example test data.
ex = tf.train.Example()
ex.features.feature[audio_key].float_li | st.value.extend(
np.zeros(2000, np.float32))
if sample_rate_key:
ex.features.feature[sample_rate_key].int64_list.value.append(8000)
old_k = 'oldkey'
do_fn = beam_dofns.ComputeEmbeddingMapFn(
name='module_name',
module='@loc',
output_key='unnecessary',
audio_key=audio_key,
sample_rate_key=sample_rate_key,
sample_rate=sample_rate,
average | _over_time=average_over_time,
module_call_fn=test_call_fn,
setup_fn=lambda _: None)
do_fn.setup()
new_k, new_v = next(do_fn.process((old_k, ex)))
self.assertEqual(new_k, old_k)
expected_shape = (
1, custom_call_shape[1]) if average_over_time else custom_call_shape
self.assertEqual(new_v.shape, expected_shape)
  @parameterized.parameters(
      {'average_over_time': True, 'sample_rate_key': 's', 'sample_rate': None},
      {'average_over_time': False, 'sample_rate_key': 's', 'sample_rate': None},
      {'average_over_time': False, 'sample_rate_key': None, 'sample_rate': 5},
  )  # pylint:disable=g-unreachable-test-method
  def disable_test_compute_embedding_map_fn_tflite(
      self, average_over_time, sample_rate_key, sample_rate):
    """ComputeEmbeddingMapFn driven through the TFLite module path.

    Prefixed `disable_` so the test runner presumably skips it; exercises
    the frontend feature_fn + TFLite setup_fn wiring end to end.
    """
    # Establish required key names.
    audio_key = 'audio_key'
    # Construct the tf.train.Example test data.
    ex = tf.train.Example()
    ex.features.feature[audio_key].float_list.value.extend(
        np.zeros(2000, np.float32))
    if sample_rate_key:
      ex.features.feature[sample_rate_key].int64_list.value.append(8000)
    old_k = 'oldkey'
    def _feature_fn(x, s):
      # Frontend features with an explicit frame hop, expanded to the
      # channel layout the TFLite model expects.
      return tf.expand_dims(
          tf_frontend.compute_frontend_features(x, s, frame_hop=17),
          axis=-1).numpy().astype(np.float32)
    do_fn = beam_dofns.ComputeEmbeddingMapFn(
        name='module_name',
        module='file.tflite',
        output_key=0,
        audio_key=audio_key,
        sample_rate_key=sample_rate_key,
        sample_rate=sample_rate,
        average_over_time=average_over_time,
        feature_fn=_feature_fn,
        module_call_fn=_s2e,
        setup_fn=build_tflite_interpreter_dummy)
    do_fn.setup()
    # The DoFn must preserve the key and emit the expected embedding shape.
    new_k, new_v = next(do_fn.process((old_k, ex)))
    self.assertEqual(new_k, old_k)
    expected_shape = (1, BASE_SHAPE_[1]) if average_over_time else BASE_SHAPE_
    self.assertEqual(new_v.shape, expected_shape)
  @parameterized.parameters([
      {'chunk_len': 0, 'average_over_time': True, 'emb_on_chnks': True},
      {'chunk_len': 8000, 'average_over_time': True, 'emb_on_chnks': True},
      {'chunk_len': 0, 'average_over_time': True, 'emb_on_chnks': False},
      {'chunk_len': 8000, 'average_over_time': True, 'emb_on_chnks': False},
  ])
  def test_chunk_audio(self, chunk_len, average_over_time, emb_on_chnks):
    """ChunkAudioAndComputeEmbeddings yields per-chunk tuples of the right shape."""
    dofn = beam_dofns.ChunkAudioAndComputeEmbeddings(
        name='all',
        module='dummy_name',
        output_key=['okey1', 'okey2'],
        embedding_names=['em1', 'em2'],
        audio_key='audio',
        label_key='label',
        speaker_id_key='speaker_id',
        sample_rate_key=None,
        sample_rate=16000,
        average_over_time=average_over_time,
        chunk_len=chunk_len,
        compute_embeddings_on_chunked_audio=emb_on_chnks,
        setup_fn=lambda _: MockModule(['okey1', 'okey2']))
    dofn.setup()
    for l in [8000, 16000, 32000]:
      k = f'key_{l}'
      ex = make_tfexample(l)
      for i, (kn, aud, lbl, spkr, embs_d) in enumerate(dofn.process((k, ex))):
        # Output keys are suffixed with the chunk index.
        self.assertEqual(f'{k}_{i}', kn)
        # chunk_len == 0 means "no chunking": the full clip passes through.
        if chunk_len:
          expected_chunk_len = chunk_len if l > chunk_len else l
        else:
          expected_chunk_len = l
        self.assertLen(aud, expected_chunk_len)
        self.assertEqual(lbl, b'dummy_lbl')
        self.assertEqual(spkr, b'dummy_spkr')
        for _, emb in embs_d.items():
          self.assertEqual(emb.shape, (1 if average_over_time else 5, 10))
        # Now run the next stage of the pipeline on it.
        # TODO(joelshor): Add correctness checks on the output.
        data_prep_utils.chunked_audio_to_tfex(
            (kn, aud, lbl, spkr, embs_d),
            delete_audio_from_output=True,
            pass_through_normalized_audio=False,
            chunk_len=chunk_len,
            embedding_length=10)
  @parameterized.parameters([
      {'emb_on_chnks': True},
      {'emb_on_chnks': False},
  ])
  def test_chunked_correctness(self, emb_on_chnks):
    """Embedding values reveal whether they were computed per chunk or per clip."""
    class MockModuleConstant(object):
      # Mock whose embedding equals each waveform row's first sample,
      # broadcast over a [1, time, 10] block of ones.
      def __init__(self, output_keys):
        self.signatures = {'waveform': self._fn}
        self.output_keys = output_keys
      def _fn(self, waveform, paddings):
        del paddings
        print(f'waveform.shape: {waveform.shape}')
        bs, l = waveform.shape
        tdim = l / 1000
        assert tdim == int(tdim)
        ones = tf.ones([1, int(tdim), 10], tf.float32)
        assert waveform[0, 0].numpy().size == 1, waveform[0, 0]
        e = tf.concat([ones * float(waveform.numpy()[i, 0]) for i in range(bs)],
                      axis=0)
        return {k: e for k in self.output_keys}
    dofn = beam_dofns.ChunkAudioAndComputeEmbeddings(
        name='all',
        module='dummy_name',
        output_key=['okey'],
        embedding_names=['em'],
        audio_key='audio',
        label_key='label',
        speaker_id_key='speaker_id',
        sample_rate_key=None,
        sample_rate=16000,
        average_over_time=True,
        chunk_len=8000,
        compute_embeddings_on_chunked_audio=emb_on_chnks,
        setup_fn=lambda _: MockModuleConstant(['okey']))
    dofn.setup()
    k = 'key_8000'
    ex = make_tfexample(16000)
    os = list(dofn.process((k, ex)))
    self.assertLen(os, 2)
    # First chunk.
    # NOTE(review): assumes make_tfexample audio starts at sample value 0,
    # so the mock embedding is all zeros -- confirm against make_tfexample.
    (kn, aud, _, _, embs_d) = os[0]
    self.assertEqual(f'{k}_0', kn)
    self.assertLen(aud, 8000)
    self.assertLen(embs_d, 1)
    emb = embs_d['em']
    self.assertEqual(emb.shape, (1, 10))
    np.testing.assert_equal(emb, 0.0)
    # Second chunk: equals the chunk's first sample (8000) only when
    # embeddings are computed on chunks; otherwise the clip's start (0).
    (kn, aud, _, _, embs_d) = os[1]
    self.assertEqual(f'{k}_1', kn)
    self.assertLen(aud, 8000)
    self.assertLen(embs_d, 1)
    emb = embs_d['em']
    self.assertEqual(emb.shape, (1, 10))
    np.testing.assert_equal(emb, 8000 if emb_on_chnks else 0)
  @parameterized.parameters(
      [{'chunk_len': 0, 'average_over_time': True},
       {'chunk_len': 8000, 'average_over_time': True},
       {'chunk_len': 0, 'average_over_time': False},
       {'chunk_len': 8000, 'average_over_time': False},
      ])
  def test_multiple_embeddings(self, chunk_len, average_over_time):
    """One model invocation can fan out to several named embeddings."""
    dofn = beam_dofns.ComputeMultipleEmbeddingsFromSingleModel(
        name='all',
        module='dummy_name',
        output_key=['k1', 'k2'],  # Sneak the list in.
        audio_key='audio',
        sample_rate_key=None,
        sample_rate=16000,
        average_over_time=average_over_time,
        feature_fn=None,
        embedding_names=['em1', 'em2'],
        embedding_length=10,
        chunk_len=chunk_len,
        setup_fn=lambda _: MockModule(['k1', 'k2'])
    )
    dofn.setup()
    for l in [8000, 16000, 32000]:
      k = f'key_{l}'
      ex = make_tfexample(l)
      # Key passes through; one embedding per requested name.
      kn, exn, emb_dict = list(dofn.process((k, ex)))[0]
      self.assertEqual(k, kn)
      self.assertLen(emb_dict, 2)
      self.assertSetEqual(set(emb_dict.keys()), set(['em1', 'em2']))
      # Now run the next stage of the pipeline on it.
      # TODO(joelshor): Add correctness checks on the output.
      data_prep_utils.combine_multiple_embeddings_to_tfex(
          (kn, exn, emb_dict),
          delete_audio_from_output=True,
          pass_through_normalized_audio=True,
          audio_key='audio',
          label_key='label',
          speaker_id_key='speaker_id')
@parameterized.parameters([
{'process_fn': 'ComputeEmbeddingMapFn', 'chunk_len': 0},
{'process_fn': 'ComputeMultipleEm |
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext_lazy as _
from zds.api.validators import Validator
from zds.member.models import Profile
class ParticipantsUserValidator(Validator):
    """Validates a list of participant users for a private message."""

    can_be_empty = False

    def validate_participants(self, value):
        """Check that every participant is reachable and is not the sender."""
        error = None
        if not value and not self.can_be_empty:
            error = _("Vous devez spécifier des participants.")
        else:
            for participant in value:
                if participant.username == self.get_current_user().username:
                    error = _("Vous ne pouvez pas vous écrire à vous-même !")
                try:
                    profile = get_object_or_404(Profile, user__username=participant)
                    if not Profile.objects.contactable_members().filter(pk=profile.pk).exists():
                        error = _("Vous avez tenté d'ajouter un utilisateur injoignable.")
                except Http404:
                    error = _(f"Un des participants saisi est introuvable ({participant}).")
        if error is not None:
            self.throw_error("participants", error)
        return value

    def get_current_user(self):
        """Must be provided by the serializer that uses this validator."""
        raise NotImplementedError("`get_current_user()` must be implemented.")
class ParticipantsStringValidator(Validator):
    """
    Validates participants field of a MP.
    """

    def validate_participants(self, value, username):
        """
        Checks about participants.

        :param value: participants value (comma-separated usernames)
        :param username: the sender's username (writing to oneself is refused)
        :return: participants value
        """
        msg = None
        if value:
            participants = value.strip()
            if participants != "":
                if len(participants) == 1 and participants[0].strip() == ",":
                    # Typo fix in the user-facing message: was "spécfier".
                    msg = _("Vous devez spécifier des participants valides.")
                for participant in participants.split(","):
                    participant = participant.strip()
                    if not participant:
                        continue
                    if participant.strip().lower() == username.lower():
                        msg = _("Vous ne pouvez pas vous écrire à vous-même !")
                    try:
                        current = get_object_or_404(Profile, user__username=participant)
                        if not Profile.objects.contactable_members().filter(pk=current.pk).exists():
                            msg = _("Vous avez tenté d'ajouter un utilisateur injoignable.")
                    except Http404:
                        msg = _(f"Un des participants saisi est introuvable ({participant}).")
        else:
            msg = _("Le champ participants ne peut être vide.")
        if msg is not None:
            self.throw_error("participants", msg)
        return value
class TitleValidator(Validator):
    """
    Validates title field of a MP.
    """

    def validate_title(self, value):
        """
        Checks about title.

        :param value: title value
        :return: title value
        """
        # Only a non-empty value consisting solely of whitespace is an error.
        if value and not value.strip():
            self.throw_error("title", _("Le champ titre ne peut être vide."))
        return value
class TextValidator(Validator):
    """
    Validates text field of a MP.
    """

    def validate_text(self, value):
        """
        Checks about text.

        :param value: text value
        :return: text value
        """
        # Only a non-empty value consisting solely of whitespace is an error.
        if value and not value.strip():
            self.throw_error("text", _("Le champ text ne peut être vide."))
        return value
|
import nose
def start_response_111(status, headers):
    """WSGI start_response stub: expect 200 and the WMS 1.1.1 XML content type."""
    for name, value in headers:
        if name == 'Content-Type':
            assert value == 'application/vnd.ogc.wms_xml'
    assert status == '200 OK'
def start_response_130(status, headers):
    """WSGI start_response stub: expect 200 and text/xml (WMS 1.3.0)."""
    for name, value in headers:
        if name == 'Content-Type':
            assert value == 'text/xml'
    assert status == '200 OK'
def start_response_check_404(status, headers):
    """WSGI start_response stub: log the status and require a 404 response."""
    print('status code: {0}'.format(status))
    assert status == '404 NOT FOUND'
def get_wsgiapp():
    """Build a WSGIApp configured from ogcserver.conf next to this test file."""
    import os
    from ogcserver.wsgi import WSGIApp
    base_path, tail = os.path.split(__file__)
    wsgi_app = WSGIApp(os.path.join(base_path, 'ogcserver.conf'))
    return wsgi_app
def get_environment():
    """Build a minimal WSGI environ for exercising the OGC server app."""
    return {
        'HTTP_HOST': "localhost",
        'SCRIPT_NAME': __name__,
        'PATH_INFO': '/',
    }
def test_get_capabilities():
    """GetCapabilities must succeed for both WMS 1.1.1 and 1.3.0."""
    wsgi_app = get_wsgiapp()
    environ = get_environment()
    # WMS 1.1.1 advertises the vendor XML content type (checked in the stub).
    environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.1.1&SERVICE=WMS&REQUEST=GetCapabilities&"
    response = wsgi_app.__call__(environ, start_response_111)
    # NOTE(review): `content` is never asserted on; the join only forces the
    # response iterable to be fully consumed.
    content = ''.join(response)
    # WMS 1.3.0 switches to plain text/xml.
    environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.3.0&SERVICE=WMS&REQUEST=GetCapabilities&"
    response = wsgi_app.__call__(environ, start_response_130)
    ''.join(response)
def test_bad_query():
    """GetMap without required parameters must yield 404 for both WMS versions.

    The 404 assertion lives in start_response_check_404; the return values
    were previously bound to an unused `response` local, now dropped, and
    the apps are called directly instead of via explicit __call__.
    """
    wsgi_app = get_wsgiapp()
    environ = get_environment()
    # WMS 1.1.1: GetMap with no layers/bbox/etc.
    environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.1.1&SERVICE=WMS&REQUEST=GetMap&"
    wsgi_app(environ, start_response_check_404)
    # WMS 1.3.0: same malformed request, same expected failure.
    environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.3.0&SERVICE=WMS&REQUEST=GetMap&"
    wsgi_app(environ, start_response_check_404)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from ch | annels.staticfiles import StaticFilesWrapper, StaticFilesHandler
from chat.consumers import ChatJSONConsumer
from rating.consumers import RatingJSONConsumer
# ASGI entry point: HTTP falls through to Django via the static-files
# wrapper; WebSocket connections are routed (with session auth) to the
# chat and rating consumers.
application = StaticFilesWrapper(ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(
        URLRouter([
            url('^ws/chat/$', ChatJSONConsumer),
            url('^ws/rating/$', RatingJSONConsumer),
        ])
    ),
}))
|
import pp
from pp.mask.merge_json import merge_json
from pp.mask.merge_markdown import merge_markdown
from pp.mask.merge_te | st_metadata import merge_test_metadata
from pp.mask.write_labels import write_labels
def merge_metadata(gdspath, labels_prefix="opt", label_layer=pp.LAYER.LABEL, **kwargs):
    """Merge JSON, markdown and test metadata for a mask GDS into sibling files."""
    markdown_path = gdspath.with_suffix(".md")
    json_path = gdspath.with_suffix(".json")
    doe_directory = gdspath.parent.parent / "doe"

    # Keep the original call order: labels are written before the merges run.
    write_labels(gdspath=gdspath, prefix=labels_prefix, label_layer=label_layer)
    merge_json(doe_directory=doe_directory, jsonpath=json_path, **kwargs)
    merge_markdown(reports_directory=doe_directory, mdpath=markdown_path)
    merge_test_metadata(gdspath, labels_prefix=labels_prefix)
if __name__ == "__main__":
    # Demo invocation against the sample mask shipped with the package.
    gdspath = pp.CONFIG["samples_path"] / "mask" / "build" / "mask" / "mask.gds"
    print(gdspath)
    merge_metadata(gdspath)
|
name.
This will raise a Zenity File Selection Dialog. It will return a list with
the selected files or None if the user hit cancel.
multiple - True to allow the user to select multiple files.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if multiple:
args.append('--multiple')
if sep != '|':
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
return p.stdout.read()[:-1].split('|')
def GetDirectory(multiple=False, selected=None, sep=None, **kwargs):
    """Prompt the user for a directory.

    This will raise a Zenity Directory Selection Dialog. It will return a
    list with the selected directories or None if the user hit cancel.

    multiple - True to allow the user to select multiple directories.
    selected - Path to the directory to be selected on startup.
    sep - Token to use as the path separator when parsing Zenity's return
          string.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--directory']
    if multiple:
        args.append('--multiple')
    if selected:
        if not path.lexists(selected):
            raise ValueError("File %s does not exist!" % selected)
        args.append('--filename=%s' % selected)
    if sep:
        args.append('--separator=%s' % sep)
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    p = run_zenity('--file-selection', *args)
    if p.wait() == 0:
        # Bug fix: honor the caller-supplied separator when parsing the
        # result; the output was previously always split on '|' even when
        # Zenity had been told to use a different separator.
        return p.stdout.read().strip().split(sep or '|')
def GetSavename(default=None, **kwargs):
    """Prompt the user for a filename to save as.

    This will raise a Zenity Save As Dialog. It will return the name to save
    a file as or None if the user hit cancel.

    default - The default name that should appear in the save as dialog.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--save']
    if default:
        args.append('--filename=%s' % default)
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    process = run_zenity('--file-selection', *args)
    if process.wait() == 0:
        return process.stdout.read().strip().split('|')
def Notification(text=None, window_icon=None, **kwargs):
    """Put an icon in the notification area.

    This will put an icon in the notification area and return when the user
    clicks on it.

    text - The tooltip that will show when the user hovers over it.
    window_icon - The stock icon ("question", "info", "warning", "error") or
                  path to the icon to show.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = []
    if text:
        args.append('--text=%s' % text)
    if window_icon:
        args.append('--window-icon=%s' % window_icon)
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    run_zenity('--notification', *args).wait()
def List(column_names, title=None, boolstyle=None, editable=False,
         select_col=None, sep='|', data=(), **kwargs):
    """Present a list of items to select.

    This will raise a Zenity List Dialog populated with the columns and rows
    specified and return either the cell or row that was selected or None if
    the user hit cancel.

    column_names - A tuple or list containing the names of the columns.
    title - The title of the dialog box.
    boolstyle - Whether the first columns should be a bool option ("checklist",
                "radiolist") or None if it should be a text field.
    editable - True if the user can edit the cells.
    select_col - The column number of the selected cell to return or "ALL" to
                 return the entire row.
    sep - Token to use as the row separator when parsing Zenity's return.
          Cells should not contain this token.
    data - A sequence of row tuples; each row's length must equal the number
           of columns. (Default changed from a shared mutable list literal
           to an immutable tuple; behavior is unchanged.)
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = []
    for column in column_names:
        args.append('--column=%s' % column)
    if title:
        args.append('--title=%s' % title)
    if boolstyle:
        # Membership test replaces the equivalent or-chain.
        if boolstyle not in ('checklist', 'radiolist'):
            raise ValueError('"%s" is not a proper boolean column style.'
                             % boolstyle)
        args.append('--' + boolstyle)
    if editable:
        args.append('--editable')
    if select_col:
        args.append('--print-column=%s' % select_col)
    if sep != '|':
        args.append('--separator=%s' % sep)
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    # Flatten the rows into one positional argument per cell.
    for datum in chain(*data):
        args.append(str(datum))
    p = run_zenity('--list', *args)
    if p.wait() == 0:
        return p.stdout.read().strip().split(sep)
def ErrorMessage(text, **kwargs):
    """Show an error message dialog to the user.

    This will raise a Zenity Error Dialog with a description of the error.

    text - A description of the error.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--text=%s' % text]
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    run_zenity('--error', *args).wait()
def InfoMessage(text, **kwargs):
    """Show an info message dialog to the user.

    This will raise a Zenity Info Dialog displaying some information.

    text - The information to present to the user.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--text=%s' % text]
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    run_zenity('--info', *args).wait()
def Question(text, **kwargs):
    """Ask the user a question.

    This will raise a Zenity Question Dialog that will present the user with
    an OK/Cancel dialog box. It returns True if the user clicked OK; False
    on Cancel.

    text - The question to ask.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--text=%s' % text]
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    return run_zenity('--question', *args).wait() == 0
def Warning(text, **kwargs):
    """Show a warning message dialog to the user.

    This will raise a Zenity Warning Dialog with a description of the
    warning. It returns True if the user clicked OK; False on cancel.
    (Name kept for API compatibility even though it shadows the builtin.)

    text - A description of the warning.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--text=%s' % text]
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    return run_zenity('--warning', *args).wait() == 0
def Progress(text='', percentage=0, auto_close=False, pulsate=False, no_cancel=False, **kwargs):
"""Show a progress dialog to the user.
This will raise a Zenity Progress Dialog. It returns a callback that
accepts two arguments. The first is a numeric value of the percent
complete. The second is a message about the progress.
NOTE: This function sends the SIGHUP signal if the user hits the cancel
button. You must connect to this signal if you do not want your
application to exit.
text - The initial message about the progress.
percentage - The initial percentage to set the progress bar to.
auto_close - True if the dialog should clos |
# -*- coding: utf-8 -* | -
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source c | ode is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import patterns, url
from . import views
# NOTE(review): `patterns()` is the pre-Django-1.10 URLconf style; kept to
# match the Django version this codebase targets.
urlpatterns = patterns(
    '',
    # Storefront customer-information edit form.
    url(r'^customer/$', views.CustomerEditView.as_view(),
        name='customer_edit'),
)
|
import json
import numpy as np

from bokeh.plotting import *
from bokeh.sampledata.olympics2014 import data
from bokeh.objects import ColumnDataSource

# Keep only countries that won at least one medal, keyed by abbreviation.
data = { d['abbr']: d['medals'] for d in data['data'] if d['medals']['total'] > 0}

# pull out just the data we care about
countries = sorted(
    data.keys(),
    key=lambda x: data[x]['total'], reverse=True
)
# Bug fix: `np.float` was a deprecated alias for the builtin `float` and is
# removed in NumPy >= 1.24; use `float` directly (identical dtype).
gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=float)
silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=float)
bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=float)

# EXERCISE: output static HTML file
output_file('olympics.html')

# EXERCISE: turn on plot hold
hold()

# use the `rect` renderer to display stacked bars of the medal results. Note
# that we set y_range explicitly on the first renderer
rect(x=countries, y=bronze/2, width=0.8, height=bronze, x_range=countries, color="#CD7F32", alpha=0.6,
     background_fill='#59636C', title="Olympic Medals by Country (stacked)", tools="",
     y_range=[0, max(gold+silver+bronze)], plot_width=800)
rect(x=countries, y=bronze+silver/2, width=0.8, height=silver, x_range=countries, color="silver", alpha=0.6)

# EXERCISE: add a `rect` renderer to stack the gold medal results
rect(x=countries, y=bronze+silver+gold/2, width=0.8, height=gold, x_range=countries, color="gold", alpha=0.6)

# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the grid lines
# - change the major label standoff, and major_tick_out values
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
xgrid().grid_line_color = None
axis().major_label_text_font_size = "8pt"
axis().major_label_standoff = 0
xaxis().major_label_orientation = np.pi/3
xaxis().major_label_standoff = 6
xaxis().major_tick_out = 0

# EXERCISE: create a new figure
figure()

# Categorical percentage coordinates can be used for positioning/grouping
countries_bronze = [c+":0.3" for c in countries]
countries_silver = [c+":0.5" for c in countries]
countries_gold = [c+":0.7" for c in countries]

# EXERCISE: re create the medal plot, but this time:
# - do not stack the bars on the y coordinate
# - use countries_gold, etc. to positions the bars on the x coordinate
rect(x=countries_bronze, y=bronze/2, width=0.2, height=bronze, x_range=countries, color="#CD7F32", alpha=0.6,
     background_fill='#59636C', title="Olympic Medals by Country (grouped)", tools="",
     y_range=[0, max([gold.max(), silver.max(), bronze.max()])], plot_width=1000, plot_height=300)
rect(x=countries_silver, y=silver/2, width=0.2, height=silver, x_range=countries, color="silver", alpha=0.6)
rect(x=countries_gold, y=gold/2, width=0.2, height=gold, x_range=countries, color="gold", alpha=0.6)

# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
xgrid().grid_line_color = None
axis().major_label_text_font_size = "8pt"
axis().major_label_standoff = 0
xaxis().major_label_orientation = np.pi/3
xaxis().major_label_standoff = 6
xaxis().major_tick_out = 0

show()  # show the plot
|
from django.conf.urls import patterns, url
from rest_framework.routers import DefaultRouter
from ..boards.views import BoardHTMLView
from . import views
# DRF router exposes the card API endpoints under /cards/.
router = DefaultRouter()
router.register(r'cards', views.CardViewSet)
api_urlpatterns = router.urls
urlpatterns = patterns(
    # Prefix
    '',
    # Card detail is rendered by the generic board HTML view.
    url(r'^$',
        BoardHTMLView.as_view(), name='card_detail'),
    url(r'^download/$',
        views.CardDownloadHTMLView.as_view(), name='card_download'),
)
|
def | f1():
while True:
x = [] | |
from chai import Chai
import unittest
import dfaker.common_fields as common_fields
import dfaker.tools as tools
class Test_Common_Fields(Chai):
    def test_common_fields(self):
        """ Test that common fields populate properly"""
        name = "bolus"
        datatype = {}
        # Epoch seconds for midnight UTC-naive 2015-03-03.
        timestamp = tools.convert_ISO_to_epoch('2015-03-03 00:00:00',
                                               '%Y-%m-%d %H:%M:%S')
        zonename = "US/Pacific"
        # Pacific standard time is UTC-8, hence the -480 minute offset and
        # the deviceTime lagging the UTC `time` by eight hours.
        expected = {
            'time': '2015-03-03T00:00:00.000Z',  # UTC time
            'deviceTime': '2015-03-02T16:00:00',  # local time
            'timezoneOffset': -480,
            'deviceId': 'DemoData-123456789',
            'uploadId': 'upid_abcdefghijklmnop',
            'conversionOffset': 0,
        }
        result_dict = common_fields.add_common_fields(name, datatype,
                                                      timestamp, zonename)
        # Only the expected keys are checked; extra keys are tolerated.
        for key in expected.keys():
            self.assertEqual(result_dict[key], expected[key])
def suite():
    """Bundle every test in this module into a single TestSuite."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(Test_Common_Fields))
    return all_tests
# NOTE(review): running the suite at import time is a side effect; an
# `if __name__ == '__main__':` guard would be safer.
mySuit = suite()
runner = unittest.TextTestRunner()
runner.run(mySuit)
|
import uuid
from random import randint
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Url
def index(request):
    """Render the landing page, showing a freshly created short URL once.

    'has_url' is a one-shot flash value set by make_url(); it is removed
    as soon as it has been displayed.
    """
    # Bug fix: dict-style membership test; `session.has_key()` relies on a
    # method removed in Python 3.
    if "has_url" in request.session:
        url = request.session.get("has_url")
        del request.session['has_url']
        return render(request, "miudo/index.html", locals())
    return render(request, "miudo/index.html", {})
def make_url(request):
    """Create a short URL for the POSTed target and flash its key in session.

    Keys are regenerated until an unused one is found. The previous version
    had an unreachable create_url() call inside the try block: the while
    loop could only exit by Url.DoesNotExist being raised, so creation
    always happened in the except handler.
    """
    if request.method == "POST":
        url_site = request.POST['url']
        url_id = generate_key()
        # Regenerate until the key is free; DoesNotExist means unused.
        while True:
            try:
                Url.objects.get(url_id = url_id)
            except Url.DoesNotExist:
                break
            url_id = generate_key()
        create_url(request, url_id, url_site)
        request.session["has_url"] = url_id
    return HttpResponseRedirect("/")
def create_url(custom_request, url_id, url_site):
    """Persist a new Url row, attaching the author when the user is logged in.

    NOTE(review): `is_authenticated()` is called as a method (Django < 1.10
    style); on newer Django it is a property -- confirm the project version.
    """
    if custom_request.user.is_authenticated():
        url = Url.objects.create(url_id = url_id, url_site = url_site,
                                 url_author = custom_request.user)
    else:
        url = Url.objects.create(url_id = url_id, url_site = url_site)
    url.save()  # NOTE(review): create() already saves; this looks redundant
def generate_key():
    """Return a random 6-character alphanumeric short-URL key.

    Keys are case-sensitive ([0-9a-zA-Z]), giving 62**6 possible values.
    Uses the module-level `randint` import; the character-by-character
    while loop is replaced by the idiomatic join-over-generator form.
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return "".join(alphabet[randint(0, len(alphabet) - 1)] for _ in range(6))
def redirect_url(request, url_id=None):
    """Resolve a short key to its target and redirect, counting the click."""
    try:
        url = Url.objects.get(url_id = url_id)
        # NOTE(review): read-modify-write counter; an F() expression would
        # avoid lost updates under concurrent clicks.
        url.url_clicked = url.url_clicked + 1
        url.save()
    except Url.DoesNotExist:
        return render(request, "base/page_not_found.html", {})
    return HttpResponseRedirect(url.url_site)
|
from django.conf.urls import patterns, url
from views import RecaptchaRegistrationView
# Overrides django-registration's register view with the reCAPTCHA-enabled
# one; the URL name is kept so existing reverse() calls keep working.
urlpatterns = patterns(
    '', url(
        r'^register/$',
        RecaptchaRegistrationView.as_view(),
        name='registration_register'),
)
|
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: tldnShroomieGate
Age: Teledahn
Date: Feburary 2007
Author: Karl Johnson
"""
from Plasma import *
from PlasmaTypes import *
# define the attributes that will be entered in max
clkLever = ptAttribActivator(1,"clk: Activator for Shroomie Gate")  # lever click region
respLeverPull = ptAttribResponder(2, "resp: Lever Pull", netForce=1)  # pull animation
respGateDown = ptAttribResponder(3, "resp: Gate Down", netForce=1)  # gate lowering sequence
respGateUp = ptAttribResponder(4, "resp: Gate Up", netForce=1)  # gate raising sequence
class tldnShroomieGate(ptResponder):
    """Responder for the Shroomie gate lever in Teledahn (Python 2 / Plasma).

    Toggles the gate through the 'tldnShroomieGateUp' age SDL variable; only
    the scene object's local owner mutates the SDL so the toggle happens
    exactly once per lever pull in a shared age.
    """
    def __init__(self):
        # run parent class init
        ptResponder.__init__(self)
        self.id = 5042

        version = 1
        self.version = version
        print "__init__tldnShroomieGate v.", version
    def OnNotify(self,state,id,events):
        # Lever clicked: play the pull animation on the clicking avatar.
        if id == clkLever.id and state:
            print "tldnShroomieGate:\t---Someone Pulled the Lever"
            respLeverPull.run(self.key,avatar=PtFindAvatar(events))
        # Pull animation finished: toggle the gate if it is powered.
        elif id == respLeverPull.id:
            ageSDL = PtGetAgeSDL()
            PtDebugPrint("tldnShroomieGate:\t---Shroomie Gate Up SDL: %d" % (ageSDL["tldnShroomieGateUp"][0]))
            # Ownership check keeps the SDL write single-sourced.
            if ageSDL["tldnShroomieGatePowerOn"][0] and self.sceneobject.isLocallyOwned():
                if ageSDL["tldnShroomieGateUp"][0]:
                    respGateDown.run(self.key)
                    print "tldnShroomieGate:\t---Shroomie Gate Going Down"
                else:
                    respGateUp.run(self.key)
                    print "tldnShroomieGate:\t---Shroomie Gate Going Up"
                ageSDL["tldnShroomieGateUp"] = (not ageSDL["tldnShroomieGateUp"][0],)
    def OnServerInitComplete(self):
        # NOTE(review): if PtGetAgeSDL() raises, ageSDL stays unbound and the
        # lines below raise NameError -- an early return here looks intended.
        try:
            ageSDL = PtGetAgeSDL()
        except:
            print "tldnShroomieGate:\tERROR---Cannot find the Teledahn Age SDL"
        ageSDL.sendToClients("tldnShroomieGateUp")
        ageSDL.setFlags("tldnShroomieGateUp", 1, 1)
        ageSDL.setNotify(self.key, "tldnShroomieGateUp", 0.0)
        # Fast-forward the responder so the gate starts in its saved state.
        if ageSDL["tldnShroomieGateUp"][0]:
            print "tldnShroomieGate:\tInit---Shroomie Gate Up"
            respGateUp.run(self.key,fastforward=1)
        else:
            print "tldnShroomieGate:\tInit---Shroomie Gate Down"
            respGateDown.run(self.key,fastforward=1)
from django.core.management import call_command
from static_precompiler.management.commands.compilestatic import get_scanned_dirs
from static_precompiler.settings import STATIC_ROOT, ROOT, OUTPUT_DIR
import pytest
import os
def test_get_scanned_dirs():
    """The scanner must return both app static dirs plus STATIC_ROOT, sorted."""
    here = os.path.dirname(__file__)
    expected = sorted([
        os.path.join(here, "staticfiles_dir"),
        os.path.join(here, "staticfiles_dir_with_prefix"),
        STATIC_ROOT,
    ])
    assert get_scanned_dirs() == expected
@pytest.mark.django_db
def test_compilestatic_command():
    """Running ``compilestatic`` must produce exactly the expected output files."""
    call_command("compilestatic")
    output_path = os.path.join(ROOT, OUTPUT_DIR)
    # Collect every compiled file as a path relative to the output directory.
    compiled_files = sorted(
        os.path.join(root[len(output_path):].lstrip("/"), name)
        for root, dirs, files in os.walk(output_path)
        for name in files
    )
    assert compiled_files == [
        "another_test.js",
        "scripts/test.js",
        "styles/imported.css",
        "styles/stylus/A.css",
        "styles/stylus/B/C.css",
        "styles/stylus/D.css",
        "styles/stylus/E/F.css",
        "styles/stylus/E/index.css",
        "styles/test.css",
        "test-compass.css",
    ]
|
import os
import subprocess
import sys
__version__ = '0.0.1' |
__author__ = 'Jeroen Seegers'
__license__ = 'MIT'
HOME_DIR = os.path.expanduser('~')
# Log file recording every git command executed through this wrapper.
HISTORY_FILE = HOME_DIR + '/.git-history.log'
def ensure_history_file():
    """Ensure the history file exists.

    Returns True when the history file exists (creating it if needed),
    False when the home directory is not writable.
    """
    # Without write access to the home directory we can neither create the
    # file nor safely append to it later.
    if not os.access(HOME_DIR, os.W_OK):
        return False
    if not os.path.isfile(HISTORY_FILE):
        open(HISTORY_FILE, 'a').close()
    return True
def track_history():
arguments = sys.argv[1:]
arguments.insert(0, 'git')
if arguments == ['git', 'history']:
# Show the history so far
with open(HISTORY_FILE, 'r') as f:
print f.read()
f.close()
elif len(arguments) > 1:
# Store command in history
if ensure_history_file():
with open(HISTORY_FILE, 'a') as f:
f.write('{0}\n'.format(' '.join(sys.argv[1:])))
f.close()
# Execute given command
subprocess.call(arguments)
else:
# Show default help text
subprocess.call('git')
# Entry point when run as a console script.
if __name__ == '__main__':
    track_history()
|
# Generated by Django 2.2.5 on 2019-09-30 13:02
from django.db import migrations
def fix_no | tification_body(apps, schema_editor):
PayUNotification = apps.get_model('club', 'PayUNotification')
for n in PayUNotification.objects.filter(body__startswith='b'):
n.body = n.body[2:-1]
n.save()
class Migration(migrations.Migration):
    # Pure data migration; elidable=True lets it be dropped when squashing.

    dependencies = [
        ('club', '0010_auto_20190529_0946'),
    ]

    operations = [
        migrations.RunPython(
            fix_notification_body,
            # Reversing is a no-op: the mangled bodies are not recreated.
            migrations.RunPython.noop,
            elidable=True),
    ]
|
"""
Plugin: Slideshow
*****************
This plugin allows you to put a slideshow on a page, automatically
displaying the selected image files with customizable transitions and
intervals.
Installation
============
To use this plugin, put ``media_tree.contrib.cms_plugins.media_tree_slideshow``
in your installed apps, and run ``manage.py syncdb``.
Template
========
Override the template ``cms/plugins/media_tree_slideshow.html`` if you want to
customize the output. Please take a look at the default template for more
information.
By default, images are rendered to the output using the template
``media_tree/filenode/includes/figure.html``, which includes captions.
.. Note::
The default template requires you to include `jQuery <http://jquery.com/>`_
in your pages, since it uses the `jQuery Cycle Plugin
<http://jquery.malsup.com/cy | cle/>`_ (bundled) for image tr | ansitions.
""" |
from distutils.core import setup
# Read the long description up front with a context manager so the file
# handle is closed promptly instead of being leaked by an inline
# open(...).read() in the setup() call.
with open("README.rst") as f:
    long_description = f.read()

setup(
    name = "nip",
    version = "0.1a1",
    py_modules = [
        "nip",
    ],
    scripts = [
        "bin/nip",
    ],
    author = "Brian Rosner",
    author_email = "brosner@gmail.com",
    description = "nip is environment isolation and installation for Node.js",
    long_description = long_description,
    license = "MIT",
    classifiers = [
        "Development Status :: 2 - Pre-Alpha",
    ],
)
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlFill(IdentifiedObject):
    """Specifies how the area of the geometry will be filled.Specifies how the area of the geometry will be filled.
    """

    def __init__(self, opacity=0.0, GmlColour=None, GmlMarks=None, GmlTextSymbols=None, GmlSvgParameters=None, GmlPolygonSymbols=None, *args, **kw_args):
        """Initialises a new 'GmlFill' instance.

        @param opacity: Specifies the level of translucency to use when rendering the Fill. The value is encoded as a floating-point value between 0.0 and 1.0 with 0.0 representing completely transparent and 1.0 representing completely opaque, with a linear scale of translucency for intermediate values. The default value is 1.0
        @param GmlColour:
        @param GmlMarks:
        @param GmlTextSymbols:
        @param GmlSvgParameters:
        @param GmlPolygonSymbols:
        """
        #: Specifies the level of translucency to use when rendering the Fill. The value is encoded as a floating-point value between 0.0 and 1.0 with 0.0 representing completely transparent and 1.0 representing completely opaque, with a linear scale of translucency for intermediate values. The default value is 1.0
        self.opacity = opacity

        # Each reference is stored in a private attribute and assigned via
        # the corresponding property so the reverse reference on the related
        # object is kept in sync.
        self._GmlColour = None
        self.GmlColour = GmlColour

        self._GmlMarks = []
        self.GmlMarks = [] if GmlMarks is None else GmlMarks

        self._GmlTextSymbols = []
        self.GmlTextSymbols = [] if GmlTextSymbols is None else GmlTextSymbols

        self._GmlSvgParameters = []
        self.GmlSvgParameters = [] if GmlSvgParameters is None else GmlSvgParameters

        self._GmlPolygonSymbols = []
        self.GmlPolygonSymbols = [] if GmlPolygonSymbols is None else GmlPolygonSymbols

        super(GmlFill, self).__init__(*args, **kw_args)

    # Reflection metadata used by the CIM (de)serialisation machinery.
    _attrs = ["opacity"]
    _attr_types = {"opacity": float}
    _defaults = {"opacity": 0.0}
    _enums = {}
    _refs = ["GmlColour", "GmlMarks", "GmlTextSymbols", "GmlSvgParameters", "GmlPolygonSymbols"]
    _many_refs = ["GmlMarks", "GmlTextSymbols", "GmlSvgParameters", "GmlPolygonSymbols"]

    def getGmlColour(self):
        # Many-to-one: several GmlFills may share one GmlColour.
        return self._GmlColour

    def setGmlColour(self, value):
        # Detach from the previous colour's GmlFills list, then attach to
        # the new one (if any).
        if self._GmlColour is not None:
            filtered = [x for x in self.GmlColour.GmlFills if x != self]
            self._GmlColour._GmlFills = filtered

        self._GmlColour = value
        if self._GmlColour is not None:
            if self not in self._GmlColour._GmlFills:
                self._GmlColour._GmlFills.append(self)

    GmlColour = property(getGmlColour, setGmlColour)

    def getGmlMarks(self):
        # Many-to-many with GmlMark.
        return self._GmlMarks

    def setGmlMarks(self, value):
        # NOTE(review): "_GmlFIlls" (capital I) differs from the "_GmlFills"
        # spelling used elsewhere in this class; presumably it matches the
        # attribute generated on GmlMark -- confirm against that class.
        # NOTE(review): the assignment below targets self._GmlMarks (a list)
        # rather than the related object "p", so the old reverse references
        # are never actually removed; looks like a code-generator bug.
        for p in self._GmlMarks:
            filtered = [q for q in p.GmlFIlls if q != self]
            self._GmlMarks._GmlFIlls = filtered
        for r in value:
            if self not in r._GmlFIlls:
                r._GmlFIlls.append(self)
        self._GmlMarks = value

    GmlMarks = property(getGmlMarks, setGmlMarks)

    def addGmlMarks(self, *GmlMarks):
        # Link each mark back to this fill, then record it locally.
        for obj in GmlMarks:
            if self not in obj._GmlFIlls:
                obj._GmlFIlls.append(self)
            self._GmlMarks.append(obj)

    def removeGmlMarks(self, *GmlMarks):
        for obj in GmlMarks:
            if self in obj._GmlFIlls:
                obj._GmlFIlls.remove(self)
            self._GmlMarks.remove(obj)

    def getGmlTextSymbols(self):
        # One-to-many: each GmlTextSymbol holds a single GmlFill.
        return self._GmlTextSymbols

    def setGmlTextSymbols(self, value):
        # Unlink the old symbols via their GmlFill property, then claim
        # the new ones directly.
        for x in self._GmlTextSymbols:
            x.GmlFill = None
        for y in value:
            y._GmlFill = self
        self._GmlTextSymbols = value

    GmlTextSymbols = property(getGmlTextSymbols, setGmlTextSymbols)

    def addGmlTextSymbols(self, *GmlTextSymbols):
        # Assigning obj.GmlFill maintains both sides of the reference.
        for obj in GmlTextSymbols:
            obj.GmlFill = self

    def removeGmlTextSymbols(self, *GmlTextSymbols):
        for obj in GmlTextSymbols:
            obj.GmlFill = None

    def getGmlSvgParameters(self):
        # Many-to-many with GmlSvgParameter.
        return self._GmlSvgParameters

    def setGmlSvgParameters(self, value):
        # NOTE(review): as in setGmlMarks, the assignment below targets
        # self._GmlSvgParameters (a list) rather than "p"; old reverse
        # references are therefore not removed -- confirm before relying
        # on unlink behaviour.
        for p in self._GmlSvgParameters:
            filtered = [q for q in p.GmlFills if q != self]
            self._GmlSvgParameters._GmlFills = filtered
        for r in value:
            if self not in r._GmlFills:
                r._GmlFills.append(self)
        self._GmlSvgParameters = value

    GmlSvgParameters = property(getGmlSvgParameters, setGmlSvgParameters)

    def addGmlSvgParameters(self, *GmlSvgParameters):
        for obj in GmlSvgParameters:
            if self not in obj._GmlFills:
                obj._GmlFills.append(self)
            self._GmlSvgParameters.append(obj)

    def removeGmlSvgParameters(self, *GmlSvgParameters):
        for obj in GmlSvgParameters:
            if self in obj._GmlFills:
                obj._GmlFills.remove(self)
            self._GmlSvgParameters.remove(obj)

    def getGmlPolygonSymbols(self):
        # One-to-many: each GmlPolygonSymbol holds a single GmlFill.
        return self._GmlPolygonSymbols

    def setGmlPolygonSymbols(self, value):
        for x in self._GmlPolygonSymbols:
            x.GmlFill = None
        for y in value:
            y._GmlFill = self
        self._GmlPolygonSymbols = value

    GmlPolygonSymbols = property(getGmlPolygonSymbols, setGmlPolygonSymbols)

    def addGmlPolygonSymbols(self, *GmlPolygonSymbols):
        for obj in GmlPolygonSymbols:
            obj.GmlFill = self

    def removeGmlPolygonSymbols(self, *GmlPolygonSymbols):
        for obj in GmlPolygonSymbols:
            obj.GmlFill = None
|
int, end_ip_int, latest_json, latest_datetime,
history_msgpack):
"""Build the actual key and value byte strings"""
key = ip_int_to_packed(end_ip_int)
value = msgpack_dumps((
ip_int_to_packed(begin_ip_int),
latest_json,
latest_datetime.encode('ascii'),
history_msgpack,
))
return key, value
def build_history(dicts):
    """Squash *dicts* into (latest, diffs) history form.

    Sorts in place by timestamp, removes consecutive squash-equal entries,
    and diffs from newest to oldest.
    """
    dicts.sort(key=DATETIME_GETTER)
    deduped = list(unique_justseen(dicts, key=make_squash_key))
    deduped.reverse()
    latest, diff_iter = dict_diff_incremental(deduped)
    return latest, list(diff_iter)
def build_record(begin_ip_int, end_ip_int, dicts, existing=None):
    """Create database records for an iterable of merged dicts.

    :param begin_ip_int: first IP of the range, as an integer
    :param end_ip_int: last IP of the range, as an integer
    :param dicts: newly imported dicts for this range (may be empty)
    :param existing: optional previously stored record for this range
    :return: (key, value) byte strings ready to store
    """
    assert dicts or existing, "no data at all to pack?"

    if not dicts:
        # No new dicts; avoid expensive re-serialisation. Note that
        # blindly reusing the existing key/value pair from the database
        # (by not updating it at all) is not correct: the begin and end
        # of the range may have changed.
        return build_key_value(
            begin_ip_int,
            end_ip_int,
            existing.latest_json,
            existing.latest_datetime,
            existing.history_msgpack)

    if not existing:
        # Only new dicts, no existing data
        latest, diffs = build_history(dicts)
        return build_key_value(
            begin_ip_int,
            end_ip_int,
            json_dumps(latest, ensure_ascii=False).encode('UTF-8'),
            latest['datetime'],
            msgpack_dumps_utf8(diffs))

    # At this point we know there is both new data, and an existing
    # record. These need to be merged..
    if min(map(DATETIME_GETTER, dicts)) > existing.latest_datetime:
        # All new data is newer than the existing record. Take
        # a shortcut by simply prepending the new data to the history
        # chain. This approach prevents quite a lot of overhead from
        # build_history().
        dicts.append(json_loads(existing.latest_json))
        latest, diffs = build_history(dicts)
        diffs.extend(msgpack_loads_utf8(existing.history_msgpack))
    else:
        # Perform a full merge
        dicts.extend(existing.iter_versions())
        latest, diffs = build_history(dicts)

    return build_key_value(
        begin_ip_int,
        end_ip_int,
        json_dumps(latest, ensure_ascii=False).encode('UTF-8'),
        latest['datetime'],
        msgpack_dumps_utf8(diffs))
class ExistingRecord(object):
    """Helper class for working with records retrieved from the database."""

    def __init__(self, key, value):
        # Only the outer msgpack envelope is unpacked eagerly; the costly
        # payload deserialisation is deferred until actually requested.
        fields = msgpack_loads(value, use_list=False)

        # IP range boundaries (packed form); the end IP is the DB key.
        self.begin_ip_packed = fields[0]
        self.end_ip_packed = key

        # Raw payloads, kept serialized until needed.
        self.latest_json = fields[1]
        self.latest_datetime = fields[2].decode('ascii')
        self.history_msgpack = fields[3]

    def iter_versions(self, inplace=False):
        """Lazily reconstruct all versions in this record."""
        # Newest version first...
        newest = json_loads(self.latest_json)
        yield newest
        # ...then walk backwards by applying the stored patches.
        yield from dict_patch_incremental(
            newest,
            msgpack_loads_utf8(self.history_msgpack),
            inplace=inplace)
class Database(object):
"""
Database access class for loading and looking up data.
"""
def __init__(self, database_dir, create_if_missing=False):
logger.debug("Opening database %s", database_dir)
self.db = plyvel.DB(
database_dir,
create_if_missing=create_if_missing,
write_buffer_size=16 * 1024 * 1024,
max_open_files=512,
lru_cache_size=128 * 1024 * 1024)
self.iter = None
def iter_records(self):
"""
Iterate a database and yield records that can be merged with new data.
This generator is suitable for consumption by merge_ranges().
"""
for key, value in self.db.iterator(fill_cache=False):
record = ExistingRecord(key, value)
yield (
ip_packed_to_int(record.begin_ip_packed),
ip_packed_to_int(record.end_ip_packed),
record,
)
def load(self, *iterables):
"""Load data from importer iterables"""
if not iterables:
logger.warning("No new input files; nothing to load")
return
# Combine new data with current database contents, and merge all
# iterables to produce unique, non-overlapping ranges.
iterables = list(iterables)
iterables.append(self.iter_records())
merged = merge_ranges(*iterables)
# Progress/status tracking
n_processed = n_updated = 0
begin_ip_int = 0
reporter = PeriodicCallback(lambda: logger.info(
"%d ranges processed (%d updated, %d new); current position %s",
n_processed, n_updated, n_processed - n_updated,
ip_int_to_str(begin_ip_int)))
reporter.tick()
# Loop over current database and new data
for begin_ip_int, end_ip_int, items in merged:
if n_processed % 100 == 0:
reporter.tick()
# Find and pop existing record (if any) from the list.
existing = None
for idx, item in enumerate(items):
if isinstance(item, ExistingRecord):
existing = item
del items[idx]
break
# Build and store a new record
key, value = build_record(
begin_ip_int,
end_ip_int,
items,
existing)
self.db.put(key, value)
# Update counters
n_processed += 1
if existing is not None:
n_updated += 1
reporter.tick(True)
logger.info("Compacting database... (this may take a while)")
self.db.compact_range(start=b'\x00' * 16, stop=b'\xff' * 16)
# Force lookups to use a new iterator so new data is seen.
self.iter = None
logger.info("Loading finished")
@functools.lru_cache(128 * 1024)
def lookup(self, ip, datetime=None):
"""Lookup a single IP address in the database.
This function returns the found information as a JSON byte
string (encoded as UTF-8), or `None` if no information was
found.
If `datetime` is `None`, the latest version is returned. If
`datetime` is a datetime string, information for that timestamp
is returned. If `datetime` has the special value 'all', the full
history will be returned.
"""
# Pack incoming IP address to a format suitable for lookups.
ip_packed = ip_str_to_packed(ip)
# Iterator construction is relatively costly, so reuse it for
# performance reasons. The iterator won't see any data written
# after its construction, but that is not a problem since the
# data set is static.
if self.iter is None:
self.iter = self.db.iterator()
# The database key stores the end IP of all ranges, so a simple
# seek positions the iterator at the right key (if found).
self.iter.seek(ip_packed)
db_record = next(self.iter, None)
# If the seek moved past the last range in the database: no hit
if db_record is None:
return None
# Decode the value
key, value = db_record
record = ExistingRecord(key, value)
# Check range boundaries. If the IP currently being looked up is
# in a gap, there is no hit after all.
if ip_packed < record.begin_ip_packed:
return None
# If the lo |
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is | distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the RankerRoot model
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
import soc.models.linkable
class RankerRoot(soc.models.linkable.Linkable):
  """Links the Root of a RankList tree to an owner and also
  gives it an unique ID.
  """

  #: A required reference property to the root of the RankList tree.
  #: The referenced entity gains a reverse query set named 'roots'.
  root = db.ReferenceProperty(required=True,
                              collection_name='roots')
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
from cherrypy.test import test
test.prefer_parent_path()
try:
from hashlib import md5
except ImportError:
# Python 2.4 and earlier
from md5 import new as md5 |
import cherrypy
from cherrypy.lib import auth_basic
def setup_server():
    """Mount a small CherryPy app with two HTTP-Basic protected sub-trees."""
    class Root:
        def index(self):
            return "This is public."
        index.exposed = True

    class BasicProtected:
        def index(self):
            return "Hello %s, you've been authorized." % cherrypy.request.login
        index.exposed = True

    class BasicProtected2:
        def index(self):
            return "Hello %s, you've been authorized." % cherrypy.request.login
        index.exposed = True

    # /basic validates against a plaintext user->password dict;
    # /basic2 validates against MD5 hex digests via checkpasshash below.
    userpassdict = {'xuser' : 'xpassword'}
    userhashdict = {'xuser' : md5('xpassword').hexdigest()}

    def checkpasshash(realm, user, password):
        # Compare the MD5 digest of the submitted password to the stored one.
        p = userhashdict.get(user)
        return p and p == md5(password).hexdigest() or False

    conf = {'/basic': {'tools.auth_basic.on': True,
                       'tools.auth_basic.realm': 'wonderland',
                       'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(userpassdict)},
            '/basic2': {'tools.auth_basic.on': True,
                        'tools.auth_basic.realm': 'wonderland',
                        'tools.auth_basic.checkpassword': checkpasshash},
            }

    root = Root()
    root.basic = BasicProtected()
    root.basic2 = BasicProtected2()
    cherrypy.tree.mount(root, config=conf)
from cherrypy.test import helper
class BasicAuthTest(helper.CPWebCase):

    def testPublic(self):
        """The root page must be reachable without credentials."""
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('This is public.')

    def testBasic(self):
        """Dict-based checkpassword: challenge, reject bad, accept good."""
        self.getPage("/basic/")
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')

        # Same base64 credentials with the final character corrupted -> 401.
        self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)

        # Correct base64 of "xuser:xpassword" -> authorized.
        self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
        self.assertStatus('200 OK')
        self.assertBody("Hello xuser, you've been authorized.")

    def testBasic2(self):
        """Hash-based checkpassword must behave identically to the dict one."""
        self.getPage("/basic2/")
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')

        self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)

        self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
        self.assertStatus('200 OK')
        self.assertBody("Hello xuser, you've been authorized.")
# Run this test module standalone via CherryPy's test helper.
if __name__ == "__main__":
    helper.testmain()
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# | (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have recei | ved a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5 import PE_SUBSYSTEM_ENUMERATION
from scap.model.oval_5.defs.EntityStateType import EntityStateType
logger = logging.getLogger(__name__)
class EntityStatePeSubsystemType(EntityStateType):
    """OVAL 5 state entity whose values are restricted to PE subsystems."""

    # No extra mapped elements beyond the base EntityStateType.
    MODEL_MAP = {
    }

    def get_value_enum(self):
        # Allowed values come from the shared PE subsystem enumeration.
        return PE_SUBSYSTEM_ENUMERATION
|
"""ThingCloud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from MainSystem.views import index
from AccountSystem.views import loginByPhone, register, sendCode, address, addressList, deleteAddress, changePassword, changeNickname, updateAvatar
from CloudList.views import addNewItem, getItemList, modifyNotes
from OrderSystem.views import generateOrder, modifyOrder, confirmOrder, getOrderList, cancel, complain, update, orderCallback, getOrder, delete, vipCallback
from AssistSystem.views import feedback, checkDiscount, activityList, versionInfo, communityList,getFeeList, joinUs
from VIPSystem.views import vip, vipOrder, vipConfirm
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r"^index$", index),
url(r"^account/sendcode$", sendCode),
url(r"^account/register$", register),
url(r"^account/login$", loginByPhone),
url(r"^account/avatar$", updateAvatar),
url(r"^account/address$", address),
url(r"^account/addressdelete$", deleteAddress),
url(r"^account/addresslist$", addressList),
url(r"^account/password$", changePassword),
url(r"^account/nickname$", changeNickname),
url(r"^cloudlist/additem$", addNewItem),
url(r"^cloudlist/getlist$", getItemList),
url(r"^cloudlist/modifynotes$", modifyNotes),
url(r"^order/generate$", generateOrder),
url(r"^order/address$", modifyOrder),
url(r"^order/confirm$", confirmOrder),
url(r"^order/order$", getOrder),
url(r"^order/orderlist$", getOrderList),
url(r"^order/cancel$", cancel),
url(r"^order/complain$", complain),
url(r"^order/delete$", delete),
url(r"^order/update$", update),
url(r"^order/callback$", orderCallback),
url(r"^assist/feedback$", feedback),
url(r"^assist/discount$", checkDiscount),
url(r"^assist/activitylist", activityList),
url(r"^assist/version", versionInfo),
url(r"^assist/communitylist", communityList),
url(r"^ass | ist/feelist", getFeeList),
url(r"^assist/joinus", joinUs),
url(r"^vip/vip$", vip),
url(r"^vip/order$", vipOrder),
url(r"^vip/confirm$", vipConfirm),
url(r"^vip/callback$", vipCallb | ack),
]
|
from dja | ngo.conf import settings
# Default values for every optional setting this app reads.  Each default
# is installed onto django.conf.settings below only when the project does
# not already define it, so explicit project settings always win.
DEFAULTS = {
    'SOCIAL_CONTENT_TYPES': (
        'Facebook',
        'Twitter',
        'Instagram',
    ),
    'SOCIAL_CONTENT_MAX_POSTS': None,

    # Facebook
    'FACEBOOK_APP_ID': None,
    'FACEBOOK_APP_SECRET': None,

    # Twitter
    'TWITTER_CONSUMER_KEY': None,
    'TWITTER_CONSUMER_SECRET': None,
    'TWITTER_ACCESS_TOKEN_KEY': None,
    'TWITTER_ACCESS_TOKEN_SECRET': None,

    # Instagram
    'INSTAGRAM_CLIENT_ID': None,
    'INSTAGRAM_CLIENT_SECRET': None,
    'INSTAGRAM_ACCESS_TOKEN': None,

    # YouTube
    'YOUTUBE_APP_API_KEY': None,

    # Tumblr
    'TUMBLR_API_CONSUMER_KEY': None
}

for name, default in DEFAULTS.items():
    # hasattr() treats AttributeError as "missing", matching the previous
    # try/getattr/except AttributeError pattern, without re-indexing DEFAULTS.
    if not hasattr(settings, name):
        setattr(settings, name, default)
|
F Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'spedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package.
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Auquel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur.choix import Choix
from primaires.interpreteur.editeur.description import Description
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.flag import Flag
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.selection import Selection
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.scripting.editeurs.edt_script import EdtScript
from secondaires.magie.constantes import ELEMENTS
from .edt_difficulte import EdtDifficulte
from .supprimer import NSupprimer
class EdtSpedit(Presentation):
"""Classe définissant l'éditeur de sort 'spedit'.
"""
nom = "spedit"
    def __init__(self, personnage, sort):
        """Editor constructor.

        Opens the spell editor for ``sort`` on the connection of
        ``personnage``.  Both may be None (see __getnewargs__), in which
        case no connection is bound and the editor is not built.
        """
        if personnage:
            instance_connexion = personnage.instance_connexion
        else:
            instance_connexion = None

        Presentation.__init__(self, instance_connexion, sort)
        if personnage and sort:
            self.construire(sort)
    def __getnewargs__(self):
        # Pickle support: recreate with no character/spell, so __init__
        # skips connection binding and construire().
        return (None, None)
def construire(self, sort):
"""Construction de l'éditeur"""
# Nom
nom = self.ajouter_choix("nom", "n", Uniligne, sort, "nom")
nom.parent = self
nom.prompt = "Nom du sort (sans article) : "
nom.apercu = "{objet.nom}"
nom.aide_courte = \
"Entrez le |ent|nom|ff| du sort ou |cmd|/|ff| pour revenir " \
"à la fenêtre parente.\n\nNom actuel : |bc|{objet.nom}|ff|"
# Description
description = self.ajouter_choix("description", "d", Description, \
sort)
description.parent = self
description.apercu = "{objet.description.paragraphes_indentes}"
description.aide_courte = \
"| |tit|" + "Description du sort {}".format(sort.cle).ljust(76) + \
"|ff||\n" + self.opts.separateur
# Points de tribut
tribut = self.ajouter_choix("points de tribut", "tr", Entier, sort,
"points_tribut", 1)
tribut.parent = self
tribut.prompt = "Points de tribut nécessaire pour apprendre le sort : "
tribut.apercu = "{objet.points_tribut}"
tribut.aide_courte = \
"Entrez le |ent|nombre de points de tribut|ff| nécessaires "\
"pour apprendre | le sort\nou |cmd|/|ff| pour revenir à la " \
"fenêtre parente.\n\nPoints de tribut actuels : " \
"|bc|{objet.points_tribut}|ff|"
# Éléments
elements = self.ajouter_choix("eléments", "e", Selection, sort,
"elements", ELEMENTS)
elements.parent = self
elements.apercu = "{objet.str_elements}"
elements.aide_courte = \
| "Entrez un |ent|élément|ff| pour l'ajouter " \
"ou le retirer\nou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\n\n" \
"Éléments existants : |cmd|" + "|ff|, |cmd|".join(
ELEMENTS) + "\n" \
"Éléments actuels : |bc|{objet.str_elements}|ff|"
# Type de sort
types = ["destruction", "alteration", "invocation", "illusion"]
type = self.ajouter_choix("type de sort", "s", Choix, sort,
"type", types)
type.parent = self
type.prompt = "Type de sort : "
type.apercu = "{objet.type}"
type.aide_courte = \
"Entrez le |ent|type|ff| du sort ou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\nTypes disponibles : |cmd|" \
"{}|ff|.\n\nType actuel : |bc|{{objet.type}}|ff|".format(
"|ff|, |cmd|".join(types))
# Cible
types = ["aucune", "personnage", "objet", "salle"]
cible = self.ajouter_choix("type de cible", "c", Choix, sort,
"type_cible", types)
cible.parent = self
cible.prompt = "Type de cible : "
cible.apercu = "{objet.type_cible}"
cible.aide_courte = \
"Entrez le |ent|type de cible|ff| du sort ou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\nTypes disponibles : |cmd|" \
"{}|ff|.\n\nType actuel : |bc|{{objet.type_cible}}|ff|".format(
"|ff|, |cmd|".join(types))
# Stats
stats = self.ajouter_choix("stats", "st", Selection, sort,
"stats", ("agilite", "intelligence", "sensibilite"))
stats.parent = self
stats.apercu = "{objet.str_stats}"
stats.aide_courte = \
"Entrez une |ent|stat|ff| pour l'ajouter " \
"ou la retirer\nou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\n\n" \
"stats actuelles : |bc|{objet.str_stats}|ff|"
# Difficulté
difficulte = self.ajouter_choix("difficulté", "i", Entier, sort,
"difficulte", 0, 100)
difficulte.parent = self
difficulte.prompt = "Difficulté d'apprentissage : "
difficulte.apercu = "{objet.difficulte}"
difficulte.aide_courte = \
"Paramétrez la |ent|difficulté|ff| d'apprentissage du sort " \
"entre |cmd|0|ff| et |cmd|100|ff| ou entrez\n|cmd|/|ff| pour " \
"revenir à la fenêtre parente. |cmd|100|ff| signifie que le sort " \
"ne peut pas\nêtre appris par la pratique.\n\n" \
"Difficulté actuelle : |bc|{objet.difficulte}|ff|"
# Coût
cout = self.ajouter_choix("coût", "o", Entier, sort, "cout")
cout.parent = self
cout.prompt = "Coùt en mana : "
cout.apercu = "{objet.cout}"
cout.aide_courte = \
"Entrez la |ent|quantité|ff| d'énergie magique nécessaire pour " \
"lancer ce sort ou |cmd|/|ff| pour\nrevenir à la fenêtre " \
"parente.\n\n" \
"Coût : |bc|{objet.cout}|ff|"
# Durée
duree = self.ajouter_choix("durée de concentration", "u", Entier, sort,
"duree", 1)
duree.parent = self
duree.prompt = "Durée de concentration : "
duree.apercu = "{objet.duree}"
duree.aide_courte = \
"Entrez la |ent|durée|ff| de concentration du sort, en " \
"secondes, ou |cmd|/|ff| pour revenir à\nla fenêtre parente. La " \
"durée diminue automatiquement qu |
'product': 'Firefox',
'version': '20.0',
"release": "Beta"
},
{
'product': 'Firefox',
'version': '21.0a1',
"release": "Nightly"
},
{
'product': 'Thunderbird',
'version': '20.0',
"release": "Beta",
},
{
'product': 'SeaMonkey',
'version': '9.5',
"release": "Beta"
}
]
self.current_platforms = [
{
'code': 'windows',
'name': 'Windows'
},
{
'code': 'mac',
'name': 'Mac OS X'
},
{
'code': 'linux',
'name': 'Linux'
}
]
    def test_report_list(self):
        """Exercise ReportListForm validation: each invalid field in
        isolation, then a fully valid payload, then the default values."""
        def get_new_form(data):
            # The form receives the allowed products/versions/platforms
            # plus the submitted data.
            return forms.ReportListForm(
                self.current_products,
                self.current_versions,
                self.current_platforms,
                data
            )

        form = get_new_form({'range_value': '-1'})
        ok_(not form.is_valid())  # missing signature and invalid range

        form = get_new_form({
            'signature': 'sig',
            'range_value': '-1'
        })
        ok_(not form.is_valid())  # invalid range_value

        form = get_new_form({
            'signature': 'sig',
            'product': ['SomeUnkownProduct']
        })
        ok_(not form.is_valid())  # invalid product

        form = get_new_form({
            'signature': 'sig',
            'version': 'invalidVersion'
        })
        ok_(not form.is_valid())  # invalid version

        form = get_new_form({
            'signature': 'sig',
            'version': ['Another:Invalid']
        })
        ok_(not form.is_valid())  # invalid version

        form = get_new_form({
            'signature': 'sig',
            'platform': ['winux']
        })
        ok_(not form.is_valid())  # invalid platform

        form = get_new_form({
            'signature': 'sig',
            'plugin_query_type': 'invalid'
        })
        ok_(not form.is_valid())  # invalid query type

        # Test all valid data
        form = get_new_form({
            'signature': 'sig',
            'product': ['Firefox', 'SeaMonkey', 'Thunderbird'],
            'version': ['Firefox:20.0'],
            'platform': ['linux', 'mac'],
            'date': '01/02/2012 12:23:34',
            'range_unit': 'weeks',
            'range_value': 12,
            'reason': 'some reason',
            'build_id': '20200101344556',
            'process_type': 'any',
            'hang_type': 'any',
            'plugin_field': 'name',
            'plugin_query_type': 'is_exactly',
            'plugin_query': 'plugin name'
        })
        ok_(form.is_valid())

        # Test expected types after cleaning
        ok_(isinstance(form.cleaned_data['date'], datetime.datetime))
        ok_(isinstance(form.cleaned_data['range_value'], int))
        ok_(isinstance(form.cleaned_data['product'], list))
        ok_(isinstance(form.cleaned_data['version'], list))
        ok_(isinstance(form.cleaned_data['platform'], list))

        # Test default values for fields left out of the payload
        form = get_new_form({'signature': 'sig',
                             'range_unit': 'weeks',
                             'hang_type': 'any',
                             'process_type': 'any',
                             'plugin_field': 'filename'})
        ok_(form.is_valid())
        eq_(form.cleaned_data['product'], [])
        eq_(form.cleaned_data['version'], [])
        eq_(form.cleaned_data['platform'], [])
        eq_(form.cleaned_data['range_unit'], 'weeks')
        eq_(form.cleaned_data['process_type'], 'any')
        eq_(form.cleaned_data['hang_type'], 'any')
        eq_(form.cleaned_data['plugin_field'], 'filename')
def test_report_list_date(self):
def get_new_form(data):
return forms.ReportListForm(
self.current_products,
self.current_versions,
self.current_platforms,
data
)
# known formats
datetime_ = datetime.datetime(2012, 1, 2, 13, 45, 55)
date = datetime.datetime(2012, 1, 2, 0, 0)
data = {'signature': 'sig'}
fmt = '%Y-%m-%d'
form = get_new_form(dict(data, date=datetime_.strftime(fmt)))
ok_(form.is_valid(), form.errors)
eq_(form.cleaned_data['date'], date)
fmt = '%m/%d/%Y' # US format
form = get_new_form(dict(data, date=datetime_.strftime(fmt)))
ok_(form.is_valid(), form.errors)
eq_(form.cleaned_data['date'], date)
fmt = '%m/%d/%Y %H:%M:%S' # US format
form = get_new_form(dict(data, date=datetime_.strftime(fmt)))
ok_(form.is_valid(), form.errors)
eq_(form.cleaned_data['date'], datetime_)
def test_signature_summary(self):
def get_new_form(data):
return forms.SignatureSummaryForm(
self.current_products,
self.current_versions,
data,
)
form = get_new_form({'range_value': '-1'})
ok_(not form.is_valid()) # missing signature and invalid range
form = get_new_form({
'signature': 'sig',
'range_value': '-1',
'versions': 'Firefox:19.0',
})
ok_(not form.is_valid()) # invalid range_value
long_signature = 'x' * (settings.SIGNATURE_MAX_LENGTH + 1)
form = get_new_form({
'signature': long_signature,
'range_unit': 'days',
'range_value': 12,
'versions': 'Firefox:19.0',
})
ok_(not form.is_valid()) # signature too long
# Test all valid data
form = get_new_form({
'signature': 'sig',
'range_unit': 'days',
'range_value': 12,
'versions': 'Firefox:19.0',
})
ok_(form.is_valid())
# Test expected types
ok_(isinstance(form.cleaned_data['range_value'], int))
# Test default values
form = get_new_form({'signature': 'sig'})
ok_(form.is_valid())
def test_crashtrends_json(self):
now = datetime.datetime.utcnow()
week_ago = now - datetime.timedelta(days=7)
def get_new_form(data):
return forms.CrashTrendsForm(
self.current_versions,
data
)
form = get_new_form({
'product': '',
'version': '19.0',
'start_date': now,
'end_date': week_ago
})
# All fields are required
# Testing empty product
ok_(not form.is_valid())
form = get_new_form({
'product': 'Firefox',
'version': '',
'start_date': now,
'end_date': week_ago
})
# All fields are required
# Testing empty version
ok_(not form.is_valid())
form = get_new_form({
'product': 'Firefox',
'version': '21.0',
'start_date': '',
'end_date': '2012-11-02'
})
# All fields are required
# Testing empty start_date
ok_(not form.is_valid())
form = get_new_form({
'product': 'Firefox',
'version': '19.0',
'start_date': now,
'end_date': week_ago
})
# Testing invalid product version
ok_(not form.is_valid())
form = get_new_form({
'product': 'Gorilla',
'version': '19.0',
'start_date': now,
| 'end_date': week_ago
})
# Testing invalid product name
ok_(not form.is_valid())
form = get_new_form({
'product': 'Gorilla',
'version': '20.0',
'start_date': now,
'end_date': week_ago
})
# Testing valid version, invalid product name
ok_(not form.is_valid())
| form = get_new_form({
'product': 'Gorilla',
|
from flask import Blueprint, request, render_template, jsonify
from housesGenerator import House
from holdings import Holdings
# Blueprint grouping all house-related routes, mounted under /houses.
houses = Blueprint('houses', __name__, url_prefix='/houses')
@houses.route('/')
def index():
    """Render the house-generator landing page (houses.html)."""
    return render_template('houses.html')
@houses.route('/houseGenerator', methods=['GET', 'POST'])
def houseGenerator():
    """Generate a house plus its holdings and return it as JSON.

    Reads ``realm``, ``size``, ``foundation`` and ``name`` from the query
    string. NOTE(review): only ``request.args`` is read, so a POST form
    body is ignored even though POST is an allowed method -- confirm that
    is intended.
    """
    realm = request.args.get('realm')
    size = request.args.get('size')
    foundation = request.args.get('foundation')
    name = request.args.get('name')
    # House.startingResources builds the base house from these parameters;
    # its exact contract is defined in housesGenerator.
    house = House.startingResources(realm, size, foundation, name)
    # Imported here rather than at module top -- presumably to avoid a
    # circular import with `holdings`; confirm before moving it.
    from holdings import holdingsData
    generatedHouse = Holdings(holdingsData).generateAllHoldings(house, realm)
    return jsonify(generatedHouse)
|
[]
if job_name:
job_template.jobName = job_name
else:
# nameless jobs sometimes breaks drmaa implementations...
job_template.jobName = "ruffus_job_" + "_".join(map(str, datetime.datetime.now().timetuple()[0:6]))
#
# optional job parameters
#
job_template.nativeSpecification = job_other_options
# separate stdout and stderr
job_template.joinFiles=False
return job_template
#_________________________________________________________________________________________
# write_job_script_to_temp_file
#_________________________________________________________________________________________
def write_job_script_to_temp_file( cmd_str, job_script_directory, job_name, job_other_options, job_environment, working_directory):
    '''
    Write ``cmd_str`` into an executable ``#!/bin/sh`` script inside
    ``job_script_directory`` so it can be submitted via drmaa.

    The job parameters are logged as comment lines at the top of the
    script (as suggested by Bernie Pope) so a failed job can be diagnosed
    from the script alone.

    returns (job_script_path, stdout_path, stderr_path)
        Absolute path of the script, plus the file paths drmaa should use
        to capture the job's stdout and stderr.
    '''
    time_stmp_str = "_".join(map(str, datetime.datetime.now().timetuple()[0:6]))

    # create script directory if necessary
    # Ignore "already exists" errors rather than test for existence,
    # to avoid race conditions
    try:
        os.makedirs(job_script_directory)
    except OSError:
        pass

    tmpfile = tempfile.NamedTemporaryFile(mode='w', prefix='drmaa_script_' + time_stmp_str + "__", dir = job_script_directory, delete = False)

    #
    # hopefully #!/bin/sh is universally portable among unix-like operating systems
    #
    tmpfile.write( "#!/bin/sh\n" )

    #
    # log parameters as suggested by Bernie Pope
    #
    for title, parameter in ( ("job_name", job_name, ),
                              ("job_other_options", job_other_options,),
                              ("job_environment", job_environment, ),
                              ("working_directory", working_directory), ):
        if parameter:
            tmpfile.write( "#%s=%s\n" % (title, parameter))

    tmpfile.write( cmd_str + "\n" )
    tmpfile.close()

    job_script_path = os.path.abspath( tmpfile.name )
    stdout_path = job_script_path + ".stdout"
    stderr_path = job_script_path + ".stderr"

    # make the script executable by owner and group
    os.chmod( job_script_path, stat.S_IRWXG | stat.S_IRWXU )

    return (job_script_path, stdout_path, stderr_path)
#_________________________________________________________________________________________
# run_job_using_drmaa
#_________________________________________________________________________________________
def run_job_using_drmaa (cmd_str, job_name = None, job_other_options = "", job_script_directory = None, job_environment = None, working_directory = None, retain_job_scripts = False, logger = None, drmaa_session = None, verbose = 0):
"""
Runs specified command remotely using drmaa,
either with the specified session, or the module shared drmaa session
"""
import drmaa
#
# used specified session else module session
#
if drmaa_session is None:
raise error_drmaa_job( "Please specify a drmaa_session in run_job()")
#
# make job template
#
job_template = setup_drmaa_job( drmaa_session, job_name, job_environment, working_directory, job_other_options)
#
# make job script
#
if not job_script_directory:
job_script_directory = os.getcwd()
job_script_path, stdout_path, stderr_path = write_job_script_to_temp_file( cmd_str, job_script_directory, job_name, job_other_options, job_environment, working_directory)
job_template.remoteCommand = job_script_path
# drmaa paths specified as [hostname]:file_path.
# See http://www.ogf.org/Public_Comment_Docs/Documents/2007-12/ggf-drmaa-idl-binding-v1%2000%20RC7.pdf
job_template.outputPath = ":" + stdout_path
job_template.errorPath = ":" + stderr_path
#
# Run job and wait
#
jobid = drmaa_session.runJob(job_template)
if logger:
logger.debug( "job has been submitted with jobid %s" % str(jobid ))
try:
job_info = drmaa_session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
except Exception:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
msg = str(exceptionValue)
# ignore message 24 in PBS
# code 24: drmaa: Job finished but resource usage information and/or termination status could not be provided.":
if not msg.startswith("code 24"): raise
if logger:
logger.info("Warning %s\n"
"The original command was:\n%s\njobid=jobid\n"
(msg.message, cmd_str,jobid) )
job_info = None
#
# Read output
#
stdout, stderr = read_stdout_stderr_from_files( stdout_path, stderr_path, logger, cmd_str)
job_info_str = ("The original command was: >> %s <<\n"
"The jobid was: %s\n"
"The job script name was: %s\n" %
(cmd_str,
jobid,
job_script_path))
def stderr_stdout_to_str (stderr, stdout):
"""
Concatenate stdo | ut and stderr to string
"""
result = ""
if stderr:
result += "The stderr was: \n%s\n\n" % ("".join( stderr))
if stdout | :
result += "The stdout was: \n%s\n\n" % ("".join( stdout))
return result
#
# Throw if failed
#
if job_info:
job_info_str += "Resources used: %s " % (job_info.resourceUsage)
if job_info.wasAborted:
raise error_drmaa_job( "The drmaa command was never ran but used %s:\n%s"
% (job_info.exitStatus, job_info_str + stderr_stdout_to_str (stderr, stdout)))
elif job_info.hasSignal:
raise error_drmaa_job( "The drmaa command was terminated by signal %i:\n%s"
% (job_info.exitStatus, job_info_str + stderr_stdout_to_str (stderr, stdout)))
elif job_info.hasExited:
if job_info.exitStatus:
raise error_drmaa_job( "The drmaa command was terminated by signal %i:\n%s"
% (job_info.exitStatus, job_info_str + stderr_stdout_to_str (stderr, stdout)))
#
# Decorate normal exit with some resource usage information
#
elif verbose:
def nice_mem_str(num):
"""
Format memory sizes
http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
num = float(num)
for x in ['bytes','KB','MB','GB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
try:
resource_usage_str = []
if 'maxvmem' in job_info.resourceUsage:
if 'mem' in job_info.resourceUsage:
resource_usage_str.append("Mem=%s(%s)" % (nice_mem_str(job_info.resourceUsage['maxvmem']), job_info.resourceUsage['mem']))
else:
resource_usage_str.append("Mem=%s" % nice_mem_str(job_info.resourceUsage['maxvmem']))
if 'ru_wallclock' in job_info.resourceUsage:
resource_usage_str.append("CPU wallclock= %.2gs" % float(job_info.resourceUsage['ru_wallclock']))
if len(resource_usage_str):
logger.info("Drmaa command used %s in running %s" % (", ".join(resource_usage_str), cmd_str))
else:
logger.info("Drmaa command successfully ran %s" % cmd_str)
except:
logger.info("Drmaa command used %s in running %s" % (job_info.resourceUsage, cmd_str))
#
# clean up job template
#
drmaa_session.deleteJobTemplate(job_template)
#
# Cleanup job script unless retain_job_scripts is set
#
if retain_job_scripts:
# job scripts have the jobid as an extension
os |
#
# Copyright (c) 2017-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from operator import itemgetter
import base64
import tempfile
from google.oauth2.service_account import Credentials
"""Base class for accessing Google Cloud Platform services from Python apps
deployed to PCF. This class implements the authentication part.
Here are the various service names, as defined in
https://github.com/GoogleCloudPlatform/gcp-service-broker/blob/master/brokerapi/brokers/models/service_broker.go
const StorageName = "google-storage"
const BigqueryName = "google-bigquery"
const BigtableName = "google-bigtable"
const CloudsqlName = "google-cloudsql"
const PubsubName = "google-pubsub"
const MlName = "google-ml-apis"
"""
class PcfGcp:
    """Accessor for GCP services bound to a PCF app through VCAP_SERVICES.

    One client per service type is created lazily; the OAuth2
    service-account credential setup is handled here as well.
    (Python 2 code: note the print statements.)
    """
    def __init__(self):
        # Parsed VCAP_SERVICES content (not populated here; kept for callers).
        self.VCAP_SERVICES = None
        # One lazily-created client per service type.
        self.clients = {
            'storage': None
            , 'google-bigquery': None
            , 'google-bigtable': None
            , 'google-cloudsql': None
            , 'google-pubsub': None
            , 'language': None
            , 'vision': None
        }
        # Both populated as side effects of get_google_cloud_credentials().
        self.projectId = None
        self.bucketName = None # Storage
    def className(self):
        """Return the name of this class."""
        return self.__class__.__name__
    def getClient(self, name):
        """Return the cached client for *name*, or None if not created yet."""
        return self.clients.get(name)
    def setClient(self, name, val):
        """Cache *val* as the client for service *name*."""
        self.clients[name] = val
    def get_service_instance_dict(self, serviceName): # 'google-storage', etc.
        """Return the first bound instance of *serviceName* from VCAP_SERVICES.

        Raises Exception when VCAP_SERVICES is absent from the environment
        or contains no instance of the requested service.
        """
        vcapStr = os.environ.get('VCAP_SERVICES')
        if vcapStr is None:
            raise Exception('VCAP_SERVICES not found in environment variables (this is required)')
        vcap = json.loads(vcapStr)
        svcs = None
        try:
            svcs = vcap[serviceName][0]
        except:
            raise Exception('No instance of ' + serviceName + ' available')
        return svcs
    def get_google_cloud_credentials(self, serviceName):
        """Returns oauth2 credentials of type
        google.oauth2.service_account.Credentials

        Side effects: sets self.projectId (and self.bucketName when the
        binding provides one), writes the private key JSON to a temp file
        and points GOOGLE_APPLICATION_CREDENTIALS at it.
        """
        service_info = self.get_service_instance_dict(serviceName)
        # NOTE(review): base64.decodestring is deprecated (removed in
        # Python 3.9); decodebytes is the modern equivalent.
        pkey_data = base64.decodestring(service_info['credentials']['PrivateKeyData'])
        pkey_dict = json.loads(pkey_data)
        self.credentials = Credentials.from_service_account_info(pkey_dict)
        # Get additional fields
        self.projectId = service_info['credentials']['ProjectId']
        print 'ProjectID: %s' % self.projectId
        if 'bucket_name' in service_info['credentials']:
            self.bucketName = service_info['credentials']['bucket_name']
        # Set the environment variable for GCP (this was the only way to get Storage to work).
        credFile = tempfile.gettempdir() + '/' + 'GCP_credentials.json'
        with open(credFile, 'w') as out:
            out.write(pkey_data)
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credFile
        print 'Wrote credentials to %s' % credFile
        print 'Set env GOOGLE_APPLICATION_CREDENTIALS to %s' % os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
        return self.credentials
    """Ref. https://cloud.google.com/natural-language/docs/sentiment-tutorial
    score ranges from -1.0 to 1.0
    magnitude ranges from 0.0 to Infinite (depends on length of document)
    """
    def getLanguage(self):
        """Return (creating on first use) the Natural Language client."""
        if self.clients['language'] is None:
            from google.cloud import language
            self.clients['language'] = language.Client(self.get_google_cloud_credentials('google-ml-apis'))
        # print 'projectId: %s' % self.projectId
        return self.clients['language']
    """Ref. https://cloud.google.com/vision/docs/reference/libraries#client-libraries-install-python"""
    def getVision(self):
        """Return (creating on first use) the Vision client."""
        if self.clients['vision'] is None:
            from google.cloud import vision
            self.clients['vision'] = vision.Client(project=self.projectId, credentials=self.get_google_cloud_credentials('google-ml-apis'))
        return self.clients['vision']
    def getStorage(self):
        """Return (creating on first use) the Cloud Storage client."""
        if self.clients['storage'] is None:
            from google.cloud import storage
            # Called for its side effects: sets projectId and the
            # GOOGLE_APPLICATION_CREDENTIALS environment variable.
            self.get_google_cloud_credentials('google-storage')
            self.clients['storage'] = storage.Client(self.projectId)
        return self.clients['storage']
    def getBucketName(self):
        """Return the bucket name captured from the storage binding, if any."""
        return self.bucketName
    def getBigQuery(self):
        # Not implemented.
        pass
    def getBigtable(self):
        # Not implemented.
        pass
    def getCloudSql(self):
        # Not implemented.
        pass
    def getPubSub(self):
        # Not implemented.
        pass
|
from django.db.backends.creation import NO_DB_ALIAS
from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper as Psycopg2DatabaseWrapper
from django.contrib.gis.db.backends.postgis.creation import PostGISCreation
from django.contrib.gis.db.backends.postgis.introspection import PostGISIntrospection
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
from django.contrib.gis.db.backends.postgis.schema import PostGISSchemaEditor
class DatabaseWr | apper(Psycopg2DatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
if kwargs.get('alias', '') != NO_DB_ALIAS:
self.creation = PostGISCreation(self)
| self.ops = PostGISOperations(self)
self.introspection = PostGISIntrospection(self)
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return PostGISSchemaEditor(self, *args, **kwargs)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import logging
from warnings import warn
import six
from scss.ast import Literal
from scss.cssdefs import _expr_glob_re, _interpolate_re
from scss.errors import SassError, SassEvaluationError, SassParseError
from scss.grammar.expression import SassExpression, SassExpressionScanner
from scss.rule import Namespace
from scss.types import String
from scss.types import Value
from scss.util import dequote
log = logging.getLogger(__name__)
class Calculator(object):
    """Expression evaluator."""

    # Cache of parsed ASTs, keyed on (parser target, expression text).
    # Class-level, so it is shared by every Calculator instance.
    ast_cache = {}

    def __init__(
            self, namespace=None,
            ignore_parse_errors=False,
            undefined_variables_fatal=True,
    ):
        if namespace is None:
            self.namespace = Namespace()
        else:
            self.namespace = namespace
        # When True, unparseable expressions evaluate to None instead of
        # propagating SassError.
        self.ignore_parse_errors = ignore_parse_errors
        # When True, an undefined $variable raises SyntaxError; otherwise it
        # is logged and the raw text is kept.
        self.undefined_variables_fatal = undefined_variables_fatal

    def _pound_substitute(self, result):
        """re.sub callback: evaluate one ``#{...}`` group and render it."""
        expr = result.group(1)
        value = self.evaluate_expression(expr)
        if value is None:
            # Couldn't evaluate as an expression; fall back to plain
            # variable substitution on the raw text.
            return self.apply_vars(expr)
        elif value.is_null:
            return ""
        else:
            return dequote(value.render())

    def do_glob_math(self, cont):
        """Performs #{}-interpolation. The result is always treated as a fixed
        syntactic unit and will not be re-evaluated.
        """
        # TODO that's a lie! this should be in the parser for most cases.
        if not isinstance(cont, six.string_types):
            warn(FutureWarning(
                "do_glob_math was passed a non-string {0!r} "
                "-- this will no longer be supported in pyScss 2.0"
                .format(cont)
            ))
            cont = six.text_type(cont)
        if '#{' not in cont:
            # Fast path: nothing to interpolate.
            return cont
        cont = _expr_glob_re.sub(self._pound_substitute, cont)
        return cont

    def apply_vars(self, cont):
        """Substitute $variables in *cont*, then run #{}-interpolation."""
        # TODO this is very complicated. it should go away once everything
        # valid is actually parseable.
        if isinstance(cont, six.string_types) and '$' in cont:
            try:
                # Optimization: the full cont is a variable in the context,
                cont = self.namespace.variable(cont)
            except KeyError:
                # Interpolate variables:
                def _av(m):
                    v = None
                    n = m.group(2)
                    try:
                        v = self.namespace.variable(n)
                    except KeyError:
                        if self.undefined_variables_fatal:
                            raise SyntaxError("Undefined variable: '%s'." % n)
                        else:
                            log.error("Undefined variable '%s'", n, extra={'stack': True})
                            return n
                    else:
                        if v:
                            if not isinstance(v, Value):
                                raise TypeError(
                                    "Somehow got a variable {0!r} "
                                    "with a non-Sass value: {1!r}"
                                    .format(n, v)
                                )
                            v = v.render()
                            # TODO this used to test for _dequote
                            if m.group(1):
                                v = dequote(v)
                        else:
                            v = m.group(0)
                    return v

                cont = _interpolate_re.sub(_av, cont)
            else:
                # Variable succeeded, so we need to render it
                cont = cont.render()
        # TODO this is surprising and shouldn't be here
        cont = self.do_glob_math(cont)
        return cont

    def calculate(self, expression, divide=False):
        """Evaluate *expression*; fall back to an unquoted string (with
        variables substituted) when it does not parse.
        """
        result = self.evaluate_expression(expression, divide=divide)
        if result is None:
            return String.unquoted(self.apply_vars(expression))
        return result

    # TODO only used by magic-import...?
    def interpolate(self, var):
        """Resolve *var* from the namespace; if the value is a different
        string, evaluate it once more.
        """
        value = self.namespace.variable(var)
        if var != value and isinstance(value, six.string_types):
            _vi = self.evaluate_expression(value)
            if _vi is not None:
                value = _vi
        return value

    def evaluate_expression(self, expr, divide=False):
        """Parse and evaluate *expr*.

        Returns None on parse errors when ignore_parse_errors is set;
        wraps evaluation failures in SassEvaluationError (original
        traceback preserved).
        """
        try:
            ast = self.parse_expression(expr)
        except SassError as e:
            if self.ignore_parse_errors:
                return None
            raise

        try:
            return ast.evaluate(self, divide=divide)
        except Exception as e:
            six.reraise(SassEvaluationError, SassEvaluationError(e, expression=expr), sys.exc_info()[2])

    def parse_expression(self, expr, target='goal'):
        """Parse *expr* with the grammar entry point *target*, consulting
        the class-level AST cache first.
        """
        if isinstance(expr, six.text_type):
            # OK
            pass
        elif isinstance(expr, six.binary_type):
            # Dubious
            warn(FutureWarning(
                "parse_expression was passed binary data {0!r} "
                "-- this will no longer be supported in pyScss 2.0"
                .format(expr)
            ))
            # Don't guess an encoding; you reap what you sow
            expr = six.text_type(expr)
        else:
            raise TypeError("Expected string, got %r" % (expr,))

        key = (target, expr)
        if key in self.ast_cache:
            return self.ast_cache[key]

        try:
            parser = SassExpression(SassExpressionScanner(expr))
            ast = getattr(parser, target)()
        except SyntaxError as e:
            raise SassParseError(e, expression=expr, expression_pos=parser._char_pos)

        self.ast_cache[key] = ast
        return ast

    def parse_interpolations(self, string):
        """Parse a string for interpolations, but don't treat anything else as
        Sass syntax. Returns an AST node.
        """
        # Shortcut: if there are no #s in the string in the first place, it
        # must not have any interpolations, right?
        if '#' not in string:
            return Literal(String.unquoted(string))
        return self.parse_expression(string, 'goal_interpolated_literal')

    def parse_vars_and_interpolations(self, string):
        """Parse a string for variables and interpolations, but don't treat
        anything else as Sass syntax. Returns an AST node.
        """
        # Shortcut: if there are no #s or $s in the string in the first place,
        # it must not have anything of interest.
        if '#' not in string and '$' not in string:
            return Literal(String.unquoted(string))
        return self.parse_expression(
            string, 'goal_interpolated_literal_with_vars')
__all__ = ('Calculator',)
|
class Card:
    """Simple record describing a card entry (defaults at class level)."""
    # Number of copies of this card.
    count = 0
    # URL associated with the card -- presumably an image or detail page;
    # confirm against callers.
    url = ""
    # Card name.
    name = ""
    # Sideboard marker; -1 presumably means "not in the sideboard" --
    # confirm against callers.
    sideboard = -1
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
    """
    A list of parsed result objects passed back from Amazon services.

    Light wrapper around Python's :py:class:`list` that also implements
    the SAX handler hooks (``startElement`` / ``endElement``) used to
    populate it while parsing an XML response.

    ``marker_elem`` may be a list of ``(element_name, factory)`` tuples;
    each time ``element_name`` is encountered in the XML a new ``factory``
    instance is created and appended to this list.

    :ivar str next_token: A hash used to assist in paging through very long
        result sets. In most cases, passing this value to certain methods
        will give you another 'page' of results.
    """

    # XML element name -> instance attribute for plain text captures.
    _TEXT_FIELDS = {
        'Marker': 'marker',
        'KeyMarker': 'key_marker',
        'NextMarker': 'next_marker',
        'NextKeyMarker': 'next_key_marker',
        'VersionIdMarker': 'version_id_marker',
        'NextVersionIdMarker': 'next_version_id_marker',
        'NextGenerationMarker': 'next_generation_marker',
        'UploadIdMarker': 'upload_id_marker',
        'NextUploadIdMarker': 'next_upload_id_marker',
        'Bucket': 'bucket',
        'Prefix': 'prefix',
        'NextToken': 'next_token',
    }

    # XML element name -> instance attribute for integer captures.
    _INT_FIELDS = {
        'MaxUploads': 'max_uploads',
        'MaxItems': 'max_items',
    }

    def __init__(self, marker_elem=None):
        list.__init__(self)
        self.markers = marker_elem if isinstance(marker_elem, list) else []
        self.marker = None
        self.key_marker = None
        self.next_marker = None  # avail when delimiter used
        self.next_key_marker = None
        self.next_upload_id_marker = None
        self.next_version_id_marker = None
        self.next_generation_marker = None
        self.version_id_marker = None
        self.is_truncated = False
        self.next_token = None
        self.status = True

    def startElement(self, name, attrs, connection):
        """Create and push a tracked child object for a marker element."""
        for pair in self.markers:
            if pair[0] == name:
                child = pair[1](connection)
                self.append(child)
                return child
        if name == 'Owner':
            # Makes owner available for get_service and perhaps other
            # lists where not handled by another element.
            self.owner = User()
            return self.owner
        return None

    def to_boolean(self, value, true_value='true'):
        """True iff *value* equals the configured truthy string."""
        return value == true_value

    def endElement(self, name, value, connection):
        """Record the closed element's text on the matching attribute."""
        if name == 'IsTruncated':
            self.is_truncated = self.to_boolean(value)
        elif name == 'return':
            self.status = self.to_boolean(value)
        elif name == 'StatusCode':
            self.status = self.to_boolean(value, 'Success')
        elif name == 'IsValid':
            self.status = self.to_boolean(value, 'True')
        elif name == 'ItemName':
            self.append(value)
        elif name == 'nextToken':
            self.next_token = value
            # Code exists which expects nextToken to be available, so we
            # set it here to remain backwards-compatibile.
            self.nextToken = value
        elif name == 'BoxUsage':
            try:
                connection.box_usage += float(value)
            except:
                pass
        elif name in self._TEXT_FIELDS:
            setattr(self, self._TEXT_FIELDS[name], value)
        elif name in self._INT_FIELDS:
            setattr(self, self._INT_FIELDS[name], int(value))
        else:
            setattr(self, name, value)
class BooleanResult(object):
    """
    Minimal SAX-style result holder for service calls that return a
    simple success/failure status plus a request id and box usage.

    Truthiness of an instance mirrors ``self.status``.
    """

    def __init__(self, marker_elem=None):
        self.status = True
        self.request_id = None
        self.box_usage = None

    def __repr__(self):
        if self.status:
            return 'True'
        else:
            return 'False'

    def __nonzero__(self):
        return self.status

    # Python 3 spelling of the truthiness hook.
    __bool__ = __nonzero__

    def startElement(self, name, attrs, connection):
        # No nested objects are tracked.
        return None

    def to_boolean(self, value, true_value='true'):
        """True iff *value* equals the configured truthy string."""
        if value == true_value:
            return True
        else:
            return False

    def endElement(self, name, value, connection):
        if name == 'return':
            self.status = self.to_boolean(value)
        elif name == 'StatusCode':
            self.status = self.to_boolean(value, 'Success')
        elif name == 'IsValid':
            self.status = self.to_boolean(value, 'True')
        elif name == 'RequestId':
            self.request_id = value
        elif name == 'requestId':
            self.request_id = value
        elif name == 'BoxUsage':
            # Bug fix: this previously assigned to self.request_id,
            # clobbering the request id and leaving box_usage always None.
            self.box_usage = value
        else:
            setattr(self, name, value)
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
# Target host parameter: a hostname, or null (null lets the scheduler
# pick the destination).
host = copy.deepcopy(parameter_types.hostname)
host['type'] = ['string', 'null']
# JSON schema for the os-migrateLive server action (base microversion):
# all three properties are required.
migrate_live = {
    'type': 'object',
    'properties': {
        'os-migrateLive': {
            'type': 'object',
            'properties': {
                'block_migration': parameter_types.boolean,
                'disk_over_commit': parameter_types.boolean,
                'host': host
            },
            'required': ['block_migration', 'disk_over_commit', 'host'],
            'additionalProperties': False,
        },
    },
    'required': ['os-migrateLive'],
    'additionalProperties': False,
}
# From microversion 2.25: block_migration additionally accepts 'auto',
# and disk_over_commit is removed from the schema.
block_migration = copy.deepcopy(parameter_types.boolean)
block_migration['enum'].append('auto')
migrate_live_v2_25 = copy.deepcopy(migrate_live)
del migrate_live_v2_25['properties']['os-migrateLive']['properties'][
    'disk_over_commit']
migrate_live_v2_25['properties']['os-migrateLive']['properties'][
    'block_migration'] = block_migration
migrate_live_v2_25['properties']['os-migrateLive']['required'] = (
    ['block_migration', 'host'])
|
ew_mem - 0.1 * self.dt,
new_mem)
return new_mem
    def set_reset_mem(self, mem, spikes):
        """
        Reset membrane potential ``mem`` array where ``spikes`` array is
        nonzero.

        The reset scheme (to zero, by subtraction, or by modulo) is taken
        from the ``cell.reset`` config option; the result is written back
        into ``self.mem``.
        """
        if (hasattr(self, 'activation_str') and
                self.activation_str == 'softmax'):
            # Turn off reset (uncomment second line) to get a faster and better
            # top-1 error. The top-5 error is better when resetting:
            # new = tf.where(tf.not_equal(spikes, 0), tf.zeros_like(mem), mem)
            new = tf.identity(mem)
        elif self.config.get('cell', 'reset') == 'Reset by subtraction':
            if self.payloads:  # Experimental.
                new = tf.where(tf.not_equal(spikes, 0),
                               tf.zeros_like(mem), mem)
            else:
                # Subtract the threshold where the neuron spiked positively,
                # add it back where it spiked negatively.
                new = tf.where(tf.greater(spikes, 0), mem - self.v_thresh, mem)
                new = tf.where(tf.less(spikes, 0), new + self.v_thresh, new)
        elif self.config.get('cell', 'reset') == 'Reset by modulo':
            new = tf.where(tf.not_equal(spikes, 0), mem % self.v_thresh, mem)
        else:  # self.config.get('cell', 'reset') == 'Reset to zero':
            new = tf.where(tf.not_equal(spikes, 0), tf.zeros_like(mem), mem)
        self.mem.assign(new)
def get_new_thresh(self):
"""Get new threshhold."""
thr_min = self._v_thresh / 100
thr_max = self._v_thresh
r_lim = 1 / self.dt
return thr_min + (thr_max - thr_min) * self.max_spikerate / r_lim
# return tf.cond(
# k.equal(self.time / self.dt % settings['timestep_fraction'], 0) *
# k.greater(self.max_spikerate, settings['diff_to_min_rate']/1000)*
# k.greater(1 / self.dt - self.max_spikerate,
# settings['diff_to_max_rate'] / 1000),
# lambda: self.max_spikerate, lambda: self.v_thresh)
    def get_time(self):
        """Get simulation time variable.

        Returns
        -------

        time: float
            Current simulation time.
        """
        # NOTE(review): this returns the bound ``eval`` attribute itself,
        # not its result -- if callers expect a float, a call (``.eval()``
        # in graph mode / ``.numpy()`` in eager mode) may be missing.
        # Confirm against callers before changing.
        return self.time.eval
    def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """
        # self.time is a tf.Variable (created in init_neurons), so the
        # update goes through assign().
        self.time.assign(time)
    def init_membrane_potential(self, output_shape=None, mode='zero'):
        """Initialize membrane potential.

        Helpful to avoid transient response in the beginning of the simulation.
        Not needed when reset between frames is turned off, e.g. with a video
        data set.

        Parameters
        ----------

        output_shape: Optional[tuple]
            Output shape
        mode: str
            Initialization mode.

            - ``'uniform'``: Random numbers from uniform distribution in
              ``[-thr, thr]``.
            - ``'bias'``: Negative bias.
            - ``'zero'``: Zero (default).

        Returns
        -------

        init_mem: ndarray
            A tensor of ``self.output_shape`` (same as layer).
        """
        if output_shape is None:
            output_shape = self.output_shape
        if mode == 'uniform':
            init_mem = tf.random.uniform(output_shape,
                                         -self._v_thresh, self._v_thresh)
        elif mode == 'bias':
            init_mem = tf.zeros(output_shape, self._floatx)
            if hasattr(self, 'b'):
                b = self.get_weights()[1]
                for i in range(len(b)):
                    # NOTE(review): item assignment on a tf tensor raises
                    # TypeError; this branch presumably predates the TF2
                    # port -- confirm before relying on mode='bias'.
                    init_mem[:, i, Ellipsis] = -b[i]
        else:  # mode == 'zero':
            init_mem = tf.zeros(output_shape, self._floatx)
        return init_mem
    @tf.function
    def reset_spikevars(self, sample_idx):
        """
        Reset variables present in spiking layers. Can be turned off for
        instance when a video sequence is tested.

        The reset only fires when ``sample_idx`` is a multiple of the
        ``simulation.reset_between_nth_sample`` config value.
        """
        mod = self.config.getint('simulation', 'reset_between_nth_sample')
        # A falsy setting means "reset for every sample".
        mod = mod if mod else sample_idx + 1
        do_reset = sample_idx % mod == 0
        if do_reset:
            self.mem.assign(self.init_membrane_potential())
        # Restart the simulation clock at one timestep.
        self.time.assign(self.dt)
        if self.tau_refrac > 0:
            self.refrac_until.assign(tf.zeros_like(self.refrac_until))
        if self.spiketrain is not None:
            self.spiketrain.assign(tf.zeros_like(self.spiketrain))
        if self.payloads:
            self.payloads.assign(tf.zeros_like(self.payloads))
            self.payloads_sum.assign(tf.zeros_like(self.payloads_sum))
        if self.online_normalization and do_reset:
            self.spikecounts.assign(tf.zeros_like(self.spikecounts))
            self.max_spikerate.assign(0)
            self.v_thresh.assign(self._v_thresh)
        # `clamp_var` is a module-level flag defined elsewhere in this file.
        if clamp_var and do_reset:
            self.spikerate.assign(tf.zeros_like(self.spikerate))
            self.var.assign(tf.zeros_like(self.var))
        if self.mem_input is not None:
            self.mem_input.assign(tf.zeros_like(self.mem_input))
    @tf.function
    def init_neurons(self, input_shape):
        """Init layer neurons.

        Creates the tf.Variables used during simulation (membrane
        potential, clock, and the optional recording/normalization
        buffers). Variables are only allocated when the corresponding
        feature is enabled, to save memory and computation.
        """
        from snntoolbox.bin.utils import get_log_keys, get_plot_keys
        output_shape = self.compute_output_shape(input_shape)
        # The `is None` guards ensure each variable is created only once,
        # even though @tf.function may trace this method multiple times.
        if self.v_thresh is None:  # Need this check because of @tf.function.
            self.v_thresh = tf.Variable(self._v_thresh, name='v_thresh',
                                        trainable=False)
        if self.mem is None:
            self.mem = tf.Variable(self.init_membrane_potential(output_shape),
                                   name='v_mem', trainable=False)
        if self.time is None:
            self.time = tf.Variable(self.dt, name='dt', trainable=False)
        # To save memory and computations, allocate only where needed:
        if self.tau_refrac > 0 and self.refrac_until is None:
            self.refrac_until = tf.Variable(
                tf.zeros(output_shape), name='refrac_until', trainable=False)
        # Spiketrain recording is only needed when a plot/log key uses it.
        if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
                'hist_spikerates_activations', 'operations',
                'synaptic_operations_b_t', 'neuron_operations_b_t',
                'spiketrains_n_b_l_t'} & (get_plot_keys(self.config) |
               get_log_keys(self.config))) and self.spiketrain is None:
            self.spiketrain = tf.Variable(tf.zeros(output_shape),
                                          trainable=False, name='spiketrains')
        if self.online_normalization and self.spikecounts is None:
            self.spikecounts = tf.Variable(tf.zeros(output_shape),
                                           trainable=False, name='spikecounts')
            self.max_spikerate = tf.Variable(tf.zeros([1]), trainable=False,
                                             name='max_spikerate')
        if self.config.getboolean('cell', 'payloads') \
                and self.payloads is None:
            self.payloads = tf.Variable(tf.zeros(output_shape),
                                        trainable=False, name='payloads')
            self.payloads_sum = tf.Variable(
                tf.zeros(output_shape), trainable=False, name='payloads_sum')
        # `clamp_var` is a module-level flag defined elsewhere in this file.
        if clamp_var and self.spikerate is None:
            self.spikerate = tf.Variable(tf.zeros(input_shape),
                                         trainable=False, name='spikerates')
            self.var = tf.Variable(tf.zeros(input_shape),
                                   trainable=False, name='var')
        if hasattr(self, 'clamp_idx'):
            self.clamp_idx = self.get_clamp_idx()
def get_layer_idx(self):
"""Get index of layer."""
label = self.name.split('_')[0]
layer_idx = None
for i in range(len(label)):
if label[:i].isdigit():
layer_idx = int(label[:i])
return layer_idx
def get_clamp_idx(self):
"""Get time step when to stop clamping membrane potential.
Returns
-------
: int
Time step when to stop clamping.
"""
with open(self.filename_clamp_indic |
# -*- coding:utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from itertools import groupby
from swf.models.event import EventFactory, Compiled | EventFactory
from swf.models.event.workflow import WorkflowExecutionEvent
from swf.utils import cached_property
class History(object):
    """Execution events history container

    History object is an Event subclass objects container
    which can be built directly against an amazon json response
    using it's from_event_list method.

    It is iterable and exposes a list-like __getitem__ for easier
    manipulation.

    :param events: Events list to build History upon
    :type  events: list

    Typical amazon response looks like:

    .. code-block:: json

        {
            "events": [
                {
                    'eventId': 1,
                    'eventType': 'WorkflowExecutionStarted',
                    'workflowExecutionStartedEventAttributes': {
                        'taskList': {
                            'name': 'test'
                        },
                        'parentInitiatedEventId': 0,
                        'taskStartToCloseTimeout': '300',
                        'childPolicy': 'TERMINATE',
                        'executionStartToCloseTimeout': '6000',
                        'workflowType': {
                            'version': '0.1',
                            'name': 'test-1'
                        },
                    },
                    'eventTimestamp': 1365177769.585,
                },
                {
                    'eventId': 2,
                    'eventType': 'DecisionTaskScheduled',
                    'decisionTaskScheduledEventAttributes': {
                        'startToCloseTimeout': '300',
                        'taskList': {
                            'name': 'test'
                        }
                    },
                    'eventTimestamp': 1365177769.585
                }
            ]
        }
    """
    def __init__(self, *args, **kwargs):
        # Only ``events`` and ``raw`` are consumed; other kwargs are ignored.
        self.events = kwargs.pop('events', [])
        self.raw = kwargs.pop('raw', None)
        # Cursor used by the (Python 2) iterator protocol in next().
        self.it_pos = 0

    def __len__(self):
        return len(self.events)

    def __getitem__(self, val):
        # Integer indexing returns a single event; slicing returns a new
        # History wrapping the sliced event list.
        if isinstance(val, int):
            return self.events[val]
        elif isinstance(val, slice):
            return History(events=self.events[val])

        raise TypeError("Unknown slice format: %s" % type(val))

    def __repr__(self):
        events_repr = '\n\t'.join(
            map(lambda e: e.__repr__(), self.events)
        )
        repr_str = '<History\n\t%s\n>' % events_repr

        return repr_str

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol. The cursor wraps back to 0 on
        # exhaustion so the History can be iterated more than once.
        try:
            next_event = self.events[self.it_pos]
            self.it_pos += 1
        except IndexError:
            self.it_pos = 0
            raise StopIteration
        return next_event

    @property
    def last(self):
        """Returns the last stored event

        :rtype: swf.models.event.Event
        """
        return self.events[-1]

    def latest(self, n):
        """Returns the n latest events stored in the History

        :param  n: latest events count to return
        :type   n: int

        :rtype: list
        """
        end_pos = len(self.events)
        start_pos = len(self.events) - n
        return self.events[start_pos:end_pos]

    @property
    def first(self):
        """Returns the first stored event

        :rtype: swf.models.event.Event
        """
        return self.events[0]

    @property
    def finished(self):
        """Checks if the History matches with a finished Workflow
        Execution history state.
        """
        completion_states = (
            'completed',
            'failed',
            'canceled',
            'terminated'
        )

        if (isinstance(self.last, WorkflowExecutionEvent) and
           self.last.state in completion_states):
            return True

        return False

    def filter(self, **kwargs):
        """Filters the history based on kwargs events attributes

        Basically, allows to filter the history events upon their
        types and states. Can be used for example to retrieve every
        'DecisionTask' in the history, to check the presence of a specific
        event and so on...

        example:

        .. code-block:: python

            >>> history_obj.filter(type='ActivityTask', state='completed')  # doctest: +SKIP
            <History
                <Event 23 ActivityTask : completed>
                <Event 42 ActivityTask : completed>
                <Event 61 ActivityTask : completed>
            >

            >>> history_obj.filter(type='DecisionTask')  # doctest: +SKIP
            <History
                <Event 2 DecisionTask : scheduled>
                <Event 3 DecisionTask : started>
                <Event 7 DecisionTask : scheduled>
                <Event 8 DecisionTask : started>
                <Event 20 DecisionTask : scheduled>
                <Event 21 DecisionTask : started>
            >

        NOTE(review): despite the example above, this returns the result of
        the builtin ``filter`` (a list on Python 2), not a History instance.

        :rtype: swf.models.history.History
        """
        # ``iteritems``/builtin ``filter`` semantics here are Python 2 only.
        return filter(
            lambda e: all(getattr(e, k) == v for k, v in kwargs.iteritems()),
            self.events
        )

    @property
    def reversed(self):
        # Generator over events from newest to oldest (xrange: Python 2).
        for i in xrange(len(self.events) - 1, -1, -1):
            yield self.events[i]

    @property
    def distinct(self):
        """Extracts distinct history events based on their types

        :rtype: list of swf.models.event.Event
        """
        distinct_events = []

        # groupby only merges *consecutive* events sharing a type, so the
        # WorkflowExecution special-case below re-merges later occurrences.
        for key, group in groupby(self.events, lambda e: e.type):
            g = list(group)

            # Merge every WorkflowExecution events into same group
            if (len(g) == 1 and
               len(distinct_events) >= 1 and
               g[0].type == "WorkflowExecution"):
                # WorfklowExecution group will always be in first position
                distinct_events[0].extend(g)
            else:
                distinct_events.append(list(g))

        return distinct_events

    def compile(self):
        """Compiles history events into a stateful History
        based on events types and states transitions.

        Every events stored in the resulting history are stateful
        CompiledEvent subclasses instances then.

        :rtype: swf.models.history.History made of swf.models.event.CompiledEvent
        """
        distinct_events = self.distinct
        compiled_history = []

        for events_list in distinct_events:
            if len(events_list) > 0:
                # Seed the compiled event with the first occurrence, then
                # replay the remaining events through its state machine.
                compiled_event = CompiledEventFactory(events_list[0])

                for event in events_list[1:]:
                    compiled_event.transit(event)

                compiled_history.append(compiled_event)

        return History(events=compiled_history)

    @cached_property
    def compiled(self):
        """Compiled history version

        :rtype: swf.models.history.History made of swf.models.event.CompiledEvent
        """
        return self.compile()

    @classmethod
    def from_event_list(cls, data):
        """Instantiates a new ``swf.models.history.History`` instance
        from amazon service response.

        Every member of the History are ``swf.models.event.Event``
        subclasses instances, exposing their type, state, and so on to
        facilitate decisions according to the history.

        :param  data: event history description (typically, an amazon response)
        :type   data: dict

        :returns: History model instance built upon data description
        :rtype : swf.model.event.History
        """
        events_history = []

        for index, d in enumerate(data):
            event = EventFactory(d)
            events_history.append(event)

        return cls(events=events_history, raw=data)
|
from fitensemble import belt
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import ALA3
import experiment_loader
# Sweep every (force field, prior) combination and plot the MCMC trace of
# the first "alpha" coefficient of the corresponding fitted BELT model.
grid = itertools.product(ALA3.ff_list, ALA3.prior_list)
bayesian_bootstrap_run = 0  # which bayesian-bootstrap replicate to load
for k, (ff, prior) in enumerate(grid):
    print(ff, prior)
    regularization_strength = ALA3.regularization_strength_dict[prior][ff]
    predictions, measurements, uncertainties = experiment_loader.load(ff)
    pymc_filename = ALA3.data_directory + "/models/model_%s_%s_reg-%.1f-BB%d.h5" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
    belt_model = belt.BELT.load(pymc_filename)
    # Full trace of the alpha coefficients; only column 0 is plotted.
    a = belt_model.mcmc.trace("alpha")[:]
    plt.figure()
    plt.title("%s - %s" % (ALA3.ff_map[ff], prior))
    y = a[:,0]
    # Convert trace index to MCMC step number (traces are thinned).
    x = np.arange(len(y)) * ALA3.thin
    plt.plot(x, y)
    plt.xlabel("MCMC steps")
    #plt.ylabel(r"$\alpha$:" + str(predictions.columns[0]))
    plt.ylabel(predictions.columns[0])
    plt.savefig(ALA3.outdir+"/%s-%s-MCMC_Trace.png" % (prior, ff), bbox_inches='tight')
|
"""
platformer.py
Author: Brian S
Credit: Finn H
Assignment:
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Platformer
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset, ImageAsset, Frame
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 800

# Colors, defined once each. The original defined blue, red, black and white
# two or three times with conflicting values; only the last assignment ever
# took effect, so the final values are kept here.
pink = Color(0xFF00FF, 1.0)
red = Color(0xff0000, 1.0)
green = Color(0x00ff00, 1.0)
blue = Color(0x0000ff, 1.0)
black = Color(0x000000, 1.0)
white = Color(0xffffff, 1.0)
grey = Color(0xC0C0C0, 1.0)

# Line styles used by the sprite assets below.
thinline = LineStyle(2, black)
blkline = LineStyle(1, black)
noline = LineStyle(0, white)
coolline = LineStyle(1, black)
blueline = LineStyle(2, blue)
redline = LineStyle(1, red)
greenline = LineStyle(1, pink)
gridline = LineStyle(1, grey)
grid = RectangleAsset(30, 30, gridline, white)

# Full-screen red background.
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, red)
bg = Sprite(bg_asset, (0, 0))
class Guy(Sprite):
    """The player character: a small green rectangle."""

    guy = RectangleAsset(20, 40, coolline, green)

    def __init__(self, x, y):
        """Place the guy with its top-left corner at pixel (x, y)."""
        super().__init__(Guy.guy, (x, y))
        self.x, self.y = x, y
class Brick(Sprite):
    """A stationary 30x30 platform tile."""

    brick = RectangleAsset(30, 30, thinline, pink)

    def __init__(self, x, y):
        super().__init__(Brick.brick, (x, y))
        self.x = x
        self.y = y
        # Fix: step() reads ``self.grav`` but it was never initialized,
        # which raised AttributeError on the first call.
        self.grav = 0

    def step(self):
        """Apply gravity; come to rest when overlapping another brick."""
        self.grav += 0.25
        self.y += self.grav
        collide = self.collidingWithSprites(Brick)
        if collide:
            self.y -= self.grav
            self.grav = 0
class Spring(Sprite):
    """A thin blue pad that bounces the guy upward on contact."""

    spring = RectangleAsset(30, 5, thinline, blue)

    def __init__(self, x, y):
        """Place the spring with its top-left corner at pixel (x, y)."""
        super().__init__(Spring.spring, (x, y))
        self.x, self.y = x, y
# Global physics state: current vertical velocity of the guy, and a spring
# counterpart (the latter appears unused by the game loop).
grav=0
springgrav = 0
class Platformer(App):
    """Sandbox platformer application.

    Keys: 'w' places a brick (snapped to a 30px grid), 's' places a spring,
    'p' (re)spawns the guy at the mouse position; the arrow keys move him.
    Gravity and collisions are applied once per frame in step().
    """

    def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT):
        super().__init__()
        self.mousex = 0
        self.mousey = 0
        self.guy = 0
        self.guysprite = None
        self.brick = None
        self.spring = None
        self.listenKeyEvent('keydown', 'p', self.createGuy)
        self.listenKeyEvent('keydown', 'w', self.createBrick)
        self.listenMouseEvent('mousemove', self.motion)
        self.listenKeyEvent('keydown', 'right arrow', self.R)
        self.listenKeyEvent('keydown', 'left arrow', self.L)
        self.listenKeyEvent('keydown', 'up arrow', self.U)
        self.listenKeyEvent('keydown', 'down arrow', self.D)
        self.listenKeyEvent('keydown', 's', self.createSpring)

    def motion(self, event):
        """Track the mouse so the spawn handlers know where to place things."""
        self.mousex = event.x
        self.mousey = event.y

    def createBrick(self, event):
        """Place a brick snapped to the 30x30 grid."""
        x = self.mousex - self.mousex%30
        y = self.mousey - self.mousey%30
        Brick(x-10, y-10)

    def createSpring(self, event):
        """Place a spring at the current mouse position."""
        Spring(self.mousex, self.mousey)

    def createGuy(self, event):
        """(Re)spawn the guy near the mouse, resetting his velocity."""
        global grav
        if self.guysprite:
            self.guysprite.destroy()
        grav = 0
        self.guysprite = Guy(self.mousex - 30, self.mousey - 30)

    def U(self, event):
        """Jump when standing; if stuck inside a brick, pop upward."""
        global grav
        # Fix: pressing an arrow key before 'p' spawned the guy used to
        # raise AttributeError on ``None``.
        if self.guysprite is None:
            return
        if grav == 0:
            grav = -10
        collisions = self.guysprite.collidingWithSprites(Brick)
        if collisions:
            self.guysprite.y += 50

    def D(self, event):
        """Nudge the guy down unless a brick blocks the way."""
        if self.guysprite is None:
            return
        self.guysprite.y += 5
        collisions = self.guysprite.collidingWithSprites(Brick)
        if collisions:
            self.guysprite.y -= 5

    def R(self, event):
        """Move right unless a brick blocks the way."""
        if self.guysprite is None:
            return
        self.guysprite.x += 10
        collisions = self.guysprite.collidingWithSprites(Brick)
        if collisions:
            self.guysprite.x -= 10

    def L(self, event):
        """Move left unless a brick blocks the way."""
        if self.guysprite is None:
            return
        self.guysprite.x -= 10
        collisions = self.guysprite.collidingWithSprites(Brick)
        if collisions:
            self.guysprite.x += 10

    def step(self):
        """Per-frame physics: gravity, brick collisions, spring bounces."""
        global grav
        if self.guysprite:
            grav += 0.5
            self.guysprite.y += grav
            collisions = self.guysprite.collidingWithSprites(Brick)
            if collisions:
                # Landed on (or inside) a brick: undo the fall and rest.
                self.guysprite.y -= grav
                grav = 0
            sprang = self.guysprite.collidingWithSprites(Spring)
            if sprang:
                grav -= 10
                self.guysprite.y += grav
# Instantiate the game and enter the main loop.
myapp = Platformer(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run()
9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import phash

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE(review): the project is named 'Python Boilerplate' here but the
# package and output basenames below all say 'phash' — confirm intended.
project = u'Python Boilerplate'
copyright = u'2014, Chris Adams'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = phash.__version__
# The full version, including alpha/beta/rc tags.
release = phash.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'phashdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'phash.tex', u'Python Boilerplate Documentation',
     u'Chris Adams', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'phash', u'Python Boilerplate Documentation',
     [u'Chris Adams'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'phash', u'Python Boilerplate Documentation',
     u'Chris Adams', 'phash', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "To |
from greencouriers.tests import *
class TestCourierController(TestController):
    """Functional tests for the courier controller."""

    def test_index(self):
        """The index action should respond without raising."""
        index_url = url(controller='courier', action='index')
        response = self.app.get(index_url)
        # Test response...
|
"""
Copyright (c) 2012 Casey Dunham <casey.dunham@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = 'Casey Dunham <casey.dunham@gmail.com>'
__version__ = '0.1'
import argparse
import urllib
import sys
from urllib2 import (Request, urlopen, HTTPError, URLError)
try:
# Python >= 2.6
import json
except ImportError:
try:
# Python < 2.6
import simplejson as json
except ImportError:
try:
# Google App Engine
from django.utils import simplejson as json
except ImportError:
raise ImportError, "Unable to load a json library"
class TweetDumpError(Exception):
    """Base class for errors raised while dumping tweets."""

    @property
    def message(self):
        """The first positional argument passed to the constructor."""
        return self.args[0]
class RateLimitError(TweetDumpError):
    # Raised when the Twitter API reports the rate limit was hit (HTTP 400).
    pass
# Twitter REST API v1 endpoints (unauthenticated access).
API_URL = "https://api.twitter.com/1/statuses/user_timeline.json?%s"

# we are not authenticating so this will return the rate limit based on our IP
# see (https://dev.twitter.com/docs/api/1/get/account/rate_limit_status)
RATE_LIMIT_API_URL = "https://api.twitter.com/1/account/rate_limit_status.json"

# NOTE(review): this parser is shadowed by the one rebuilt under
# ``if __name__ == '__main__'`` below and appears unused.
parser = argparse.ArgumentParser(description="dump all tweets from user")
parser.add_argument("handle", type=str, help="twitter screen name")
def get_tweets(screen_name, count, maxid=None):
    """Fetch one page of tweets for ``screen_name``.

    :param screen_name: twitter handle to fetch
    :param count: number of tweets to request per page
    :param maxid: id of the last tweet already retrieved; when given, the
        request is anchored just below it so pages do not overlap
    :returns: tuple of (remaining rate-limit calls header value, decoded
        tweet list)
    """
    params = {
        "screen_name": screen_name,
        "count": count,
        "exclude_replies": "true",
        "include_rts": "true"
    }
    # if we include the max_id from the last tweet we retrieved, we will retrieve the same tweet again
    # so decrement it by one to not retrieve duplicate tweets
    if maxid:
        params["max_id"] = int(maxid) - 1
    encoded_params = urllib.urlencode(params)
    query = API_URL % encoded_params
    resp = fetch_url(query)
    # Only the 'remaining' header is consumed; the unused reads of the
    # X-RateLimit-Limit and X-RateLimit-Reset headers were removed.
    ratelimit_remaining = resp.headers["X-RateLimit-Remaining"]
    tweets = json.loads(resp.read())
    return ratelimit_remaining, tweets
def get_initial_rate_info():
    """Query the unauthenticated rate-limit endpoint.

    :returns: tuple of (remaining hits, reset time in epoch seconds,
        human-readable reset time)
    """
    response = fetch_url(RATE_LIMIT_API_URL)
    info = json.loads(response.read())
    remaining = info["remaining_hits"]
    reset_seconds = info["reset_time_in_seconds"]
    reset_label = info["reset_time"]
    return remaining, reset_seconds, reset_label
def fetch_url(url):
    """GET ``url`` and return the open response object.

    Raises RateLimitError on HTTP 400 (API limit reached) and
    TweetDumpError on HTTP 502 (Bad Gateway, retryable); exits the
    process on URL errors.
    """
    try:
        return urlopen(Request(url))
    except HTTPError, e:
        if e.code == 400: # twitter api limit reached
            raise RateLimitError(e.code)
        if e.code == 502: # Bad Gateway, sometimes get this when making requests. just try again
            raise TweetDumpError(e.code)
        # NOTE(review): any other HTTP error is only logged and the function
        # then falls through, implicitly returning None — callers do not
        # check for that.
        print >> sys.stderr, "[!] HTTP Error %s: %s" % (e.code, e.msg)
    except URLError, e:
        print >> sys.stderr, "[!] URL Error: %s URL: %s" % (e.reason, url)
        exit(1)
def print_banner():
    # Print the version/author header and ASCII-art mascot (Python 2 print).
    print "tweet-dump %s (c) 2012 %s" % (__version__, __author__)
    print """ .-.
(. .)__,')
/ V )
\ ( \/ .
`._`.__\\ o ,
<< `' .o..
"""
if __name__ == '__main__':
    # Command-line interface: username and output file are required.
    parser = argparse.ArgumentParser(prog="tweet-dump")
    parser.add_argument('username', help="Twitter Screen Name")
    parser.add_argument('file', help="File to write tweeets to")
    parser.add_argument('--count', help="Number of tweets to retrieve per request", default=200)
    parser.add_argument('--maxid', help="ID of Tweet to start dumping after", default=None)
    args = parser.parse_args()
    screen_name = args.username
    count = args.count
    maxid = args.maxid
    out_file_name = args.file
    out_file = None
    try:
        out_file = open(out_file_name, 'w')
    except IOError, e:
        print >> sys.stderr, "[!] error creating file %s" % out_file_name
        exit(1)
    print_banner()
    print "[*] writing tweets to %s \n[*] dumping tweets for user %s" % (out_file_name, screen_name)
    #print "[*] dumping tweets for user %s" % screen_name,
    max_requests = 5
    requests_made = 0
    tweet_count = 0
    # Page through the timeline until no tweets remain or the rate limit
    # is exhausted. ``maxid`` anchors each page just below the last tweet.
    while True:
        # get initial rate information
        (remaining, rst_time_s, rst_time) = get_initial_rate_info()
        while remaining > 0:
            try:
                (remaining, tweets) = get_tweets(screen_name, count, maxid)
            # NOTE(review): both error types are silently swallowed here and
            # the loop simply retries on the next iteration.
            except RateLimitError:
                pass
            except TweetDumpError, e:
                pass
            else:
                requests_made += 1
                if len(tweets) > 0:
                    for tweet in tweets:
                        maxid = tweet["id"]
                        out_file.write(u"%s %s: %s\n" % (tweet["created_at"], maxid, repr(tweet["text"])))
                        tweet_count += 1
                else:
                    print "[*] reached end of tweets"
                    break
        # NOTE(review): the outer loop always exits after one pass, and
        # ``out_file`` is never explicitly closed.
        break
    print "[*] %d tweets dumped!" % tweet_count
|
# proxy module
| from __future__ import absolute_import
from envisage.resource.resource_manage | r import *
|
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import os
import socket
from pulp.common.config import Config, REQUIRED, ANY, NUMBER, BOOL, OPTIONAL
# Built-in defaults for every consumer configuration section. Values read
# from the configuration files in read_config() override these.
DEFAULT = {
    'server': {
        'host': socket.gethostname(),
        'port': '443',
        'api_prefix': '/pulp/api',
        'rsa_pub': '/etc/pki/pulp/consumer/server/rsa_pub.key',
        'verify_ssl': 'true',
        'ca_path': '/etc/pki/tls/certs/ca-bundle.crt',
    },
    'authentication': {
        'rsa_key': '/etc/pki/pulp/consumer/rsa.key',
        'rsa_pub': '/etc/pki/pulp/consumer/rsa_pub.key'
    },
    'client': {
        'role': 'consumer'
    },
    'filesystem': {
        'extensions_dir': '/usr/lib/pulp/consumer/extensions',
        'repo_file': '/etc/yum.repos.d/pulp.repo',
        'mirror_list_dir': '/etc/yum.repos.d',
        'gpg_keys_dir': '/etc/pki/pulp-gpg-keys',
        'cert_dir': '/etc/pki/pulp/client/repo',
        'id_cert_dir': '/etc/pki/pulp/consumer/',
        'id_cert_filename': 'consumer-cert.pem',
    },
    'reboot': {
        'permit': 'false',
        'delay': '3',
    },
    'output': {
        'poll_frequency_in_seconds': '1',
        'enable_color': 'true',
        'wrap_to_terminal': 'false',
        'wrap_width': '80',
    },
    'messaging': {
        'scheme': 'amqp',
        'host': None,
        'port': '5672',
        'transport': 'qpid',
        'cacert': None,
        'clientcert': None,
    },
    'profile': {
        'minutes': '240',
    }
}

# Validation schema consumed by Config.validate(): nested tuples of
# (section, required-flag, ((option, required-flag, value-pattern), ...)).
SCHEMA = (
    ('server', REQUIRED,
        (
            ('host', REQUIRED, ANY),
            ('port', REQUIRED, NUMBER),
            ('api_prefix', REQUIRED, ANY),
            ('verify_ssl', REQUIRED, BOOL),
            ('ca_path', REQUIRED, ANY),
            ('rsa_pub', REQUIRED, ANY),
        )
    ),
    ('authentication', REQUIRED,
        (
            ('rsa_key', REQUIRED, ANY),
            ('rsa_pub', REQUIRED, ANY),
        )
    ),
    ('client', REQUIRED,
        (
            ('role', REQUIRED, r'consumer'),
        )
    ),
    ('filesystem', REQUIRED,
        (
            ('extensions_dir', REQUIRED, ANY),
            ('repo_file', REQUIRED, ANY),
            ('mirror_list_dir', REQUIRED, ANY),
            ('gpg_keys_dir', REQUIRED, ANY),
            ('cert_dir', REQUIRED, ANY),
            ('id_cert_dir', REQUIRED, ANY),
            ('id_cert_filename', REQUIRED, ANY),
        )
    ),
    ('reboot', REQUIRED,
        (
            ('permit', REQUIRED, BOOL),
            ('delay', REQUIRED, NUMBER),
        )
    ),
    ('output', REQUIRED,
        (
            ('poll_frequency_in_seconds', REQUIRED, NUMBER),
            ('enable_color', REQUIRED, BOOL),
            ('wrap_to_terminal', REQUIRED, BOOL),
            ('wrap_width', REQUIRED, NUMBER)
        )
    ),
    ('messaging', REQUIRED,
        (
            ('scheme', REQUIRED, r'(tcp|ssl|amqp|amqps)'),
            ('host', OPTIONAL, ANY),
            ('port', REQUIRED, NUMBER),
            ('transport', REQUIRED, ANY),
            ('cacert', OPTIONAL, ANY),
            ('clientcert', OPTIONAL, ANY)
        )
    ),
    ('profile', REQUIRED,
        (
            ('minutes', REQUIRED, NUMBER),
        )
    ),
)
def read_config(paths=None, validate=True):
    """
    Read and validate the consumer configuration.
    :param validate: Validate the configuration.
    :type validate: bool
    :param paths: A list of paths to configuration files to read.
        Reads the standard locations when not specified.
    :type paths: list
    :return: A configuration object.
    :rtype: Config
    """
    if not paths:
        paths = ['/etc/pulp/consumer/consumer.conf']
        conf_d_dir = '/etc/pulp/consumer/conf.d'
        paths += [os.path.join(conf_d_dir, i) for i in sorted(os.listdir(conf_d_dir))]
    else:
        # Work on a copy so appending the overrides path below never
        # mutates the caller's list (the original appended in place).
        paths = list(paths)
    overrides = os.path.expanduser('~/.pulp/consumer.conf')
    if os.path.exists(overrides):
        paths.append(overrides)
    config = Config(DEFAULT)
    config.update(Config(*paths))
    if validate:
        config.validate(SCHEMA)
    return config
|
#-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from | nose.plugins.attrib import attr
class test_cp2_x_cincoffset_sealed(BaseBERITestCase):
    """Check CIncOffset on a sealed capability: the offset must be left
    unchanged, an exception must be raised, and CapCause must be set."""

    @attr('capabilities')
    def test_cp2_x_cincoffset_sealed_1(self):
        '''Test that CIncOffset on a sealed capability does not change the offset'''
        self.assertRegisterEqual(self.MIPS.a0, 0, "CIncOffset changed the offset of a sealed capability")

    @attr('capabilities')
    def test_cp2_x_cincoffset_sealed_2(self):
        '''Test that CIncOffset on a sealed capability raised an exception'''
        self.assertRegisterEqual(self.MIPS.a2, 1, "CIncOffset on a sealed capability did not raise an exception")

    @attr('capabilities')
    def test_cp2_x_cincoffset_sealed_3(self):
        '''Test that CIncOffset on a sealed capability sets CapCause'''
        self.assertRegisterEqual(self.MIPS.a3, 0x0301, "CIncOffset on a sealed capability did not set CapCause correctly")
|
from datetime import datetime, date, time, timedelta
from decimal import Decimal
# Minimal Django settings used only by the django-constance test suite.
SECRET_KEY = 'cheese'  # test-only value; never use a hard-coded key in production
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Legacy pre-Django-1.2 setting; the real configuration is DATABASES below.
DATABASE_ENGINE = 'sqlite3'
# Two in-memory SQLite databases so multi-db behaviour can be exercised.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    },
    'secondary': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.staticfiles',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'constance',
    'constance.backends.database',
)
ROOT_URLCONF = 'tests.urls'
# Swap the real Redis client for an in-process mock during tests.
CONSTANCE_REDIS_CONNECTION_CLASS = 'tests.redis_mockup.Connection'
# Custom form-field definitions referenced by name from CONSTANCE_CONFIG.
CONSTANCE_ADDITIONAL_FIELDS = {
    'yes_no_null_select': [
        'django.forms.fields.ChoiceField',
        {
            'widget': 'django.forms.Select',
            'choices': ((None, "-----"), ("yes", "Yes"), ("no", "No"))
        }
    ],
    # note this intentionally uses a tuple so that we can test immutable
    'email': ('django.forms.fields.EmailField',),
}
USE_TZ = True
# One entry per supported value type: (default, help text[, custom field name]).
CONSTANCE_CONFIG = {
    'INT_VALUE': (1, 'some int'),
    'BOOL_VALUE': (True, 'true or false'),
    'STRING_VALUE': ('Hello world', 'greetings'),
    'DECIMAL_VALUE': (Decimal('0.1'), 'the first release version'),
    'DATETIME_VALUE': (datetime(2010, 8, 23, 11, 29, 24),
                       'time of the first commit'),
    'FLOAT_VALUE': (3.1415926536, 'PI'),
    'DATE_VALUE': (date(2010, 12, 24), 'Merry Chrismas'),
    'TIME_VALUE': (time(23, 59, 59), 'And happy New Year'),
    'TIMEDELTA_VALUE': (timedelta(days=1, hours=2, minutes=3), 'Interval'),
    'CHOICE_VALUE': ('yes', 'select yes or no', 'yes_no_null_select'),
    'LINEBREAK_VALUE': ('Spam spam', 'eggs\neggs'),
    'EMAIL_VALUE': ('test@example.com', 'An email', 'email'),
}
DEBUG = True
STATIC_ROOT = './static/'
STATIC_URL = '/static/'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.request',
                'django.template.context_processors.static',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'constance.context_processors.config',
            ],
        },
    },
]
|
"""
URLconf for registration and activation, using django-registration's
default backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.default.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead.
"""
from django.conf.urls import *
from django.views import generic as generic_views
from registration.views import activate
from registration.views import register
# URL patterns for django-registration's default (two-phase) workflow:
# register -> activation email -> activate, plus the stock auth URLs.
urlpatterns = [
    url(
        r'^activate/complete/$',
        generic_views.TemplateView.as_view(
            template_name='registration/activation_complete.html'
        ),
        name='registration_activation_complete'
    ),
    # Activation keys get matched by \w+ instead of the more specific
    # [a-fA-F0-9]{40} because a bad activation key should still get to the view;
    # that way it can return a sensible "invalid key" message instead of a
    # confusing 404.
    url(
        r'^activate/(?P<activation_key>\w+)/$',
        activate,
        {'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_activate'
    ),
    url(
        r'^register/$',
        register,
        {'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_register'
    ),
    url(
        r'^register/complete/$',
        generic_views.TemplateView.as_view(
            template_name='registration/registration_complete.html'
        ),
        name='registration_complete'
    ),
    # Shown when REGISTRATION_OPEN (or equivalent) disallows new signups.
    url(
        r'^register/closed/$',
        generic_views.TemplateView.as_view(
            template_name='registration/registration_closed.html'
        ),
        name='registration_disallowed'
    ),
    url(r'', include('registration.auth_urls')),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Scott Kirkwood. All Rights Reserved.
"""
Build everything for mm2s5.
You'll need:
sudo apt-get install alien help2man fakeroot lintian
Also python-bdist
"""
from pybdist import pybdist
import optparse
import setup
def main():
parser = optparse.OptionParser()
pybdist.add_standard_options(parser)
(options, unused_args) = parser.parse_args()
if not pybdist.handle_standard_options(options | , setup):
print 'Doing nothing. --help for commands.'
if __name__ == '__main__':
main()
| |
# Copyright (c) 2015, Frappe Technologies Pvt. | Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
# Fixture records for the "Cost Center" DocType, loaded from the test JSON.
test_records = frappe.get_test_records('Cost Center')
# -*- coding: utf-8 -*-
__author__ = 'itconsense@gmail.com'
from collections import OrderedDict
from math import pi
from Products.Five import BrowserView
from plone import api
import base64
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import six
LOG = logging.getLogger('evaluate')
class UpgradeIt(BrowserView):
    """One-shot maintenance view.

    Re-imports the Sunburst theme's CSS registry and removes stale
    skin-layer customisations, then reports completion.
    """
    def __call__(self):
        setup_tool = api.portal.get_tool(name='portal_setup')
        setup_tool.runImportStepFromProfile(
            'profile-plonetheme.sunburst:default', 'cssregistry', run_dependencies=False)
        skins_tool = api.portal.get_tool(name='portal_skins')
        custom_layer = skins_tool['custom']
        # Drop leftover customisations that would shadow the theme defaults.
        for item_id in ('main_template', 'base_properties', 'ploneCustom.css'):
            if item_id not in custom_layer:
                continue
            api.content.delete(obj=custom_layer[item_id])
        return "DONE"
class Result(object):
    # Value holder for one question group's evaluation outcome.
    def __init__(self):
        self.good = ''     # comma-joined titles of questions answered 'Meistens'
        self.details = {}  # question title -> advisory text for weaker answers
class EvaluateTestView(BrowserView):
    """Evaluate a submitted self-assessment questionnaire.

    Maps the form's answer values ('Meistens', 'Manchmal', 'Selten', 'Nie')
    to scores, collects per-group advisory text blocks, and renders a radar
    chart of the per-group scores as an inline base64-encoded image.
    """
    # Fallback marker when no text block exists for a question.
    no_text = 'Kein Textbaustein'
    # Answer weights used for the overall summary score.
    factors = {
        'Meistens': 5,
        'Manchmal': 3,
        'Selten': 1,
        'Nie': 0
    }
    # Smaller answer weights used for the radar-chart axes.
    pie_factors = {
        'Meistens': 3,
        'Manchmal': 2,
        'Selten': 1,
        'Nie': 0
    }
    chart_img = ''
    def get_detail_elements(self):
        # Zope script returning {group-id: {question-key: {answer: text, ...}}}.
        zope_script = self.context.restrictedTraverse('text_detail_elements')
        return zope_script()
    def get_summary_elements(self):
        # Zope script returning the 'bad'/'med'/'good' summary texts.
        zope_script = self.context.restrictedTraverse('text_summary_elements')
        return zope_script()
    def text_blocks(self):
        """Build an OrderedDict of group title -> Result plus a 'summary' entry.

        Side effects: sets self.chart_img (data URI of the radar chart) and
        self.legend (group titles, in chart-axis order).
        """
        result = OrderedDict()
        form = self.request.form
        summary = 0
        df = OrderedDict()
        elements = self.get_detail_elements()
        for group in elements.keys():
            if group not in form:
                continue
            group_title = self.context[group].Title()
            result[group_title] = Result()
            good_values = []
            for key, val in form[group].items():
                summary += self.factors[val]
                # NOTE(review): if `key` is missing, `element` is the no_text
                # string and the .get() below would raise AttributeError --
                # presumably keys always exist; confirm against the form.
                element = elements[group].get(key, self.no_text)
                title = element.get('Titel', group_title)
                if val == 'Meistens':
                    good_values.append(title)
                    continue
                text = element.get(val)
                if not text:
                    continue
                if val in element:
                    result[group_title].details[title] = text
                else:
                    result[group_title].details[title] = element.get('default')
                u_group_title = unicode(group_title, 'utf-8')
                if u_group_title not in df:
                    df[u_group_title] = 0
                df[u_group_title] += self.pie_factors[val]
            if good_values:
                result[group_title].good = ', '.join(good_values)
            if not result[group_title].details:
                LOG.warn('Details of group {0} are empty!'.format(group))
        summary_elements = self.get_summary_elements()
        if summary < 75:
            result['summary'] = summary_elements['bad']
        elif summary < 130:
            # BUG FIX: was `elif 75 >= summary < 130`, a chained comparison
            # that is only true for summary == 75, so scores 76-129 wrongly
            # received the 'good' text. Intended bands: <75 bad, 75-129 med.
            result['summary'] = summary_elements['med']
        else:
            result['summary'] = summary_elements['good']
        self.chart_img = 'data:image/jpeg;base64, ' + self.get_radar_chart(df)
        self.legend = df.keys()
        return result
    def get_radar_chart(self, df):
        """Render `df` (axis label -> score) as a base64-encoded PNG radar chart."""
        LOG.info('{0}'.format(df))
        # number of variables (one chart axis per question group)
        categories = list(df)
        N = len(categories)
        # Repeat the first value to close the circular polygon.
        values = df.values()
        values.append(values[0])
        # Angle of each axis: the full circle divided by the number of axes.
        angles = [n / float(N) * 2 * pi for n in range(N)]
        angles += angles[:1]
        # Initialise the spider plot
        fig = plt.figure()
        ax = plt.subplot(111, polar=True)
        # One numbered tick per axis; numbers map to self.legend in the page.
        plt.xticks(angles[:-1], range(1, N+1), color='grey', size=8, rotation='vertical')
        ax.set_rlabel_position(0)
        plt.yticks([])
        plt.ylim(0, min(21, max(values)) + 1)
        # Plot the outline, then fill the enclosed area.
        ax.plot(angles, values, linewidth=1, linestyle='solid')
        ax.fill(angles, values, 'b', alpha=0.1)
        # BUG FIX: removed stray debug `fig.savefig('test.png')` that wrote a
        # file into the server's CWD on every request.
        img = six.BytesIO()
        fig.savefig(img, format='png')
        # Close the figure: pyplot keeps every figure alive otherwise,
        # leaking memory on a long-running server (one figure per request).
        plt.close(fig)
        img.seek(0)
        return base64.b64encode(img.read())
|
from django.contrib import admin
from django.conf import settings
from geonode.base.models import (TopicCategory, SpatialRepresentationType,
Region, RestrictionCodeType, ContactRole, ResourceBase, Link, License, Thumbnail)
class LicenseAdmin(admin.ModelAdmin):
    # Admin listing for data licenses; the name column links to the edit page.
    model = License
    list_display = ('id', 'name')
    list_display_links = ('name',)
class ResourceBaseAdmin(admin.ModelAdmin):
    # Generic admin for all GeoNode resources (layers, maps, documents).
    list_display = ('id','title', 'date', 'category')
    list_display_links = ('id',)
class TopicCategoryAdmin(admin.ModelAdmin):
    """Admin for ISO topic categories (standard TC 211 list).

    Adding/deleting/editing the identifying fields is only allowed when
    settings.MODIFY_TOPICCATEGORY is enabled.
    """
    model = TopicCategory
    list_display_links = ('identifier',)
    list_display = ('identifier', 'description', 'gn_description', 'is_choice')
    if not settings.MODIFY_TOPICCATEGORY:  # idiomatic truth test (was `== False`, PEP 8 E712)
        # The identifying fields come from the standard list and must not be edited.
        exclude = ('identifier', 'description',)
    def has_add_permission(self, request):
        # the records are from the standard TC 211 list, so no way to add
        # unless modification is explicitly enabled in settings.
        return bool(settings.MODIFY_TOPICCATEGORY)
    def has_delete_permission(self, request, obj=None):
        # likewise, deletion is only allowed when modification is enabled.
        return bool(settings.MODIFY_TOPICCATEGORY)
class RegionAdmin(admin.ModelAdmin):
    # Admin for geographic regions, searchable by code or name.
    model = Region
    list_display_links = ('name',)
    list_display = ('code', 'name')
    search_fields = ('code', 'name',)
class SpatialRepresentationTypeAdmin(admin.ModelAdmin):
    # Read-only admin: entries mirror the fixed ISO/TC 211 code list.
    model = SpatialRepresentationType
    list_display_links = ('identifier',)
    list_display = ('identifier', 'description', 'gn_description', 'is_choice')
    def has_add_permission(self, request):
        # the records are from the standard TC 211 list, so no way to add
        return False
    def has_delete_permission(self, request, obj=None):
        # the records are from the standard TC 211 list, so no way to remove
        return False
class RestrictionCodeTypeAdmin(admin.ModelAdmin):
    # Read-only admin: entries mirror the fixed ISO/TC 211 code list.
    model = RestrictionCodeType
    list_display_links = ('identifier',)
    list_display = ('identifier', 'description', 'gn_description', 'is_choice')
    def has_add_permission(self, request):
        # the records are from the standard TC 211 list, so no way to add
        return False
    def has_delete_permission(self, request, obj=None):
        # the records are from the standard TC 211 list, so no way to remove
        return False
class ContactRoleAdmin(admin.ModelAdmin):
    # Links a contact to a resource with a given role; editable inline in the list.
    model = ContactRole
    list_display_links = ('id',)
    list_display = ('id','contact', 'resource', 'role')
    list_editable = ('contact', 'resource', 'role')
class LinkAdmin(admin.ModelAdmin):
    # Admin for download/service links attached to a resource.
    model = Link
    list_display_links = ('id',)
    list_display = ('id', 'resource', 'extension', 'link_type', 'name', 'mime')
    list_filter = ('resource', 'extension', 'link_type', 'mime')
    search_fields = ('name', 'resource__title',)
class ThumbnailAdmin(admin.ModelAdmin):
    """Admin for resource thumbnails, with computed columns pulled from the
    owning ResourceBase (assumed to be exactly one per thumbnail)."""
    model = Thumbnail
    list_display = ('get_title', 'get_geonode_type', 'thumb_file', 'get_thumb_url',)
    search_fields = ('resourcebase__title',)
    def get_title(self, obj):
        # Title of the resource this thumbnail belongs to.
        rb = obj.resourcebase_set.all()[0] # should be always just one!
        return rb.title
    get_title.short_description = 'Title'
    def get_thumb_url(self, obj):
        # Inline <img> preview; allow_tags lets the HTML render in the list view.
        rb = obj.resourcebase_set.all()[0] # should be always just one!
        return u'<img src="%s" alt="%s" height="80px" />' % (rb.get_thumbnail_url(),
                                                             obj.id)
    get_thumb_url.allow_tags = True
    get_thumb_url.short_description = 'URL'
    def get_geonode_type(self, obj):
        # Concrete resource type (layer/map/document) of the owner.
        rb = obj.resourcebase_set.all()[0] # should be always just one!
        return rb.class_name
    get_geonode_type.short_description = 'Type'
# Wire each model up to its admin class defined above.
admin.site.register(TopicCategory, TopicCategoryAdmin)
admin.site.register(Region, RegionAdmin)
admin.site.register(SpatialRepresentationType, SpatialRepresentationTypeAdmin)
admin.site.register(RestrictionCodeType, RestrictionCodeTypeAdmin)
admin.site.register(ContactRole, ContactRoleAdmin)
admin.site.register(ResourceBase, ResourceBaseAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(Thumbnail, ThumbnailAdmin)
admin.site.register(License, LicenseAdmin)
|
############################################################################
##
## Copyright (C) 2006-2008 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
# | # this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## insta | nce, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
""" Do not edit this file!
File automatically generated by scripts/gen_init.py
Change History:
version : description
0.2 : Integrated quickplot module that displays the CDAT plot
widget inside the spreadsheet
0.1 : First automatically generated package based on xml descriptions
"""
from PyQt4 import QtCore, QtGui
import sip
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import (Module, NotCacheable,
ModuleError, new_module)
from core.bundles import py_import
import os, sys
#cdat specific packages
vcs = py_import('vcs',{})
cdms2 = py_import('cdms2', {})
cdutil = py_import('cdutil', {})
#local python modules
from cdat_window import QCDATWindow
from cdat_cell import QCDATWidget
from quickplot import quickplot |
import data
from utils import assert_403, assert_404, assert_200, parse_xml, xpath
# Key under which the parsed response payload lives in client responses.
PRD = 'prd'
def test_sharing(IndivoClient):
    """End-to-end carenet sharing scenario.

    An admin creates a record for Alice; Alice shares selected documents
    with a carenet and adds Bob to it; Bob then verifies he can read exactly
    the allowed documents and nothing else. Returns True on success
    (assertions raise on failure).
    """
    DS = 'ds'
    def get_datastore(obj):
        # Return the client's datastore values when present; False otherwise.
        if hasattr(obj, DS):
            return getattr(obj, DS).values()
        return False
    def set_datastore(obj, **kwargs):
        # Copy matching kwargs onto the client's datastore; raise if absent.
        if hasattr(obj, DS):
            ds = getattr(obj, DS)
            for kwarg, value in kwargs.items():
                if hasattr(ds, kwarg):
                    setattr(ds, kwarg, value)
            return obj
        raise ValueError
    def alice_setup(record_id, bob_account_id):
        # Alice populates her record and shares a subset into a carenet.
        allergy_type = {'type' : 'http://indivo.org/vocab/xml/documents#Allergy'}
        alice_chrome_client = IndivoClient('chrome', 'chrome')
        alice_chrome_client.create_session(data.account)
        alice_chrome_client.read_record(record_id=record_id)
        alice_chrome_client.get_account_permissions(account_id=data.account['account_id'])
        alice_chrome_client.get_account_records(account_id = data.account['account_id'])
        # Alice posts a document
        # (We save the first doc instead of zero
        # due to the contact doc already in alice's account)
        alice_chrome_client.post_document(data=data.doc01)
        document_id = alice_chrome_client.read_documents().response[PRD]['Document'][1]
        # Save the document_id in the client's datastore
        alice_chrome_client.ds.document_id = document_id
        # Save the first carenet_id in the client's datastore
        carenet_id = alice_chrome_client.get_record_carenets().response[PRD]['Carenet'][0]
        # post four documents to Alice's record, 2 allergies and 2 immunizations
        document_1_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy00)), "/Document/@id")[0]
        document_2_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy01)), "/Document/@id")[0]
        document_3_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization)), "/Document/@id")[0]
        document_4_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization2)), "/Document/@id")[0]
        # and one more to test nevershare
        document_5_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy02)), "/Document/@id")[0]
        # auto-share allergies
        alice_chrome_client.post_autoshare(data=allergy_type, carenet_id=carenet_id)
        assert_200(alice_chrome_client.get_autoshare_bytype_all(record_id=record_id))
        # unshare that one allergy, which should negate the autoshare
        alice_chrome_client.delete_carenet_document(record_id = record_id, document_id = document_2_id, carenet_id=carenet_id)
        # nevershare the third allergy
        alice_chrome_client.document_nevershare_set(record_id = record_id, document_id = document_5_id)
        # immunizations are individually shared (well only one of them)
        alice_chrome_client.post_carenet_document(document_id = document_3_id, carenet_id=carenet_id)
        # Alice shares her contact document(s) with the carenet
        contact_doc = parse_xml(alice_chrome_client.read_documents(record_id = record_id, parameters={'type':'Contact'}))
        for doc_id in xpath(contact_doc, '/Documents/Document/@id'):
            alice_chrome_client.post_carenet_document(record_id = record_id, document_id = doc_id, carenet_id = carenet_id)
        # Alice adds bob_account_id to carenet[0]
        alice_chrome_client.post_carenet_account(carenet_id = carenet_id, data='account_id=' + bob_account_id + '&write=false')
        # Review all accounts within carenet[0]
        assert xpath(parse_xml(alice_chrome_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')
        alice_chrome_client.get_carenet_apps(carenet_id = carenet_id)
        alice_chrome_client.read_allergies(record_id = record_id)
        # Finally, return the carenet_id, document_id
        # in order to check Bob's access
        # and a second document that is disallowed
        return carenet_id, [document_1_id, document_3_id], [document_2_id, document_4_id, document_5_id]
    def bob_setup(bob_account_id, record_id, carenet_id, allowed_docs, disallowed_docs):
        # Bob verifies his access is scoped to the carenet, not the record.
        bob_chrome_client = IndivoClient('chrome', 'chrome')
        bob_chrome_client.create_session(data.account02)
        # SZ: Bob should NOT be able to read the docs directly in the record
        for doc_id in allowed_docs+disallowed_docs:
            assert_403(bob_chrome_client.read_document(record_id=record_id, document_id=doc_id))
        assert_403(bob_chrome_client.get_record_carenets(record_id=record_id))
        # Bob should be able to read the allowed docs
        for doc_id in allowed_docs:
            assert_200(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
        # Bob should not be able to read the disallowed docs
        for doc_id in disallowed_docs:
            assert_404(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
        # Bob should be able to list docs in the carenet
        carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id).response[PRD]['Document']
        # with a parameter
        carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id, parameters={'type': 'http://indivo.org/vocab/xml/documents#Allergy'}).response[PRD]['Document']
        # Read carenet allergies
        assert_200(bob_chrome_client.read_carenet_allergies(carenet_id = carenet_id))
        assert_200(bob_chrome_client.read_carenet_problems(carenet_id = carenet_id))
        # Read the contact document, this should work
        contact_doc = parse_xml(bob_chrome_client.read_carenet_special_document(carenet_id = carenet_id, special_document='contact'))
        contact_name = xpath(contact_doc, '/ns:Contact/ns:name/ns:fullName/text()', namespaces={'ns':'http://indivo.org/vocab/xml/documents#'})
        assert(contact_name)
        bob_chrome_client.get_account_permissions(account_id=bob_account_id)
        bob_chrome_client.get_carenet_account_permissions(carenet_id= carenet_id,
                                                          record_id=record_id,
                                                          account_id=bob_account_id)
        # Not yet implemented
        #bob_chrome_client.get_carenet_app_permissions(account_id=bob_account_id)
        return True
    def admin_setup(bob_account_id):
        # Admin bootstraps Alice's record and its carenets; returns record_id.
        admin_client = IndivoClient(data.machine_app_email, data.machine_app_secret)
        admin_client.set_app_id(data.app_email)
        # Create a record for Alice and set her at the owner
        record_id = admin_client.create_record(data=data.contact).response[PRD]['Record'][0]
        admin_client.set_record_owner(data=data.account['account_id'])
        # Create a basic set of carenets
        carenet_names = ['Family2', 'Friends2', 'School/Office']
        for cname in carenet_names:
            admin_client.create_carenet(data='name=' + cname)
        # Check to make sure the admin can list the carenets and the accounts within each one
        carenets = xpath(parse_xml(admin_client.get_record_carenets(record_id = record_id)),'/Carenets/Carenet/@id')
        for carenet_id in carenets:
            assert len(xpath(parse_xml(admin_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')) > 0
        return record_id
    bob_account_id = 'benadida@informedcohort.org'
    # Admin spawning carenets under Alice's newly created record
    record_id = admin_setup(bob_account_id)
    # Given Bob's account id and a record that has been set up for her
    # Alice gives Bob the document_id that she'd like to share with him
    # Even though Alice gives Bob a document_id, Bob has the ability
    # to read all documents within the carenet that Alice added him to
    # 2010-09-13 now Alice also shares her contact URL and we check
    # that Bob can read it at the special URL
    carenet_id, allowed_documents, disallowed_documents = alice_setup(record_id, bob_account_id)
    return bob_setup(bob_account_id, record_id, carenet_id, allowed_documents, disallowed_documents)
|
import re
import time
import isodate
import requests
from cloudbot import hook
from cloudbot.util import timeformat
from cloudbot.util.formatting import pluralize
from cloudbot.util.colors import parse
youtube_re = re.compile(r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)([-_a-zA-Z0-9]+)', re.I)
base_url = 'https://www.googleapis.com/youtube/v3/'
api_url = base_url + 'videos?part=contentDetails%2C+snippet%2C+statistics&id={}&key={}'
search_api_url = base_url + 'search?part=id&maxResults=1'
playlist_api_url = base_url + 'playlists?part=snippet%2CcontentDetails%2Cstatus'
video_url = "http://youtu.be/%s"
err_no_api = "The YouTube API is off in the Google Developers Console."
time_last_request = time.time()
def get_video_description(video_id):
    """Return a formatted one-line summary of a YouTube video, or None.

    Returns None when called within 10 seconds of the previous request
    (crude module-global rate limit), err_no_api when the API is disabled,
    or None on other API errors.
    """
    global time_last_request
    time_elapsed = time.time() - time_last_request
    if time_elapsed > 10:
        time_last_request = time.time()
    else:
        # Rate limited: stay silent rather than hammer the API.
        #return "This looks like a YouTube video. However, the YT api have been called too much, I'm sorry I won't be able to fetch details for you."
        return None
    json = requests.get(api_url.format(video_id, dev_key)).json()
    if json.get('error'):
        if json['error']['code'] == 403:
            return err_no_api
        else:
            return
    data = json['items']
    snippet = data[0]['snippet']
    statistics = data[0]['statistics']
    content_details = data[0]['contentDetails']
    # \x02 is IRC bold.
    out = '\x02{}\x02'.format(snippet['title'])
    if not content_details.get('duration'):
        return out
    length = isodate.parse_duration(content_details['duration'])
    out += ' - length \x02{}\x02'.format(timeformat.format_time(int(length.total_seconds()), simple=True))
    # NOTE(review): assumes likeCount/dislikeCount are always present in
    # statistics -- a KeyError is possible if the API omits them; confirm.
    total_votes = float(statistics['likeCount']) + float(statistics['dislikeCount'])
    if total_votes != 0:
        # format
        likes = pluralize(int(statistics['likeCount']), "like")
        dislikes = pluralize(int(statistics['dislikeCount']), "dislike")
        percent = 100 * float(statistics['likeCount']) / total_votes
        likes = parse("$(dark_green)" + likes + "$(clear)")
        dislikes = parse("$(dark_red)" + dislikes + "$(clear)")
        out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
                                                    dislikes, percent)
    if 'viewCount' in statistics:
        views = int(statistics['viewCount'])
        # "s"[views == 1:] is "" for exactly one view, "s" otherwise.
        out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
    uploader = snippet['channelTitle']
    # NOTE(review): assumes 'publishedAt' always carries the .000Z millisecond
    # suffix -- strptime raises ValueError otherwise; confirm against the API.
    upload_time = time.strptime(snippet['publishedAt'], "%Y-%m-%dT%H:%M:%S.000Z")
    out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
                                                time.strftime("%Y.%m.%d", upload_time))
    if 'contentRating' in content_details:
        # \x03 starts an IRC colour code (4 = red).
        out += ' - \x034NSFW\x02'
    # return re.sub(
    #     r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
    #     '[URL]', out)
    return out.replace("youtu", "you tu") #nup. No spam please
@hook.on_start()
def load_key(bot):
    """Cache the Google Developers Console API key from the bot config at startup."""
    global dev_key
    dev_key = bot.config.get("api_keys", {}).get("google_dev_key", None)
@hook.regex(youtube_re)
def youtube_url(match, event):
    """Auto-responder: describe any YouTube video linked in chat."""
    if event.chan == "#harmonyhosting": # if the channel is #harmonyhosting
        return None # return None, canceling the action
    return get_video_description(match.group(1))
@hook.command("youtube", "you", "yt", "y")
def youtube(text):
"""youtube <query> -- Returns the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
return get_video_description(video_id) + " - " + video_url % video_id
@hook.command("youtime", "ytime")
def youtime(text):
"""youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
return
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
statistics = data[0]['statistics']
if not content_details.get('duration'):
return
length = isodate.parse_duration(content_details['duration'])
l_sec = int(length.total_seconds())
views = int(statistics['viewCount'])
total = int(l_sec * views)
length_text = timeformat.format_time(l_sec, simple=True)
total_text = timeformat.format_time(total, accuracy=8)
return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
'a total run time of {}!'.format(snippet['title'], length_text, views,
total_text)
# Matches YouTube playlist URLs (with or without scheme/port).
ytpl_re = re.compile(r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)
@hook.regex(ytpl_re)
def ytplaylist_url(match, event):
    """Auto-responder: describe any YouTube playlist linked in chat."""
    global time_last_request
    time_elapsed = time.time() - time_last_request
    if time_elapsed > 10:
        time_last_request = time.time()
    else:
        # Rate limited: stay silent rather than hammer the API.
        #return "This looks like a YouTube Playlist. However, the YT api have been called too much, I'm sorry I won't be able to fetch details for you."
        return None
    if event.chan == "#harmonyhosting": # if the channel is #harmonyhosting
        return None # return None, canceling the action
    # Playlist id is the value of the last '=' parameter in the URL tail.
    location = match.group(4).split("=")[-1]
    json = requests.get(playlist_api_url, params={"id": location, "key": dev_key}).json()
    if json.get('error'):
        if json['error']['code'] == 403:
            return err_no_api
        else:
            return 'Error looking up playlist.'
    data = json['items']
    snippet = data[0]['snippet']
    content_details = data[0]['contentDetails']
    title = snippet['title']
    author = snippet['channelTitle']
    num_videos = int(content_details['itemCount'])
    count_videos = ' - \x02{:,}\x02 video{}'.format(num_videos, "s"[num_videos == 1:])
    return "\x02{}\x02 {} - \x02{}\x02".format(title, count_videos, author)
|
ut only contributes one item to the RDD: `(1, 'black')`, while `'mouse'` also appears twice and contributes two items: `(0, 'mouse')` and `(2, 'mouse')`. Use [flatMap](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.flatMap) and [distinct](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.distinct).
# In[15]:
# TODO: Replace <FILL IN> with appropriate code
# Flatten each observation's (featureID, category) tuples, then dedupe.
sampleDistinctFeats = (sampleDataRDD.
                       flatMap(lambda x : x).distinct())
# In[16]:
# TEST Pair RDD of (featureID, category) (2a)
Test.assertEquals(sorted(sampleDistinctFeats.collect()),
                  [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
                   (1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
                  'incorrect value for sampleDistinctFeats')
# #### ** (2b) OHE Dictionary from distinct features **
# #### Next, create an `RDD` of key-value tuples, where each `(featureID, category)` tuple in `sampleDistinctFeats` is a key and the values are distinct integers ranging from 0 to (number of keys - 1). Then convert this `RDD` into a dictionary, which can be done using the `collectAsMap` action. Note that there is no unique mapping from keys to values, as all we require is that each `(featureID, category)` key be mapped to a unique integer between 0 and the number of keys. In this exercise, any valid mapping is acceptable. Use [zipWithIndex](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.zipWithIndex) followed by [collectAsMap](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap).
# #### In our sample dataset, one valid list of key-value tuples is: `[((0, 'bear'), 0), ((2, 'salmon'), 1), ((1, 'tabby'), 2), ((2, 'mouse'), 3), ((0, 'mouse'), 4), ((0, 'cat'), 5), ((1, 'black'), 6)]`. The dictionary defined in Part (1a) illustrates another valid mapping between keys and integers.
# In[17]:
# TODO: Replace <FILL IN> with appropriate code
# Assign each distinct feature a unique integer index and collect as a dict.
sampleOHEDict = (sampleDistinctFeats.
                 zipWithIndex().collectAsMap())
print sampleOHEDict
# In[18]:
# TEST OHE Dictionary from distinct features (2b)
Test.assertEquals(sorted(sampleOHEDict.keys()),
                  [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
                   (1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
                  'sampleOHEDict has unexpected keys')
Test.assertEquals(sorted(sampleOHEDict.values()), range(7), 'sampleOHEDict has unexpected values')
# #### **(2c) Automated creation of an OHE dictionary **
# #### Now use the code from Parts (2a) and (2b) to write a function that takes an input dataset and outputs an OHE dictionary. Then use this function to create an OHE dictionary for the sample dataset, and verify that it matches the dictionary from Part (2b).
# In[19]:
# TODO: Replace <FILL IN> with appropriate code
def createOneHotDict(inputData):
    """Creates a one-hot-encoder dictionary based on the input data.
    Args:
        inputData (RDD of lists of (int, str)): An RDD of observations where each observation is
            made up of a list of (featureID, value) tuples.
    Returns:
        dict: A dictionary where the keys are (featureID, value) tuples and map to values that are
            unique integers.
    """
    # Flatten the per-observation tuple lists, dedupe, then index each
    # distinct feature and collect the (feature, index) pairs as a dict.
    distinctFeatures = inputData.flatMap(lambda observation: observation).distinct()
    return distinctFeatures.zipWithIndex().collectAsMap()
# Build the OHE dictionary automatically and verify it matches Part (2b).
sampleOHEDictAuto = createOneHotDict(sampleDataRDD)
print sampleOHEDictAuto
# In[20]:
# TEST Automated creation of an OHE dictionary (2c)
Test.assertEquals(sorted(sampleOHEDictAuto.keys()),
                  [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
                   (1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
                  'sampleOHEDictAuto has unexpected keys')
Test.assertEquals(sorted(sampleOHEDictAuto.values()), range(7),
                  'sampleOHEDictAuto has unexpected values')
# ### **Part 3: Parse CTR data and generate OHE features**
# #### Before we can proceed, you'll first need to obtain the data from Criteo. If you have already completed this step in the setup lab, just run the cells below and the data will be loaded into the `rawData` variable.
# #### Below is Criteo's data sharing agreement. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the `# TODO` cell below. The file is 8.4 MB compressed. The script below will download the file to the virtual machine (VM) and then extract the data.
# #### If running the cell below does not render a webpage, open the [Criteo agreement](http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/) in a separate browser tab. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the `# TODO` cell below.
# #### Note that the download could take a few minutes, depending upon your connection speed.
# In[21]:
# Run this code to view Criteo's agreement
from IPython.lib.display import IFrame
IFrame("http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/",
600, 350)
# In[22]:
# TODO: Replace <FILL IN> with appropriate code
# Just replace <FILL IN> with the url for dac_sample.tar.gz
import glob
import os.path
import tarfile
import urllib
import urlparse
# Paste url, url should end with: dac_sample.tar.gz
url = 'http://labs.criteo.com/wp-content/uploads/2015/04/dac_sample.tar.gz'
url = url.strip()
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
inputDir = os.path.split(fileName)[0]
def extractTar(check = False):
# Find the zipped archive and extract the dataset
tars = glob.glob('dac_sample*.tar.gz*')
if check and len(tars) == 0:
return False
if len(tars) > 0:
try:
tarFile = tarfile.open(tars[0])
except tarfile.ReadError:
if not check:
print 'Unable to open tar.gz file. Check your URL.'
return False
tarFile.extract('dac_sample.txt', path=inputDir)
print 'Successfully extracted: dac_sample.txt'
return True
else:
print 'You need to retry the download with the correct url.'
print ('Alternatively, you can upload the dac_sample.tar.gz file to your Jupyter root ' +
'directory')
return False
# Driver: ensure data/cs190/dac_sample.txt exists, downloading and/or
# extracting the archive as needed.
if os.path.isfile(fileName):
    print 'File is already available. Nothing to do.'
elif extractTar(check = True):
    print 'tar.gz file was already available.'
elif not url.endswith('dac_sample.tar.gz'):
    print 'Check your download url. Are you downloading the Sample dataset?'
else:
    # Download the file and store it in the same directory as this notebook
    try:
        urllib.urlretrieve(url, os.path.basename(urlparse.urlsplit(url).path))
    except IOError:
        print 'Unable to download and store: {0}'.format(url)
    extractTar()
# In[23]:
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
# Load the raw CTR data as an RDD of strings, normalizing tabs to commas so
# downstream parsing can assume a single separator.
if os.path.isfile(fileName):
    rawData = (sc
               .textFile(fileName, 2)
               .map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data
    print rawData.take(1)
# #### **(3a) Loading and splitting the data **
# #### We are now ready to start working with the actual CTR data, and our first task involves splitting it into training, validation, and test sets. Use the [randomSplit method](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.randomSplit) with the specified weights and seed to create RDDs storing each of these datasets, and then [cache](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.cache) each of these RDDs, as we will be accessing them multiple times in the remainder of this lab. Finally, compute the size of each |
# coding:utf-8
'''
Request/response parameter-name constants shared by the API handlers.

Author : qbeenslee
Created : 15/4/3
'''
UID = 'uid'
TOKEN = 'token'
CLIENT = 'client'
DESCRIPTION = 'description'
EMAIL = 'email'
WHAT = 'what'
IMEI = 'imei'
# NOTE(review): USERNAME and NICKNAME intentionally share the wire key
# 'nickname' -- confirm with callers before collapsing them.
USERNAME = 'nickname'
PWD = 'pwd'
IMAGE_FILES = 'imagefiles'
MSG = 'msg'
VERSION = 'version'
URL = 'url'
NICKNAME = 'nickname'
MOTTO = 'motto'
AVATAR = 'avatar'
WALLPAPER = 'wallpaper'
VERIFY_STATUS = 'verify_status'
LATITUDE = 'latitude'
LONGITUDE = 'longitude'
HIDE_LOCATION = 'hide_location'
PLACE_NAME = 'place_name'
RADIUS = 'radius'
LIMIT = 'limit'
OFFSET = 'offset'
PAGE = 'page'
PAGE_SIZE = 'page_size'
TOTAL_COUNT = 'total_count'
SORT_TYPE = 'sort_type'
CONTENT = 'content'
SID = 'sid'
# PEP 8 / consistency fix: normalized quoting and '=' spacing below
# (was: HEIGHT = "height", WIDTH = "width", OPERATE='operate').
HEIGHT = 'height'
WIDTH = 'width'
OPERATE = 'operate'
|
#!/usr/bin/env python3
"""
Single-threaded HTTP server that extracts article's HTML from a full page HTML.
Accepts POST requests to "/extract" endpoint with body JSON:
{
"html": "<html><title>Title</title><body><p>Paragraph.</p></html>"
}
On success, returns HTTP 200 and extracted HTML:
{
"extracted_html": "Title\n\n<body id=\"readabilityBody\"><p>Paragraph.</p></body>",
"extractor_version": "readability-lxml-0.6.1"
}
On errors, returns HTTP 4xx / 5xx and error message:
{
"error": "You're using it wrong."
}
"""
import argparse
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
from mediawords.util.parse_json import encode_json, decode_json
from mediawords.util.log import create_logger
from extract_article_from_page import extract_article_from_page, extractor_name
# Module-level logger for the extractor service.
log = create_logger(__name__)
# Byte-size caps. _MAX_HTML_LENGTH is presumably enforced inside the
# extractor itself (only _MAX_REQUEST_LENGTH is checked in this file) --
# TODO confirm against extract_article_from_page.
_MAX_HTML_LENGTH = 4 * 1024 * 1024
"""Extractor will refuse to extract HTML pages bigger than this."""
_MAX_REQUEST_LENGTH = _MAX_HTML_LENGTH + (10 * 1024)
"""HTTP server will refuse to serve requests larger than this."""
class ServerHandler(BaseHTTPRequestHandler):
    """Serves "POST /extract": runs the article extractor on submitted HTML."""

    # Allow HTTP/1.1 connections and so don't wait up on "Expect:" headers
    protocol_version = "HTTP/1.1"

    _API_ENDPOINT_PATH = "/extract"

    def __json_response(self, status: int, response: dict) -> bytes:
        """Send the status line and JSON headers; return the encoded body.

        NOTE: this only sends headers -- the caller must write the returned
        bytes to ``self.wfile`` itself (Content-Length is already set to
        match the returned payload).
        """
        json_response = encode_json(response)
        encoded_json_response = json_response.encode("UTF-8", errors="replace")
        self.send_response(status)
        self.send_header("Content-Type", "application/json; charset=UTF-8")
        self.send_header("Content-Length", len(encoded_json_response))
        self.end_headers()
        return encoded_json_response

    def __error_response(self, status: int, message: str) -> bytes:
        """Log *message* and prepare a ``{"error": ...}`` response body."""
        log.error(message)
        return self.__json_response(status=status, response={"error": message})

    def __success_response(self, status: int, response: dict) -> bytes:
        """Prepare a successful JSON response body and log its size."""
        response = self.__json_response(status=status, response=response)
        log.info(f"Returning response ({len(response)} bytes)")
        return response

    def __post(self) -> bytes:
        """Validate the POST request, run the extractor, return body bytes."""
        uri = urlparse(self.path)
        if uri.path != self._API_ENDPOINT_PATH:
            return self.__error_response(
                status=HTTPStatus.NOT_FOUND.value,
                message=f"Only {self._API_ENDPOINT_PATH} is implemented.",
            )

        content_length = int(self.headers.get('Content-Length', 0))
        log.info(f"Received extraction request ({content_length} bytes)...")

        if not content_length:
            return self.__error_response(
                status=HTTPStatus.LENGTH_REQUIRED.value,
                message="Content-Length header is not set.",
            )

        if content_length > _MAX_REQUEST_LENGTH:
            return self.__error_response(
                status=HTTPStatus.REQUEST_ENTITY_TOO_LARGE.value,
                message=f"Request is larger than {_MAX_REQUEST_LENGTH} bytes."
            )

        encoded_body = self.rfile.read(content_length)

        # errors='replace' means decode() itself should not raise; the
        # try/except is kept as a belt-and-braces guard.
        try:
            json_body = encoded_body.decode('utf-8', errors='replace')
        except Exception as ex:
            return self.__error_response(
                status=HTTPStatus.BAD_REQUEST.value,
                message=f"Unable to decode request body: {ex}",
            )

        try:
            body = decode_json(json_body)
        except Exception as ex:
            return self.__error_response(
                status=HTTPStatus.BAD_REQUEST.value,
                message=f"Unable to decode request JSON: {ex}",
            )

        if "html" not in body:
            return self.__error_response(
                status=HTTPStatus.BAD_REQUEST.value,
                message="Request JSON doesn't have 'html' key.",
            )

        html = body["html"]

        try:
            extracted_html = extract_article_from_page(html)
        except Exception as ex:
            return self.__error_response(
                status=HTTPStatus.BAD_REQUEST.value,
                message=f"Unable to extract article HTML from page HTML: {ex}"
            )

        response = {
            'extracted_html': extracted_html,
            'extractor_version': extractor_name(),
        }
        return self.__success_response(
            status=HTTPStatus.OK.value,
            response=response,
        )

    # noinspection PyPep8Naming
    def do_POST(self) -> None:
        """Handle POST: compute the response body and write it out."""
        self.wfile.write(self.__post())

    # noinspection PyPep8Naming
    def do_GET(self):
        # BUG FIX: the original returned the encoded error body but never
        # wrote it to self.wfile, so headers (with a Content-Length) went out
        # with no body and HTTP/1.1 clients hung waiting for it.
        self.wfile.write(self.__error_response(
            status=HTTPStatus.METHOD_NOT_ALLOWED.value,
            message="Try POST instead!",
        ))
def start_http_server(port: int) -> None:
    """Run the extraction HTTP server on *port* until interrupted."""
    log.info(f"Listening on port {port}...")
    httpd = HTTPServer(('', port), ServerHandler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the expected shutdown signal; fall through to cleanup.
        pass
    log.info("Shutting down...")
    httpd.server_close()
if __name__ == '__main__':
    # CLI entry point: parse the listening port and run the server forever.
    parser = argparse.ArgumentParser(description="Start page HTML -> article HTML extraction HTTP server.")
    parser.add_argument("-p", "--port", type=int, default=80, help="Port to listen to")
    args = parser.parse_args()
    start_http_server(port=args.port)
|
V | ERSION = '0.3.4' | |
e_password()
@_manager.monkeypatch(USER_PATH)
def has_usable_password(user):
    # passlib replacement for User.has_usable_password(): a password is
    # usable iff the stored hash passes is_password_usable().
    return is_password_usable(user.password)
#
# patch ``User.set_password() & ``User.check_password()`` to use
# context & get_category (would just leave these as wrappers for hashers
# module under django 1.4, but then we couldn't pass User object into
# get_category very easily)
#
@_manager.monkeypatch(USER_PATH)
def set_password(user, password):
    "passlib replacement for User.set_password()"
    if is_valid_secret(password):
        # NOTE: pulls _get_category from module globals
        cat = _get_category(user)
        user.password = password_context.encrypt(password, category=cat)
    else:
        # invalid secrets (e.g. None) mark the account as having no
        # usable password, matching Django's stock behavior
        user.set_unusable_password()
@_manager.monkeypatch(USER_PATH)
def check_password(user, password):
    "passlib replacement for User.check_password()"
    hash = user.password
    # reject unusable secrets/hashes up front without touching the context
    if not is_valid_secret(password) or not is_password_usable(hash):
        return False
    # NOTE(review): VERSION here appears to be the Django version tuple --
    # pre-1.4 treated an empty hash as unusable; confirm against imports.
    if not hash and VERSION < (1,4):
        return False
    # NOTE: pulls _get_category from module globals
    cat = _get_category(user)
    # verify_and_update() both checks the password and, if the hash uses a
    # deprecated scheme, returns a re-hashed replacement
    ok, new_hash = password_context.verify_and_update(password, hash,
                                                      category=cat)
    if ok and new_hash is not None:
        # migrate to new hash if needed.
        user.password = new_hash
        user.save()
    return ok
#
# override check_password() with our own implementation
#
@_manager.monkeypatch(HASHERS_PATH, enable=has_hashers)
@_manager.monkeypatch(MODELS_PATH)
def check_password(password, encoded, setter=None, preferred="default"):
    "passlib replacement for check_password()"
    # XXX: this currently ignores the "preferred" keyword, since its purpose
    # was for hash migration, and that's handled by the context.
    if not is_valid_secret(password) or not is_password_usable(encoded):
        return False
    ok = password_context.verify(password, encoded)
    # on success, let the caller-provided setter re-hash deprecated hashes
    if ok and setter and password_context.needs_update(encoded):
        setter(password)
    return ok
#
# patch the other functions defined in the ``hashers`` module, as well
# as any other known locations where they're imported within ``contrib.auth``
#
if has_hashers:
    @_manager.monkeypatch(HASHERS_PATH)
    @_manager.monkeypatch(MODELS_PATH)
    def make_password(password, salt=None, hasher="default"):
        "passlib replacement for make_password()"
        if not is_valid_secret(password):
            return make_unusable_password()
        # "default" maps to the context's default scheme (scheme=None)
        if hasher == "default":
            scheme = None
        else:
            scheme = hasher_to_passlib_name(hasher)
        kwds = dict(scheme=scheme)
        handler = password_context.handler(scheme)
        # NOTE: django may specify an empty string for the salt,
        # even if scheme doesn't accept a salt. we omit keyword
        # in that case.
        if salt is not None and (salt or 'salt' in handler.setting_kwds):
            kwds['salt'] = salt
        return password_context.encrypt(password, **kwds)
    @_manager.monkeypatch(HASHERS_PATH)
    @_manager.monkeypatch(FORMS_PATH)
    def get_hasher(algorithm="default"):
        "passlib replacement for get_hasher()"
        if algorithm == "default":
            scheme = None
        else:
            scheme = hasher_to_passlib_name(algorithm)
        # NOTE: resolving scheme -> handler instead of
        # passing scheme into get_passlib_hasher(),
        # in case context contains custom handler
        # shadowing name of a builtin handler.
        handler = password_context.handler(scheme)
        return get_passlib_hasher(handler, algorithm=algorithm)
    # identify_hasher() was added in django 1.5,
    # patching it anyways for 1.4, so passlib's version is always available.
    @_manager.monkeypatch(HASHERS_PATH)
    @_manager.monkeypatch(FORMS_PATH)
    def identify_hasher(encoded):
        "passlib helper to identify hasher from encoded password"
        handler = password_context.identify(encoded, resolve=True,
                                            required=True)
        algorithm = None
        if (has_unsalted_sha1 and handler.name == "django_salted_sha1" and
                encoded.startswith("sha1$$")):
            # django 1.4.6+ uses a separate hasher for "sha1$$digest" hashes,
            # but passlib just reuses the "sha1$salt$digest" handler.
            # we want to resolve to correct django hasher.
            algorithm = "unsalted_sha1"
        return get_passlib_hasher(handler, algorithm=algorithm)
# mark the module as patched so _remove_patch() knows there is work to undo
_patched = True
log.debug("... finished monkeypatching django")
def _remove_patch():
    """undo the django monkeypatching done by this module.

    offered as a last resort if it's ever needed.

    .. warning::
        This may cause problems if any other Django modules have imported
        their own copies of the patched functions, though the patched
        code has been designed to throw an error as soon as possible in
        this case.
    """
    global _patched
    if not _patched:
        # Not fully patched; a partial patch may still need reverting.
        if _manager: # pragma: no cover -- sanity check
            log.warning("reverting partial monkeypatching of django...")
            _manager.unpatch_all()
            password_context.load({})
            log.debug("...finished removing django monkeypatching")
            return True
        log.debug("django not monkeypatched")
        return False
    # Full patch in place: undo it and clear the context.
    log.debug("removing django monkeypatching...")
    _manager.unpatch_all(unpatch_conflicts=True)
    password_context.load({})
    _patched = False
    log.debug("...finished removing django monkeypatching")
    return True
#=============================================================================
# main code
#=============================================================================
def _load():
global _get_category
# TODO: would like to add support for inheriting config from a preset
# (or from existing hasher state) and letting PASSLIB_CONFIG
# be an update, not a replacement.
# TODO: wrap and import any custom hashers as passlib handlers,
# so they could be used in the passlib config.
# load config from settings
_UNSET = object()
config = getattr(settings, "PASSLIB_CONFIG", _UNSET)
if config is _UNSET:
# XXX: should probably deprecate this alias
config = getattr(settings, "PASSLIB_CONTEXT", _UNSET)
if config is _UNSET:
config = "passlib-default"
if config is None:
warn("setting PASSLIB_CONFIG=None is deprecated, "
"and support will be removed in Passlib 1.8, "
"use PASSLIB_CONFIG='disabled' instead.",
DeprecationWarning)
config = "disabled"
elif not isinstance(config, (unicode, bytes, dict)):
raise ExpectedTypeError(config, "str or dict", "PASSLIB_CONFIG")
# load custom category func (if any)
get_category = getattr(settings, "PASSLIB_GET_CATEGORY", None)
if get_category and not callable(get_category):
raise ExpectedTypeError(get_category, "callable", "PASSLIB_GET_CATEGORY")
# check if we've been disabled
if config == "disabled":
if _patched: # pragma: no cover -- sanity check
log.error("didn't expect monkeypatching would be applied!")
_remove_patch()
return
# resolve any preset aliases
if isinstance(config, str) and '\n' not in config:
config = get_preset_config(config)
# setup context
_apply_patch()
password_context.load(config)
if get_category:
# NOTE: _get_category is module global which is read by
# monkeypatched functions constructed by _apply_patch()
_get_category = get_category
log.debug(" |
hyperparameters.)
.. seealso::
:obj:`~gpytorch.variational.OrthogonallyDecoupledVariationalStrategy` (a variant proposed by
`Salimbeni et al. (2018)`_ that uses orthogonal projections.)
:param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
Typically passed in when the VariationalStrategy is created in the
__init__ method of the user defined model.
:param torch.Tensor inducing_points: Tensor containing a set of inducing
points to use for variational inference.
:param ~gpytorch.variational.VariationalDistribution variational_distribution: A
VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
:param learn_inducing_locations: (Default True): Whether or not
the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
parameters of the model).
:type learn_inducing_locations: `bool`, optional
:type mean_var_batch_dim: `int`, optional
:param mean_var_batch_dim: (Default `None`):
Set this parameter (ideally to `-1`) to indicate which dimension corresponds to different
kernel hyperparameters for the mean/variance functions.
.. _Cheng et al. (2017):
https://arxiv.org/abs/1711.10127
.. _Salimbeni et al. (2018):
https://arxiv.org/abs/1809.08820
.. _Jankowiak et al. (2020):
https://arxiv.org/abs/1910.07123
Example (**different** hypers for mean/variance):
>>> class MeanFieldDecoupledModel(gpytorch.models.ApproximateGP):
>>> '''
>>> A batch of 3 independent MeanFieldDecoupled PPGPR models.
>>> '''
>>> def __init__(self, inducing_points):
>>> # The variational parameters have a batch_shape of [3]
>>> variational_distribution = gpytorch.variational.MeanFieldVariationalDistribution(
>>> inducing_points.size(-1), batch_shape=torch.Size([3]),
>>> )
>>> variational_strategy = gpytorch.variational.BatchDecoupledVariationalStrategy(
>>> self, inducing_points, variational_distribution, learn_inducing_locations=True,
>>> mean_var_batch_dim=-1
>>> )
>>>
>>> # The mean/covar modules have a batch_shape of [3, 2]
>>> # where the last batch dim corresponds to the mean & variance hyperparameters
>>> super().__init__(variational_strategy)
>>> self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([3, 2]))
>>> self.covar_module = gpytorch.kernels.ScaleKernel(
>>> gpytorch.kernels.RBFKernel(batch_shape=torch.Size([3, 2])),
>>> batch_shape=torch.Size([3, 2]),
>>> )
Example (**shared** hypers for mean/variance):
>>> class MeanFieldDecoupledModel(gpytorch.models.ApproximateGP):
>>> '''
>>> A batch of 3 independent MeanFieldDecoupled PPGPR models.
>>> '''
>>> def __init__(self, inducing_points):
>>> # The variational parameters have a batch_shape of [3]
>>> variational_distribution = gpytorch.variational.MeanFieldVariationalDistribution(
>>> inducing_points.size(-1), batch_shape=torch.Size([3]),
>>> )
>>> variational_strategy = gpytorch.variational.BatchDecoupledVariationalStrategy(
>>> self, inducing_points, variational_distribution, learn_inducing_locations=True,
>>> )
>>>
>>> # The mean/covar modules have a batch_shape of [3]
>>> super().__init__(variational_strategy)
>>> self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([3]))
>>> self.covar_module = gpytorch.kernels.ScaleKernel(
>>> gpytorch.kernels.RBFKernel(batch_shape=torch.Size([3])),
>>> batch_shape=torch.Size([3]),
>>> )
"""
def __init__(
self, model, inducing_points, variational_distribution, learn_inducing_locations=True, mean_var_batch_dim=None
):
if isinstance(variational_distribution, DeltaVariationalDistribution):
raise NotImplementedError(
"BatchDecoupledVariationalSt | rategy does not work with DeltaVariationalDistribution"
)
if mean_var_batch_dim is not None and mean_var_batch_dim >= 0:
raise ValueError( | f"mean_var_batch_dim should be negative indexed, got {mean_var_batch_dim}")
self.mean_var_batch_dim = mean_var_batch_dim
# Maybe unsqueeze inducing points
if inducing_points.dim() == 1:
inducing_points = inducing_points.unsqueeze(-1)
# We're going to create two set of inducing points
# One set for computing the mean, one set for computing the variance
if self.mean_var_batch_dim is not None:
inducing_points = torch.stack([inducing_points, inducing_points], dim=(self.mean_var_batch_dim - 2))
else:
inducing_points = torch.stack([inducing_points, inducing_points], dim=-3)
super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)
def _expand_inputs(self, x, inducing_points):
# If we haven't explicitly marked a dimension as batch, add the corresponding batch dimension to the input
if self.mean_var_batch_dim is None:
x = x.unsqueeze(-3)
else:
x = x.unsqueeze(self.mean_var_batch_dim - 2)
return super()._expand_inputs(x, inducing_points)
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
# We'll compute the covariance, and cross-covariance terms for both the
# pred-mean and pred-covar, using their different inducing points (and maybe kernel hypers)
mean_var_batch_dim = self.mean_var_batch_dim or -1
# Compute full prior distribution
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_covar = full_output.lazy_covariance_matrix
# Covariance terms
num_induc = inducing_points.size(-2)
test_mean = full_output.mean[..., num_induc:]
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# Compute interpolation terms
# K_ZZ^{-1/2} K_ZX
# K_ZZ^{-1/2} \mu_Z
L = self._cholesky_factor(induc_induc_covar)
if L.shape != induc_induc_covar.shape:
# Aggressive caching can cause nasty shape incompatibilies when evaluating with different batch shapes
# TODO: Use a hook to make this cleaner
try:
pop_from_cache_ignore_args(self, "cholesky_factor")
except CachingError:
pass
L = self._cholesky_factor(induc_induc_covar)
interp_term = L.inv_matmul(induc_data_covar.double()).to(full_inputs.dtype)
mean_interp_term = interp_term.select(mean_var_batch_dim - 2, 0)
var_interp_term = interp_term.select(mean_var_batch_dim - 2, 1)
# Compute the mean of q(f)
# k_XZ K_ZZ^{-1/2} m + \mu_X
# Here we're using the terms that correspond to the mean's inducing points
predictive_mean = torch.add(
torch.matmul(mean_interp_term.transpose(-1, -2), inducing_values.unsqueeze(-1)).squeeze(-1),
test_mean.select(mean_var_batch_dim - 1, 0),
)
# Compute the covariance of q(f)
# K_XX + k_XZ K_ZZ^{-1/2} (S - I) K_ZZ^{-1/2} k_ZX
middle_term = self.prior_distribution.lazy_covariance_matrix.mul(-1)
if variational_inducing_covar is not None:
middle_term = SumLazyTensor(variational_inducing_covar, middle_term)
predictive_covar = SumLazyTensor(
|
from . import *
class TestTemplateUse(TestCase):
    """Template-level tests for the image-resizing helpers exposed to Jinja."""

    def test_resized_img_src(self):
        # resized_img_src() should render an imgsizer URL with a query
        # string (the '?' check) appended for size/signature parameters.
        @self.app.route('/resized_img_src')
        def use():
            return render_template_string('''
                <img src="{{ resized_img_src('cc.png') }}" />
            '''.strip())
        res = self.client.get('/resized_img_src')
        self.assert200(res)
        self.assertIn('src="/imgsizer/cc.png?', res.data)

    def test_url_for(self):
        # url_for('images', ...) must produce the same imgsizer URL shape.
        @self.app.route('/url_for')
        def use():
            return render_template_string('''
                <img src="{{ url_for('images', filename='cc.png') }}" />
            '''.strip())
        res = self.client.get('/url_for')
        self.assert200(res)
        self.assertIn('src="/imgsizer/cc.png?', res.data)
|
import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
    """Spike-removal processing method backed by scipy.signal.medfilt."""

    can_be_applied = True
    # Single confirmation step before the filter runs.
    _steps = [
        {
            'class': Confirmation,
            'title': 'Apply median filter',
            'desc': 'Press Forward to apply Median Filter.',
        },
    ]
    description = """
    Median filter is smoothing algorithm similar to the Savitzky-Golay, however instead of fitting of the polynomial,
    the middle point of the window is moved to the value of median of the points in the window. The median filter is
    most usefull for removal of spikes from the signal (single point, large amplitude errors).
    """

    @classmethod
    def __str__(cls):
        return "Median Filter"

    def apply(self, user, dataset):
        # Refuse to run unless the confirmation procedure was completed.
        if self.model.completed is not True:
            raise VoltPyNotAllowed('Incomplete procedure.')
        self.__perform(dataset)

    def __perform(self, dataset):
        # Replace each curve's y-vector with its median-filtered counterpart.
        for curve_data in dataset.curves_data.all():
            filtered = medfilt(curve_data.yVector)
            dataset.updateCurve(self.model, curve_data, filtered)
        dataset.save()

    def finalize(self, user):
        self.__perform(self.model.dataset)
        self.model.step = None
        self.model.completed = True
        self.model.save()
        return True
|
docs = """django-angular-scaffold
=======================
[](https://travis-ci.org/mc706/django-angular-scaffold)
[](http://badge.fury.io/py/django-angular-scaffold)
[](https://landscape.io/github/mc706/django-angular-scaffold/master)
[](https://coveralls.io/r/mc706/django-angular-scaffold)
set of django management commands to scaffold a django + angular project
##Installation
Install using pip
```
pip install django-angular-scaffold
```
include in your INSTALLED_APPS
```
#settings.py
...
INSTALLED_APPS = (
...
'angular_scaffold',
...
)
```
##Commands
The following are commands that are made available through this package.
###scaffold
```
./manage.py scaffold
```
Builds a assets folder structure in the following structure:
```
/assets
+ - app
| + - config
| + - controllers
| + - directives
| + - services
| + - views
| + - app.js
+ - lib
+ - fonts
+ - scripts
+ - styles
+ - site
| + - _global.scss
| + - _mixins.scss
| + - _variables.scss
+ - vendor
+ styles.scss
```
It will prompt for an application name, this will add start the angular app off.
It also automatically setups the `styles.scss` to import the pre stubbed out globals, mixins, and variables files.
The rest of the folders are stubbed out with a `.gitkeep` file to allow the directory structure to be added to git.
###startview
```
./manage.py startview <viewname>
```
creates new view, creates new styles and adds it to the import
Can accept a path. The following are valid viewname arguments
```
./manage startview homepage
./manage startview home-page
./manage startview ticket/new
./manage startview settings/options/test
```
This will create a view file in the appropriate folder, create a mirrored scss file in the site directory, and
import the style into the main styles.scss file.
###generatedocs
```
./manage.py generatedocs
```
Adds a `/docs` folder and copies some basic documentation into it
###createdebugger
```
./manage.py createdebugger <password>
```
Creates a config file for angular that overrides console.log and replaces it with
$log.debug. Then disables $log.debug unless a query string with an encoded password
is included.
This makes it very easy to debug your application without having to expose the underlying
logging to the users. It also allows you to keep your logging statements in your app when going to
production, as they are turned off and hidden by default.
###startservice
```
./manage.py startservice <service_name>
```
Creates a starter service. Will ask for the endpoint, and the pluralization of the service name,
and will create list, get, post, put, and delete methods for th | at service.
###startcontroller
```
./manage.py startcontroller <controller_name>
```
Creates a new empty controller in controllers directory.
###createcsrf
```
./manage.py createcsrf
```
Adds the csrf token to your angular ajax headers in a config file. Also injects the `ngCookies` dependency into your app.
###startroutes
```
./manage.py startroutes
```
Adds a routes config file and inject ngRoute as a dependency.
Creates a defaulted route to `/` using `HomeController` and `views/home.html`.
###addroute
```
./manage.py addroute
```
Adds a route to the routes. Will prompt for url, controller, views, and a number of variables to resolve.
* `when:` - put in the url in angular notation for this route example: `/tickets/:ticket/edit/`
* `controller:` - the short name of the controller example: `ticket = TicketController`
* `view: ` - path relative to the views folder to the html template example: `tickets/ticket.html`
* `resolve` - name of variable to resolve into controller
###createlimiter
```
./manage.py createlimiter
```
Adds a runtime config that gives access to a `$rootScope.checkLimiter()` function that you can use in services
to limit the number of calls made. """ |
"""
Copyright (c) 2016 - Sean Bailey - All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distr | ibuted under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Django Imports
from django.core.urlresolvers import reverse
# Docs Imports
from docs.models import Article
class Ca | tegory:
"""
Article Category
"""
def __init__(self, title, description, url, colour, db_code):
self.title = title
self.description = description
self.url = url
self.colour = colour
self.db_code = db_code
def get_absolute_url(self):
return reverse("category", args=[self.url])
def recent_articles(self):
"""
Collects the most recent articles of a category.
"""
return Article.objects.filter(
category=self.db_code,
).order_by("last_edited")[:10]
# Category registry: one instance per docs category, collected in
# CATEGORIES for iteration (e.g. navigation menus).
NEWS = Category(
    title="News",
    description="Site news, changelogs and updates.",
    url="news",
    colour="#F44336",
    db_code="NE",
)

SUPPORT = Category(
    title="Support",
    description="Helping you understand Comet.",
    url="support",
    colour="#F45A36",
    db_code="SU",
)

DEVELOPER = Category(
    title="Developer",
    description="Developer logs, explanations and all things code.",
    url="developer",
    colour="#F47B36",
    db_code="DE",
)

COMMUNITY = Category(
    title="Community",
    description="For the community, by the community.",
    url="community",
    colour="#F49336",
    db_code="CO",
)

OTHER = Category(
    title="Other",
    description="Miscellaneous articles.",
    url="other",
    colour="#F4A336",
    db_code="OT",
)

CATEGORIES = [NEWS, SUPPORT, DEVELOPER, COMMUNITY, OTHER]
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Terminal emulation tools"""
import os
class ANSIEscapeCodeHandler(object):
    """ANSI Escape sequences handler

    Tracks the text style implied by SGR ("Select Graphic Rendition")
    codes -- colors, intensity, italic, underline -- and invokes the
    abstract ``set_style()`` hook after every code so subclasses can
    apply the accumulated state to their rendering backend.
    """
    # Per-platform palettes: (normal, bright/light) hex pairs indexed by
    # the ANSI color number 0-7.
    if os.name == 'nt':
        # Windows terminal colors:
        ANSI_COLORS = ( # Normal, Bright/Light
                       ('#000000', '#808080'), # 0: black
                       ('#800000', '#ff0000'), # 1: red
                       ('#008000', '#00ff00'), # 2: green
                       ('#808000', '#ffff00'), # 3: yellow
                       ('#000080', '#0000ff'), # 4: blue
                       ('#800080', '#ff00ff'), # 5: magenta
                       ('#008080', '#00ffff'), # 6: cyan
                       ('#c0c0c0', '#ffffff'), # 7: white
                       )
    elif os.name == 'mac':
        # Terminal.app colors:
        # NOTE(review): os.name is 'posix' on modern macOS, so this branch
        # is likely dead on current Pythons -- kept for compatibility.
        ANSI_COLORS = ( # Normal, Bright/Light
                       ('#000000', '#818383'), # 0: black
                       ('#C23621', '#FC391F'), # 1: red
                       ('#25BC24', '#25BC24'), # 2: green
                       ('#ADAD27', '#EAEC23'), # 3: yellow
                       ('#492EE1', '#5833FF'), # 4: blue
                       ('#D338D3', '#F935F8'), # 5: magenta
                       ('#33BBC8', '#14F0F0'), # 6: cyan
                       ('#CBCCCD', '#E9EBEB'), # 7: white
                       )
    else:
        # xterm colors:
        ANSI_COLORS = ( # Normal, Bright/Light
                       ('#000000', '#7F7F7F'), # 0: black
                       ('#CD0000', '#ff0000'), # 1: red
                       ('#00CD00', '#00ff00'), # 2: green
                       ('#CDCD00', '#ffff00'), # 3: yellow
                       ('#0000EE', '#5C5CFF'), # 4: blue
                       ('#CD00CD', '#ff00ff'), # 5: magenta
                       ('#00CDCD', '#00ffff'), # 6: cyan
                       ('#E5E5E5', '#ffffff'), # 7: white
                       )

    def __init__(self):
        # NOTE(review): initial values (None) differ from reset() (False)
        # for italic/bold/underline -- kept as-is since callers may rely
        # on distinguishing "never set" from "explicitly off".
        self.intensity = 0
        self.italic = None
        self.bold = None
        self.underline = None
        self.foreground_color = None
        self.background_color = None
        self.default_foreground_color = 30
        self.default_background_color = 47

    def set_code(self, code):
        """Apply a single SGR *code* to the state, then call set_style()."""
        assert isinstance(code, int)
        if code == 0:
            # Reset all settings
            self.reset()
        elif code == 1:
            # Text color intensity
            self.intensity = 1
            # The following line is commented because most terminals won't
            # change the font weight, against ANSI standard recommendation:
            # self.bold = True
        elif code == 3:
            # Italic on
            self.italic = True
        elif code == 4:
            # Underline simple
            self.underline = True
        elif code == 22:
            # Normal text color intensity
            self.intensity = 0
            self.bold = False
        elif code == 23:
            # No italic
            self.italic = False
        elif code == 24:
            # No underline
            self.underline = False
        elif 30 <= code <= 37:
            # Text color (idiom fix: chained comparison)
            self.foreground_color = code
        elif code == 39:
            # Default text color
            self.foreground_color = self.default_foreground_color
        elif 40 <= code <= 47:
            # Background color (idiom fix: chained comparison)
            self.background_color = code
        elif code == 49:
            # Default background color
            self.background_color = self.default_background_color
        self.set_style()

    def set_style(self):
        """
        Set font style with the following attributes:
        'foreground_color', 'background_color', 'italic',
        'bold' and 'underline'
        """
        raise NotImplementedError

    def reset(self):
        """Restore the state produced by SGR code 0 (all attributes off)."""
        self.current_format = None
        self.intensity = 0
        self.italic = False
        self.bold = False
        self.underline = False
        self.foreground_color = None
        self.background_color = None
|
":", 1)[0]
self.name = name
self.interfaces = interfaces
self.mandatory_variants = []
self.optional_variants = []
self.variants = []
for variant in variants:
if variant[:1] == "!":
self.mandatory_variants.append(variant[1:])
else:
self.optional_variants.append(variant)
self.variants.append(variant)
self.score = score
self.func = func
self.variant_processor_func = None
def __repr__(self):
return "<%s id=%r variants=%r>" % (self.__class__.__name__, self.id, self.variants)
def _invoke(self, func, *args, **kwargs):
# We forcefully override strategy here. This lets a strategy
# function always access its metadata and directly forward it to
# subcomponents without having to filter out strategy.
kwargs["strategy"] = self
return func(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self._invoke(self.func, *args, **kwargs)
def variant_processor(self, func):
"""Registers a variant reducer function that can be used to postprocess
all variants created from this strategy.
"""
self.variant_processor_func = func
return func
def get_grouping_component(self, event, variant, config):
"""Given a specific variant this calculates the grouping component.
"""
args = []
for iface_path in self.interfaces:
iface = event.interfaces.get(iface_path)
if iface is None:
return None
args.append(iface)
return self(event=event, variant=variant, config=config, *args)
    def get_grouping_component_variants(self, event, config):
        """This returns a dictionary of all components by variant that this
        strategy can produce.

        Mandatory variants are computed first; optional variants are then
        suppressed (contributes=False) when no mandatory variant contributed
        or when an optional variant hashes identically to a mandatory one.
        A registered variant_processor_func, if any, post-processes the
        final dictionary.
        """
        rv = {}
        # trivial case: we do not have mandatory variants and can handle
        # them all the same.
        if not self.mandatory_variants:
            for variant in self.variants:
                component = self.get_grouping_component(event, variant, config)
                if component is not None:
                    rv[variant] = component
        else:
            # hash -> variant name for every mandatory variant that contributes
            mandatory_component_hashes = {}
            prevent_contribution = None
            for variant in self.mandatory_variants:
                component = self.get_grouping_component(event, variant, config)
                if component is None:
                    continue
                if component.contributes:
                    mandatory_component_hashes[component.get_hash()] = variant
                rv[variant] = component
            # If no mandatory variant contributed, optional variants must
            # not contribute either.
            prevent_contribution = not mandatory_component_hashes
            for variant in self.optional_variants:
                # We also only want to create another variant if it
                # produces different results than the mandatory components
                component = self.get_grouping_component(event, variant, config)
                if component is None:
                    continue
                # In case this variant contributes we need to check two things
                # here: if we did not have a system match we need to prevent
                # it from contributing.  Additionally if it matches the system
                # component we also do not want the variant to contribute but
                # with a different message.
                if component.contributes:
                    if prevent_contribution:
                        component.update(
                            contributes=False,
                            hint="ignored because %s variant is not used"
                            % (
                                list(mandatory_component_hashes.values())[0]
                                if len(mandatory_component_hashes) == 1
                                else "other mandatory"
                            ),
                        )
                    else:
                        hash = component.get_hash()
                        duplicate_of = mandatory_component_hashes.get(hash)
                        if duplicate_of is not None:
                            component.update(
                                contributes=False,
                                hint="ignored because hash matches %s variant" % duplicate_of,
                            )
                rv[variant] = component
        # Give the registered post-processor (if any) a chance to rewrite
        # the full variant dictionary.
        if self.variant_processor_func is not None:
            rv = self._invoke(self.variant_processor_func, rv, event=event, config=config)
        return rv
class StrategyConfiguration(object):
    """Base class for a named grouping configuration.

    Concrete configurations are created as dynamically-built subclasses
    (see ``create_strategy_configuration``) which fill in the class-level
    attributes below.
    """

    # Unique identifier of this configuration.
    id = None
    # Optional base configuration strategies/delegates are inherited from.
    base = None
    # The part of ``id`` before the first colon.
    config_class = None
    # Mapping of strategy id -> strategy chosen for this configuration.
    strategies = {}
    # Mapping of interface path -> delegate strategy.
    delegates = {}
    changelog = None
    hidden = False
    risk = RISK_LEVEL_LOW

    def __init__(self, enhancements=None, **extra):
        """Create a configuration instance.

        :param enhancements: serialized enhancements; ``None`` yields an
            empty ``Enhancements`` rule set.
        :param extra: accepted for forward compatibility and ignored.
        """
        if enhancements is None:
            enhancements = Enhancements([])
        else:
            enhancements = Enhancements.loads(enhancements)
        self.enhancements = enhancements

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.id)

    def iter_strategies(self):
        """Iterates over all strategies by highest score to lowest."""
        return iter(sorted(self.strategies.values(), key=lambda x: -x.score))

    def get_grouping_component(self, interface, *args, **kwargs):
        """Invokes a delegate grouping strategy.  If no such delegate is
        configured a fallback grouping component is returned.
        """
        path = interface.path
        strategy = self.delegates.get(path)
        if strategy is not None:
            kwargs["config"] = self
            return strategy(interface, *args, **kwargs)
        return GroupingComponent(id=path, hint="grouping algorithm does not consider this value")

    @classmethod
    def as_dict(cls):
        """Return a JSON-serializable description of this configuration.

        Fix: the first parameter of this classmethod is the class object and
        was previously (confusingly) named ``self``; it is now ``cls``.
        """
        return {
            "id": cls.id,
            "base": cls.base.id if cls.base else None,
            "strategies": sorted(cls.strategies),
            "changelog": cls.changelog,
            "delegates": sorted(x.id for x in cls.delegates.values()),
            "hidden": cls.hidden,
            "risk": cls.risk,
            "latest": projectoptions.lookup_well_known_key("sentry:grouping_config").get_default(
                epoch=projectoptions.LATEST_EPOCH
            )
            == cls.id,
        }
def create_strategy_configuration(
id, strategies=None, delegates=None, changelog=None, hidden=False, base=None, risk=None
):
"""Declares a new strategy configuration.
Values can be in | herited from a base configuration. For strategies if there is
a strategy of the same class it's replaced. For delegates if there is a
delegation for the same interface it's replaced.
It's i | mpossible to remove a strategy of a class when a base is declared (same
for delegates).
"""
class NewStrategyConfiguration(StrategyConfiguration):
pass
NewStrategyConfiguration.id = id
NewStrategyConfiguration.base = base
NewStrategyConfiguration.config_class = id.split(":", 1)[0]
NewStrategyConfiguration.strategies = dict(base.strategies) if base else {}
NewStrategyConfiguration.delegates = dict(base.delegates) if base else {}
if risk is None:
risk = RISK_LEVEL_LOW
NewStrategyConfiguration.risk = risk
NewStrategyConfiguration.hidden = hidden
by_class = {}
for strategy in six.itervalues(NewStrategyConfiguration.strategies):
by_class.setdefault(strategy.strategy_class, []).append(strategy.id)
for strategy_id in strategies or {}:
strategy = lookup_strategy(strategy_id)
if strategy.score is None:
raise RuntimeError("Unscored strategy %s added to %s" % (strategy_id, id))
for old_id in by_class.get(strategy.strategy_class) or ():
NewStrategyConfiguration.strategies.pop(old_id, None)
NewStrategyConfiguration.strategies[strategy_id] = strategy
new_delegates = set()
for strategy_id in delegates or ():
strategy = lookup_strategy(strategy_id)
for |
# -*- coding: utf8 -*-
from .interface import Reliability, Session, Handler
from .logger import LogName
from .server import Serve | r
from .protocol import Protocol, packet_classes
from .packet import ApplicationPacket
from .portscanner import PortScanner
# Names re-exported as this package's public API (used by `from ... import *`).
__all__ = [
    'Reliability',
    'Session',
    'Handler',
    'LogName',
    'Server',
    'Protocol',
    'ApplicationPacket',
    'PortScanner',
    'packet_classes',
]
'''@file standard_trainer.py
contains the StandardTrainer'''
from nabu.neuralnetworks.trainers import trainer
class StandardTrainer(trainer.Trainer):
    '''a trainer with no added functionality'''
    def aditional_loss(self):
        '''
        add an additional loss (the standard trainer adds none)

        NOTE(review): the method name keeps its historical misspelling
        because the base class/callers presumably look it up by this
        exact name -- confirm before renaming.

        returns:
            the additional loss or None
        '''
        return None
    def chief_only_hooks(self, outputs):
        '''add hooks only for the chief worker

        Args:
            outputs: the outputs generated by the create graph method

        Returns:
            a list of hooks (none are needed here)
        '''
        return []
    def hooks(self, outputs):
        '''add hooks for the session

        Args:
            outputs: the outputs generated by the create graph method

        Returns:
            a list of hooks (none are needed here)
        '''
        return []
|
the user interface. Internal calculations are performed by
a scalar based on Jan 1, 1900.
Valid NormalDate ranges include (-9999,1,1) B.C.E. through
(9999,12,31) C.E./A.D.
1.0
No changes, except the version number. After 3 years of use by
various parties I think we can consider it stable.
0.8
Added Prof. Stephen Walton's suggestion for a range method
- module author resisted the temptation to use lambda <0.5 wink>
0.7
Added Dan Winkler's suggestions for __add__, __sub__ methods
0.6
Modifications suggested by Kevin Digweed to fix:
- dayOfWeek, dayOfWeekAbbrev, clone methods
- Permit NormalDate to be a better behaved superclass
0.5
Minor tweaking
0.4
- Added methods __cmp__, __hash__
- Added Epoch variable, scoped to the module
- Added setDay, setMonth, setYear methods
0.3
Minor touch-ups
0.2
        - Fixed bug for certain B.C.E. leap years
- Added Jim Fulton's suggestions for short alias class name =ND
and __getstate__, __setstate__ methods
Special thanks: Roedy Green
"""
def __init__(self, normalDate=None):
"""
Accept 1 of 4 values to initialize a NormalDate:
1. None - creates a NormalDate for the current day
2. integer in yyyymmdd format
3. string in yyyymmdd format
4. tuple in (yyyy, mm, dd) - localtime/gmtime can also be used
"""
if normalDate is None:
self.setNormalDate(time.localtime(time.time()))
else:
self.setNormalDate(normalDate)
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not isinstance(days,int):
raise NormalDateException( \
'add method parameter must be integer type')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to normalDate and return a new, calculated value"""
if not isinstance(days,int):
raise NormalDateException( \
| '__add__ parameter must be integer type')
cloned = self. | clone()
cloned.add(days)
return cloned
def __radd__(self,days):
'''for completeness'''
return self.__add__(days)
def clone(self):
"""return a cloned instance of this normalDate"""
return self.__class__(self.normalDate)
    def __cmp__(self, target):
        # NOTE: __cmp__ (and the builtin cmp used below) is a Python 2-only
        # protocol; Python 3 ignores this method entirely.
        # Anything that is not NormalDate-like sorts after self (returns 1).
        if target is None:
            return 1
        elif not hasattr(target, 'normalDate'):
            return 1
        else:
            return cmp(self.normalDate, target.normalDate)
    def day(self):
        """return the day as integer 1-31"""
        # the last two digits of the [-]yyyymmdd form are the day,
        # regardless of the sign
        return int(repr(self.normalDate)[-2:])
    def dayOfWeek(self):
        """return integer representing day of week, Mon=0, Tue=1, etc."""
        # delegates to the module-level dayOfWeek() helper
        return dayOfWeek(*self.toTuple())
    def dayOfWeekAbbrev(self):
        """return day of week abbreviation for current date: Mon, Tue, etc."""
        return _dayOfWeekName[self.dayOfWeek()][:3]
    def dayOfWeekName(self):
        """return day of week name for current date: Monday, Tuesday, etc."""
        return _dayOfWeekName[self.dayOfWeek()]
    def dayOfYear(self):
        """day of year (1-based)"""
        if self.isLeapYear():
            daysByMonth = _daysInMonthLeapYear
        else:
            daysByMonth = _daysInMonthNormal
        # sum the lengths of all months before the current one
        priorMonthDays = 0
        for m in range(self.month() - 1):
            priorMonthDays = priorMonthDays + daysByMonth[m]
        return self.day() + priorMonthDays
def daysBetweenDates(self, normalDate):
"""
return value may be negative, since calculation is
self.scalar() - arg
"""
if isinstance(normalDate,NormalDate):
return self.scalar() - normalDate.scalar()
else:
return self.scalar() - NormalDate(normalDate).scalar()
def equals(self, target):
if isinstance(target,NormalDate):
if target is None:
return self.normalDate is None
else:
return self.normalDate == target.normalDate
else:
return 0
    def endOfMonth(self):
        """returns (cloned) last day of month"""
        # __repr__()[-8:-2] is the yyyymm prefix of the yyyymmdd form
        return self.__class__(self.__repr__()[-8:-2]+str(self.lastDayOfMonth()))
    def firstDayOfMonth(self):
        """returns (cloned) first day of month"""
        return self.__class__(self.__repr__()[-8:-2]+"01")
def formatUS(self):
"""return date as string in common US format: MM/DD/YY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-6:-4])
def formatUSCentury(self):
"""return date as string in 4-digit year US format: MM/DD/YYYY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-8:-4])
    # The _fmtXXX helpers back formatMS(); each one renders the text for the
    # corresponding {XXX} placeholder.
    def _fmtM(self):
        # month as a bare digit string, no zero padding
        return str(self.month())
    def _fmtMM(self):
        # zero-padded 2-digit month
        return '%02d' % self.month()
    def _fmtMMM(self):
        return self.monthAbbrev()
    def _fmtMMMM(self):
        return self.monthName()
    def _fmtMMMMM(self):
        # first character of the month name
        return self.monthName()[0]
    def _fmtD(self):
        # day of month as a bare digit string
        return str(self.day())
    def _fmtDD(self):
        # zero-padded 2-digit day
        return '%02d' % self.day()
    def _fmtDDD(self):
        return self.dayOfWeekAbbrev()
    def _fmtDDDD(self):
        return self.dayOfWeekName()
    def _fmtYY(self):
        # 2-digit year (year modulo 100)
        return '%02d' % (self.year()%100)
    def _fmtYYYY(self):
        return str(self.year())
def formatMS(self,fmt):
'''format like MS date using the notation
{YY} --> 2 digit year
{YYYY} --> 4 digit year
{M} --> month as digit
{MM} --> 2 digit month
{MMM} --> abbreviated month name
{MMMM} --> monthname
{MMMMM} --> first character of monthname
{D} --> day of month as digit
{DD} --> 2 digit day of month
{DDD} --> abrreviated weekday name
{DDDD} --> weekday name
'''
r = fmt[:]
f = 0
while 1:
m = _fmtPat.search(r,f)
if m:
y = getattr(self,'_fmt'+string.upper(m.group()[1:-1]))()
i, j = m.span()
r = (r[0:i] + y) + r[j:]
f = i + len(y)
else:
return r
    def __getstate__(self):
        """minimize persistent storage requirements (pickle only the int)"""
        return self.normalDate
    def __hash__(self):
        # hash by the underlying yyyymmdd integer
        return hash(self.normalDate)
    def __int__(self):
        # int(nd) yields the raw yyyymmdd integer
        return self.normalDate
    def isLeapYear(self):
        """
        determine if specified year is leap year, returning true (1) or
        false (0); delegates to the module-level isLeapYear()
        """
        return isLeapYear(self.year())
    def _isValidNormalDate(self, normalDate):
        """checks for date validity in [-]yyyymmdd format; returns 1/0"""
        if not isinstance(normalDate,int):
            return 0
        # more than 9 characters cannot be [-]yyyymmdd
        if len(repr(normalDate)) > 9:
            return 0
        # normalize to a fixed-width string: 9 chars when signed, else 8
        if normalDate < 0:
            dateStr = "%09d" % normalDate
        else:
            dateStr = "%08d" % normalDate
        if len(dateStr) < 8:
            return 0
        elif len(dateStr) == 9:
            # a 9-character form must carry an explicit sign
            if (dateStr[0] != '-' and dateStr[0] != '+'):
                return 0
        year = int(dateStr[:-4])
        if year < -9999 or year > 9999 or year == 0:
            return 0  # note: zero (0) is not a valid year
        month = int(dateStr[-4:-2])
        if month < 1 or month > 12:
            return 0
        if isLeapYear(year):
            maxDay = _daysInMonthLeapYear[month - 1]
        else:
            maxDay = _daysInMonthNormal[month - 1]
        day = int(dateStr[-2:])
        if day < 1 or day > maxDay:
            return 0
        if year == 1582 and month == 10 and day > 4 and day < 15:
            return 0  # special case of 10 days dropped: Oct 5-14, 1582
        return 1
def lastDayOfMonth(self):
"""returns last day of the month as integer 28-31"""
if self.isLeapYear():
return _daysInMonthLeapYear[self.month() - 1]
else:
return _daysInMon |
# Since bkr is a namespace package (and thus cannot have version specific
# code in bkr.__init__), the version details are retrieved from here in
# order to correctly handle module shadowing on sys.path
# NOTE(review): keep this string in sync with the release/packaging metadata.
__version__ = '28.2'
|
"""Tests for `fix.with_fixture`."""
from __future__ import with_statement
import os
| import shutil
import tempfile
from types import FunctionType
from fix import with_fixture
def test_exists():
    """`fix.with_fixture` function exists"""
    # sanity check: the public entry point is a plain function object
    assert isinstance(with_fixture, FunctionType)
def test_setup_only():
    """`setup_only` fixture works as expected"""
    def setup_only(context):
        """A fixture with no `teardown()`."""
        def setup():
            """Add something to the context."""
            assert context == {}
            context.squee = "kapow"
        # returning just `setup` (no teardown) is the minimal fixture form
        return setup
    @with_fixture(setup_only)
    def case(context):
        """Check that the context has been set up."""
        assert context == {"squee": "kapow"}
    case() # pylint: disable=E1120
def test_setup_teardown():
    """`setup_teardown` fixture works as expected"""
    def setup_teardown(context):
        """A fixture with both `setup()` and `teardown()`."""
        def setup():
            """Add something to the context."""
            assert context == {}
            context.squee = "kapow"
        def teardown():
            """Check that `context.squee` has changed."""
            assert context == {"squee": "boing"}
        return setup, teardown
    @with_fixture(setup_teardown)
    def case(context):
        """Alter the context."""
        assert context == {"squee": "kapow"}
        context.squee = "boing"
    case() # pylint: disable=E1120
def test_multiple_invocation():
    """`multiple` fixture creates a fresh context each invocation"""
    def multiple(context):
        """A fixture to be invoked multiple times."""
        def setup():
            """Add something to the context."""
            # an empty dict here proves the context is fresh per invocation
            assert context == {}
            context.squee = "kapow"
        def teardown():
            """Check that `context.squee` has changed."""
            assert context == {"squee": "kapow", "boing": "thunk"}
        return setup, teardown
    @with_fixture(multiple)
    def case(context):
        """Add to the context."""
        assert context == {"squee": "kapow"}
        context.boing = "thunk"
    for _ in range(3):
        case() # pylint: disable=E1120
def test_external():
    """`external` fixture interacts as expected with the 'real world'."""
    def external(context, files=3):
        """A fixture to manipulate temporary files and directories."""
        def setup():
            """Create some temporary files."""
            context.temp_dir = tempfile.mkdtemp()
            context.filenames = ["file_%03d" % i for i in range(files)]
            for filename in context.filenames:
                with open(os.path.join(context.temp_dir, filename), "w") as f:
                    f.write("This is the file %r.\n" % filename)
        def teardown():
            """Delete the temporary files created in `setup()`."""
            shutil.rmtree(context.temp_dir)
        return setup, teardown
    @with_fixture(external, files=5)
    def check_files(context):
        """Return the number of present and absent files."""
        present = 0
        absent = 0
        for filename in context.filenames:
            if os.path.exists(os.path.join(context.temp_dir, filename)):
                present += 1
            else:
                absent += 1
        return context.temp_dir, present, absent
    temp_dir, present, absent = check_files() # pylint: disable=E1120
    # teardown must have removed the directory, and all 5 files existed
    assert not os.path.exists(temp_dir)
    assert present == 5
    assert absent == 0
|
from __future__ import unicode_literals
from . import responses
url_bases = [
"https://route53.amazonaws.com/201.-..-../",
]
url_paths = {
'{0}hostedzone$': responses.list_or_create_hostzone_response,
'{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,
'{0}hos | tedzone/[^/]+/rrset/?$': responses.rrset_response,
'{0}healthcheck': responses.health_check_response,
'{0}tags/(healthcheck|hostedzone)/*': responses.list_or_change_tags_fo | r_resource_request,
'{0}trafficpolicyinstances/*': responses.not_implemented_response
}
|
er, # The amount under minimum limits
'50020': InsufficientFunds, # Insufficient balance
'50021': InvalidOrder, # Price is under minimum limits
'50026': InvalidOrder, # Market price parameter error
'invalid order query time': ExchangeError, # start time is greater than end time; or the time interval between start time and end time is greater than 48 hours
'invalid start time': BadRequest, # start time is a date 30 days ago; or start time is a date in the future
'invalid end time': BadRequest, # end time is a date 30 days ago; or end time is a date in the future
'20003': ExchangeError, # operation failed, {"status":"error","ts":1595730308979,"err_code":"bad-request","err_msg":"20003"}
'01001': ExchangeError, # order failed, {"status":"fail","err_code":"01001","err_msg":"系统异常,请稍后重试"}
},
'broad': {
'invalid size, valid range': ExchangeError,
},
},
'precisionMode': TICK_SIZE,
'commonCurrencies': {
'JADE': 'Jade Protocol',
},
'options': {
'fetchMarkets': [
'spot',
'swap',
],
'defaultType': 'spot', # 'spot', 'swap'
'defaultSubType': 'linear', # 'linear', 'inverse'
'broker': {
'spot': 'iauIBf#',
'swap': 'iauIBf#',
},
},
})
    def fetch_time(self, params={}):
        """Fetch the exchange server time.

        :param dict params: extra parameters forwarded to the endpoint
        :returns: the server timestamp (milliseconds, per the sample below)
            taken from the response's ``data`` field, as an integer
        """
        response = self.publicSpotGetPublicTime(params)
        #
        # {
        #   code: '00000',
        #   msg: 'success',
        #   requestTime: 1645837773501,
        #   data: '1645837773501'
        # }
        #
        return self.safe_integer(response, 'data')
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets', ['spot', 'swap'])
result = []
for i in range(0, len(types)):
type = types[i]
if type == 'swap':
subTypes = ['umcbl', 'dmcbl']
for j in range(0, len(subTypes)):
markets = self.fetch_markets_by_type(type, self.extend(params, {
'productType': subTypes[j],
}))
result = self.array_concat(result, markets)
else:
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
    def parse_market(self, market):
        """Convert one raw exchange market description into the unified
        ccxt market structure.

        Spot markets carry a ``_SPBL`` suffix in the id and expose
        ``priceScale``/``quantityScale`` decimal counts; swap markets
        (``_UMCBL`` linear, ``_DMCBL`` inverse) expose
        ``pricePlace``/``priceEndStep`` and ``volumePlace``/``minTradeNum``
        which are combined into tick sizes below.
        """
        #
        # spot
        #
        #   {
        #       symbol: 'ALPHAUSDT_SPBL',
        #       symbolName: 'ALPHAUSDT',
        #       baseCoin: 'ALPHA',
        #       quoteCoin: 'USDT',
        #       minTradeAmount: '2',
        #       maxTradeAmount: '0',
        #       takerFeeRate: '0.001',
        #       makerFeeRate: '0.001',
        #       priceScale: '4',
        #       quantityScale: '4',
        #       status: 'online'
        #   }
        #
        # swap
        #
        #   {
        #       symbol: 'BTCUSDT_UMCBL',
        #       makerFeeRate: '0.0002',
        #       takerFeeRate: '0.0006',
        #       feeRateUpRatio: '0.005',
        #       openCostUpRatio: '0.01',
        #       quoteCoin: 'USDT',
        #       baseCoin: 'BTC',
        #       buyLimitPriceRatio: '0.01',
        #       sellLimitPriceRatio: '0.01',
        #       supportMarginCoins: ['USDT'],
        #       minTradeNum: '0.001',
        #       priceEndStep: '5',
        #       volumePlace: '3',
        #       pricePlace: '1'
        #   }
        #
        #
        marketId = self.safe_string(market, 'symbol')
        quoteId = self.safe_string(market, 'quoteCoin')
        baseId = self.safe_string(market, 'baseCoin')
        quote = self.safe_currency_code(quoteId)
        base = self.safe_currency_code(baseId)
        # the first supported margin coin doubles as the settle currency
        supportMarginCoins = self.safe_value(market, 'supportMarginCoins', [])
        settleId = self.safe_string(supportMarginCoins, 0)
        settle = self.safe_currency_code(settleId)
        symbol = base + '/' + quote
        # market ids look like BASEQUOTE_TYPEID, e.g. BTCUSDT_UMCBL
        parts = marketId.split('_')
        typeId = self.safe_string(parts, 1)
        type = None
        linear = None
        inverse = None
        swap = False
        spot = False
        contract = False
        pricePrecision = None
        amountPrecision = None
        if typeId == 'SPBL':
            type = 'spot'
            spot = True
            priceScale = self.safe_string(market, 'priceScale')
            amountScale = self.safe_string(market, 'quantityScale')
            pricePrecision = self.parse_number(self.parse_precision(priceScale))
            amountPrecision = self.parse_number(self.parse_precision(amountScale))
        else:
            type = 'swap'
            swap = True
            contract = True
            # contract symbols carry the settle currency: BASE/QUOTE:SETTLE
            symbol = symbol + ':' + settle
            if typeId == 'UMCBL':
                linear = True
                inverse = False
            elif typeId == 'DMCBL':
                inverse = True
                linear = False
            priceDecimals = self.safe_integer(market, 'pricePlace')
            amountDecimals = self.safe_integer(market, 'volumePlace')
            priceStep = self.safe_string(market, 'priceEndStep')
            amountStep = self.safe_string(market, 'minTradeNum')
            # shift the step by the decimal count to obtain the tick size
            precisePrice = Precise(priceStep)
            precisePrice.decimals = self.sum(precisePrice.decimals, priceDecimals)
            precisePrice.reduce()
            priceString = str(precisePrice)
            pricePrecision = self.parse_number(priceString)
            preciseAmount = Precise(amountStep)
            preciseAmount.decimals = self.sum(preciseAmount.decimals, amountDecimals)
            preciseAmount.reduce()
            amountString = str(preciseAmount)
            amountPrecision = self.parse_number(amountString)
        status = self.safe_string(market, 'status')
        active = None
        if status is not None:
            active = status == 'online'
        maker = self.safe_number(market, 'makerFeeRate')
        taker = self.safe_number(market, 'takerFeeRate')
        precision = {
            'price': pricePrecision,
            'amount': amountPrecision,
        }
        limits = {
            'amount': {
                'min': self.safe_number(market, 'minTradeAmount'),
                'max': None,
            },
            'price': {
                'min': None,
                'max': None,
            },
            'cost': {
                'min': None,
                'max': None,
            },
        }
        return {
            'info': market,
            'id': marketId,
            'symbol': symbol,
            'quoteId': quoteId,
            'baseId': baseId,
            'quote': quote,
            'base': base,
            'type': type,
            'spot': spot,
            'swap': swap,
            'future': False,
            'option': False,
            'margin': False,
            'contract': contract,
            'contractSize': None,
            'linear': linear,
            'inverse': inverse,
            'settleId': settleId,
            'settle': settle,
            'expiry': None,
            'expiryDatetime': None,
            'optionType': None,
            'strike': None,
            'active': active,
            'maker': maker,
            'taker': taker,
            'precision': precision,
            'limits': limits,
        }
def fetch_markets_by_type(self, type, params={}):
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetPublicProducts',
'swap': 'publicMixGetMarketContracts',
})
response = getattr(self, method)(params)
#
|
from django.db import | models
from django.contrib.auth.models import User
class OdooUser(models.Model):
    # Links a Django auth user to their Odoo identity.
    # NOTE(review): OneToOneField without on_delete implies Django < 2.0
    # (where it defaulted to CASCADE) -- confirm the target Django version.
    user = models.OneToOneField(User)
    # The Odoo-side id doubles as this table's primary key.
    odoo_id = models.BigIntegerField(primary_key=True)
    username = models.CharField(max_length=256)
|
TWITTER_BUTTON = """
<a href="https://twitter.com/igor_chubin" class="twitter-follow-button" data-show-count="false" data-button="grey">Follow @igor_chubin</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){j | s=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
"""
# HTML snippet: GitHub star button for the chubin/cheat.sh repository.
# Fix: the aria-label previously said "chubin/wttr.in" (a copy-paste from
# the wttr.in project) while every other attribute targets cheat.sh.
GITHUB_BUTTON = """
<!-- Place this tag where you want the button to render. -->
<a aria-label="Star chubin/cheat.sh on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/cheat.sh#stargazers_count" data-count-href="/chubin/cheat.sh/stargazers" data-icon="octicon-star" href="https://github.com/chubin/cheat.sh" class="github-button">cheat.sh</a>
"""
# HTML snippet: GitHub star button for the chubin/cheat.sheets repository.
GITHUB_BUTTON_2 = """
<!-- Place this tag where you want the button to render. -->
<a aria-label="Star chubin/cheat.sheets on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/cheat.sheets#stargazers_count" data-count-href="/chubin/cheat.sheets/stargazers" data-icon="octicon-star" href="https://github.com/chubin/cheat.sheets" class="github-button">cheat.sheets</a>
"""
# Loader script shared by all GitHub buttons; emit it once per page.
GITHUB_BUTTON_FOOTER = """
<!-- Place this tag right after the last button or just before your close body tag. -->
<script async defer id="github-bjs" src="https://buttons.github.io/buttons.js"></script>
"""
|
fetch.fetch_command(aid, "filename")
    # Smoke tests: each fetch helper is called to ensure it runs without
    # raising; no return values are asserted.
    def test_untar_command(self):
        fetch.untar_command("fake.tar")
    def test_gunzip_command(self):
        fetch.gunzip_command("fake.gz")
    def test_latex_file_name(self):
        for aid in test_aids:
            fetch.latex_file_name(aid)
    def test_latex_file_path(self):
        for aid in test_aids:
            fetch.latex_file_path(aid)
    def test_file_name_base(self):
        for aid in test_aids:
            fetch.file_name_base(aid)
    def test_source_file_extension(self):
        for aid in test_aids:
            fetch.source_file_extension(aid)
    def test_source_file_exists(self):
        for aid in test_aids:
            fetch.source_file_exists(aid)
    def test_source_file_name(self):
        for aid in test_aids:
            fetch.source_file_name(aid)
    def test_source_file_path(self):
        for aid in test_aids:
            fetch.source_file_path(aid)
    def test_source_file_path_without_extension(self):
        for aid in test_aids:
            fetch.source_file_path_without_extension(aid)
    def test_file_type_string(self):
        fetch.file_type_string(test_file)
    def test_is_tar(self):
        fetch.is_tar(test_file)
    def test_is_gzip(self):
        fetch.is_gzip(test_file)
    def test_is_pdf(self):
        fetch.is_pdf(test_file)
    def test_is_tex(self):
        fetch.is_tex(test_file)
    def test_is_other(self):
        fetch.is_other(test_file)
    @unittest.skipIf(not network_tests, "Skipping network tests.")
    def test_fetch_source_and_latex(self):
        # this exercises fetch.source, fetch.all_source, fetch.latex, and
        # fetch.all_latex
        fetch.all_source(test_aids, delay=test_delay, force=True)
        fetch.all_latex(test_aids)
class UpdateTest(unittest.TestCase):
    """Smoke tests for the RSS update helpers (network required)."""
    @unittest.skipIf(not network_tests, "Skipping network tests.")
    def test_fetch_rss(self):
        update.fetch_rss()
    @unittest.skipIf(not network_tests, "Skipping network tests.")
    def test_parse_rss(self):
        update.parse_rss()
class UtilTest(unittest.TestCase):
    """Tests for the util helpers (cwd restoration, can/uncan round trips)."""
    def test_remember_cwd(self):
        # the context manager must restore the working directory on exit
        cwd = os.getcwd()
        with util.remember_cwd():
            os.chdir("..")
        self.assertEqual(os.getcwd(), cwd)
    def test_can_uncan_file_object(self):
        # round-trip through an open file object
        obj = [1,2,3]
        tf = tempfile.TemporaryFile()
        util.can(obj, tf)
        tf.seek(0)
        self.assertEqual(util.uncan(tf), obj)
    def test_can_uncan_file_name(self):
        # round-trip through a file name (path string)
        obj = [1,2,3]
        tf = tempfile.NamedTemporaryFile()
        util.can(obj, tf.name)
        tf.seek(0)
        self.assertEqual(util.uncan(tf.name), obj)
class ArxivIdTest(unittest.TestCase):
    """Tests for old-style (archive/yymmnnn) and new-style (yymm.nnnn)
    arXiv id validation and field extraction."""
    def test_old(self):
        # good ids
        self.assertTrue(arxiv_id.is_old('astro-ph/1234567'))
        self.assertTrue(arxiv_id.is_old('astro-ph/1234567v1'))
        self.assertTrue(arxiv_id.is_old('astro-ph/1234567v12'))
        # too short
        self.assertFalse(arxiv_id.is_old('astro-ph/123456'))
        self.assertFalse(arxiv_id.is_old('astro-ph/1234567v'))
        # too long
        self.assertFalse(arxiv_id.is_old('astro-ph/12345678'))
        # wrong letter
        self.assertFalse(arxiv_id.is_old('astro-ph/1234567a1'))
        # junk at start
        self.assertFalse(arxiv_id.is_old('astro-ph/a1234567'))
        self.assertFalse(arxiv_id.is_old('astro-ph/a1234567v1'))
        self.assertFalse(arxiv_id.is_old('astro-ph/a1234567v12'))
        # junk at end
        self.assertFalse(arxiv_id.is_old('astro-ph/1234567a'))
        self.assertFalse(arxiv_id.is_old('astro-ph/1234567v1a'))
        self.assertFalse(arxiv_id.is_old('astro-ph/1234567v12a'))
        # two versions
        self.assertFalse(arxiv_id.is_old('astro-ph/1234567v1v2'))
        # No archive name
        self.assertFalse(arxiv_id.is_old('/1234567v1v2'))
        # No slash
        self.assertFalse(arxiv_id.is_old('astro-ph1234567v1v2'))
    def test_old_id_parse(self):
        self.assertEqual(arxiv_id.archive('astro-ph/1234567v12'), 'astro-ph')
        self.assertEqual(arxiv_id.yymm('astro-ph/1234567v12'), '1234')
        self.assertEqual(arxiv_id.number('astro-ph/1234567v12'), '567')
        self.assertEqual(arxiv_id.version('astro-ph/1234567v12'), 'v12')
        self.assertEqual(arxiv_id.version('astro-ph/1234567'), '')
    def test_new_id_parse(self):
        # new-style ids have no archive component
        self.assertEqual(arxiv_id.archive('1234.5678v12'), '')
        self.assertEqual(arxiv_id.yymm('1234.5678v12'), '1234')
        self.assertEqual(arxiv_id.number('1234.5678v12'), '5678')
        self.assertEqual(arxiv_id.version('1234.5678v12'), 'v12')
        self.assertEqual(arxiv_id.version('1234.5678'), '')
    def test_is_new(self):
        # good ids
        self.assertTrue(arxiv_id.is_new('1234.5678'))
        self.assertTrue(arxiv_id.is_new('1234.5678v1'))
        self.assertTrue(arxiv_id.is_new('1234.5678v12'))
        # wrong delimiter
        # NOTE(review): assertTrue under a "wrong delimiter" heading looks
        # deliberate (the matcher is apparently lenient about the separator
        # character) -- verify against the is_new implementation.
        self.assertTrue(arxiv_id.is_new('1234a5678'))
        # too short
        self.assertFalse(arxiv_id.is_new('123.5678'))
        self.assertFalse(arxiv_id.is_new('1234.567'))
        self.assertFalse(arxiv_id.is_new('1234.5678v'))
        # too long
        self.assertFalse(arxiv_id.is_new('1234.56788'))
        # wrong letter
        self.assertFalse(arxiv_id.is_new('1234.5678a1'))
        # junk at start
        self.assertFalse(arxiv_id.is_new('a1234.5678'))
        self.assertFalse(arxiv_id.is_new('a1234.5678v1'))
        self.assertFalse(arxiv_id.is_new('a1234.5678v12'))
        # junk at end
        self.assertFalse(arxiv_id.is_new('1234.5678a'))
        self.assertFalse(arxiv_id.is_new('1234.5678v1a'))
        self.assertFalse(arxiv_id.is_new('1234.5678v12a'))
        # two versions
        self.assertFalse(arxiv_id.is_new('1234.5678v1v2'))
class ScrapeTest(unittest.TestCase):
    """Tests for the comment-scraping helpers (network-dependent)."""
    def setUp(self):
        # Need the latex files for these tests to make sense.  They
        # may be fetched once, thereafter cached versions will be
        # used.
        #
        # This means that all of the tests in this class depend on the
        # network
        self.fetch_verbose_setting = fetch.verbose
        self.scrape_verbose_setting = scrape.verbose
        fetch.verbose = False
        scrape.verbose = False
        any_fetched = fetch.all_source(test_aids, delay=test_delay)
        if any_fetched:
            fetch.all_latex(test_aids)
    def tearDown(self):
        # restore the module-level verbosity flags touched in setUp
        fetch.verbose = self.fetch_verbose_setting
        scrape.verbose = self.scrape_verbose_setting
    @unittest.skipIf(not network_tests, "Skipping network tests.")
    def test_long_comments(self):
        for aid in test_aids:
            scrape.long_comments(aid)
    @unittest.skipIf(not network_tests, "Skipping network tests.")
    def test_short_comments(self):
        for aid in test_aids:
            scrape.short_comments(aid)
    @unittest.skipIf(not network_tests, "Skipping network tests.")
    def test_write_output(self):
        tf_1 = tempfile.NamedTemporaryFile()
        tf_2 = tempfile.NamedTemporaryFile()
        scrape.write_output(test_aids,
                            tf_1.name, tf_2.name)
    # def test_all_comments(self):
    #     for aid in test_aids:
    #         scrape.all_comments(aid)
class CommentRegexpTest(unittest.TestCase):
def test_long_comment_regexp(self):
self.assertTrue(re.search(scrape.long_comment_regexp, '% and comment'))
self.assertTrue(re.search(scrape.long_comment_regexp, ' % and comment'))
# make sure I get the whole comment
self.assertEqual(re.search(scrape.long_comment_regexp, '%% and comment').group(1),
'%% and comment')
self.assertEqual(re.search(scrape.long_comment_regexp, ' %% and comment').group(1),
'%% and comment')
self.assertEqual(re.search(scrape.long_comment_regexp, '% and % comment').group(1),
'% and % comment')
self.assertEqual(re.search(scrape.long_comment_regexp, ' % and % comment').group(1),
'% and % comment' |
# -*- coding: utf-8 -*-
"""Git tools."""
from shlex import split
from plumbum import ProcessExecutionError
from plumbum.cmd import git
DEVELOPMENT_BRANCH = "develop"
def run_git(*args, dry_run=False, quiet=False):
    """Run a git command, print it before executing and capture the output."""
    command = git[split(" ".join(args))]
    if not quiet:
        prefix = "[DRY-RUN] " if dry_run else ""
        print("{}{}".format(prefix, command))
    if dry_run:
        return ""
    output = command()
    if not quiet and output:
        print(output)
    return output
def branch_exists(branch):
    """Return True if the branch exists."""
    try:
        run_git("rev-parse --verify {}".format(branch), quiet=True)
    except ProcessExecutionError:
        # git rev-parse fails for unknown refs
        return False
    return True
def get_current_branch():
    """Get the current branch name."""
    branch = run_git("rev-parse --abbrev-ref HEAD", quiet=True)
    return branch.strip()
|
def extractDevastatranslationsWordpressCom(item):
    '''
    Parser for 'devastatranslations.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items with neither a chapter nor a volume number.
    if "preview" in item['title'].lower() or not (chp or vol):
        return None
    release_map = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in release_map:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsTextBlock.
Run with: ctest -V -R QgsTextBlock
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '12/05/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (
QgsTextBlock,
QgsTextFragment,
QgsStringUtils
)
from qgis.testing import start_app, unittest
start_app()
class TestQgsTextBlock(unittest.TestCase):

    def testConstructors(self):
        """Blocks can be built empty or from a single fragment."""
        empty_block = QgsTextBlock()
        self.assertEqual(len(empty_block), 0)
        # single fragment block
        part = QgsTextFragment('ludicrous gibs!')
        single = QgsTextBlock(part)
        self.assertEqual(len(single), 1)
        self.assertEqual(single[0].text(), part.text())
        self.assertEqual(single.toPlainText(), 'ludicrous gibs!')

    def testAppend(self):
        """append() grows the block and preserves fragment order."""
        block = QgsTextBlock()
        self.assertEqual(len(block), 0)
        block.append(QgsTextFragment('a'))
        self.assertEqual(len(block), 1)
        self.assertEqual(block[0].text(), 'a')
        block.append(QgsTextFragment('b'))
        self.assertEqual(len(block), 2)
        self.assertEqual(block[0].text(), 'a')
        self.assertEqual(block[1].text(), 'b')
        self.assertEqual(block.toPlainText(), 'ab')

    def testAt(self):
        """at() bounds-checks with KeyError; [] supports negative indexing."""
        block = QgsTextBlock()
        block.append(QgsTextFragment('a'))
        block.append(QgsTextFragment('b'))
        self.assertEqual(len(block), 2)
        self.assertEqual(block.at(0).text(), 'a')
        self.assertEqual(block.at(1).text(), 'b')
        for bad_index in (2, -1):
            with self.assertRaises(KeyError):
                block.at(bad_index)
        self.assertEqual(block[0].text(), 'a')
        self.assertEqual(block[1].text(), 'b')
        with self.assertRaises(IndexError):
            _ = block[2]
        self.assertEqual(block[-1].text(), 'b')
        self.assertEqual(block[-2].text(), 'a')

    def testClear(self):
        """clear() removes every fragment and marks the block empty."""
        block = QgsTextBlock()
        block.append(QgsTextFragment('a'))
        block.append(QgsTextFragment('b'))
        self.assertEqual(len(block), 2)
        self.assertFalse(block.empty())
        block.clear()
        self.assertEqual(len(block), 0)
        self.assertTrue(block.empty())

    def testCapitalize(self):
        """applyCapitalization() affects every fragment in the block."""
        block = QgsTextBlock(QgsTextFragment('ludicrous gibs!'))
        block.append(QgsTextFragment('another part'))
        block.applyCapitalization(QgsStringUtils.TitleCase)
        self.assertEqual(block.toPlainText(), 'Ludicrous Gibs!Another Part')
if __name__ == '__main__':
unittest.main()
|
class InvalidDev | iceType(E | xception):
pass
|
from django.test import TestCase
from common.templatetags.verbose_name import verbose_name |
from users.models import SystersUser
class TemplateTagsTestCase(TestCase):
    def test_verbose_names(self):
        """verbose_name should resolve a model field to its verbose name."""
        field_label = verbose_name(SystersUser, "homepage_url")
        self.assertEqual(field_label, "Homepage")
|
" Userman: Dump the database into a JSON file."
import json
import tarfile
from cStringIO import StringIO
from userman import utils
from userman import constants
def dump(db, filename):
    """Dump contents of the database to a tar file, optionally compressed.
    Return the number of items, and the number of attachment files dumped."""
    count_items = 0
    count_files = 0
    # Choose the tarfile compression mode from the filename extension.
    if filename.endswith('.gz'):
        mode = 'w:gz'
    elif filename.endswith('.bz2'):
        mode = 'w:bz2'
    else:
        mode = 'w'
    outfile = tarfile.open(filename, mode=mode)
    for key in db:
        # Only dump documents whose key matches the item UID pattern.
        if not constants.IUID_RX.match(key): continue
        doc = db[key]
        # Drop the CouchDB revision so the dump can be loaded elsewhere.
        del doc['_rev']
        info = tarfile.TarInfo(doc['_id'])
        data = json.dumps(doc)
        info.size = len(data)
        outfile.addfile(info, StringIO(data))
        count_items += 1
        # Attachments are stored right after their item, under
        # "<item id>_att/<attachment name>"; undump() relies on this order.
        for attname in doc.get('_attachments', dict()):
            info = tarfile.TarInfo("{0}_att/{1}".format(doc['_id'], attname))
            attfile = db.get_attachment(doc, attname)
            data = attfile.read()
            attfile.close()
            info.size = len(data)
            outfile.addfile(info, StringIO(data))
            count_files += 1
    outfile.close()
    return count_items, count_files
def undump(db, filename):
    """Reverse of dump; load all items from a tar file.
    Items are just added to the database, ignoring existing items."""
    count_items = 0
    count_files = 0
    # Maps "<item id>_att/<name>" tar member names to put_attachment kwargs
    # for items saved earlier in this loop.
    attachments = dict()
    infile = tarfile.open(filename, mode='r')
    for item in infile:
        itemfile = infile.extractfile(item)
        itemdata = itemfile.read()
        itemfile.close()
        if item.name in attachments:
            # This relies on an attachment being after its item in the tarfile.
            # NOTE(review): 'doc' here is the document saved on a previous
            # iteration -- confirm dump() always writes attachments directly
            # after their item so 'doc' is still the right document.
            db.put_attachment(doc, itemdata, **attachments.pop(item.name))
            count_files += 1
        else:
            doc = json.loads(itemdata)
            # If the user document already exists, do not load again.
            if doc[constants.DB_DOCTYPE] == constants.USER:
                rows = db.view('user/email', key=doc['email'])
                if len(list(rows)) != 0: continue
            atts = doc.pop('_attachments', dict())
            db.save(doc)
            count_items += 1
            # Remember attachment metadata; the data arrives in later members.
            for attname, attinfo in atts.items():
                key = "{0}_att/{1}".format(doc['_id'], attname)
                attachments[key] = dict(filename=attname,
                                        content_type=attinfo['content_type'])
    infile.close()
    return count_items, count_files
if __name__ == '__main__':
    import sys
    # Optional first argument is the settings filepath.
    try:
        utils.load_settings(filepath=sys.argv[1])
    except IndexError:
        utils.load_settings()
    db = utils.get_db()
    # NOTE(review): with exactly one argument, sys.argv[1] is used both as
    # the settings filepath above and as the dump filename here -- confirm
    # this double use is intended.
    if len(sys.argv) == 2:
        filename = sys.argv[1]
    else:
        filename = 'dump.tar.gz'
    count_items, count_files = dump(db, filename)
    print 'dumped', count_items, 'items and', count_files, 'files to', filename
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing and Infeeding Tools For Redial and MovieLens Data."""
import functools
import json
from absl import logging
import tensorflow.compat.v1 as tf
from trainer import constants
def rd_jsonl_to_tsv(in_fname, out_fname):
  """Converts the redial jsonl to a tsv.

  Each input line is a JSON object with "conversation" and "response" keys;
  each output line is "<conversation>\t<response>".

  Args:
    in_fname: path of the input jsonl file.
    out_fname: path of the output tsv file.

  Returns:
    The number of examples written.
  """
  logging.info("Reading: %s", in_fname)

  # Ordered (old, new) pairs that undo tokenizer-style spacing artifacts.
  replacements = (
      (" ,", ","), (" .", "."), (" %", "%"),
      (" - ", "-"), (" : ", ":"), (" / ", "/"),
      ("( ", "("), (" )", ")"),
      ("`` ", "\""), (" ''", "\""),
      (" 's", "'s"), ("s ' ", "s' "),
  )

  def fix_spacing(text):
    """Removes extra spaces around punctuation."""
    for old, new in replacements:
      text = text.replace(old, new)
    return text

  count = 0
  with tf.io.gfile.GFile(in_fname, "rb") as infile,\
      tf.io.gfile.GFile(out_fname, "w") as outfile:
    for line in infile:
      ex = json.loads(line)
      # Write this line as <conversation>\t<response>
      outfile.write("%s\t%s\n" % (fix_spacing(ex["conversation"]),
                                  fix_spacing(ex["response"])))
      count += 1
      tf.logging.log_every_n(
          tf.logging.INFO,
          "Wrote %d examples to %s." % (count, out_fname),
          1000)
  return count
def generic_dataset_fn(split, path, reverse=False, shuffle_files=False):
  """Returns a tf dataset of (conversation, response) pairs for redial.

  Args:
    split: name of the split, used as a key into `path`.
    path: dict mapping split names to TSV file paths.
    reverse: if True, swap each (input, target) column pair.
    shuffle_files: unused; accepted to satisfy the T5 dataset-fn signature.

  Returns:
    a tf.data.Dataset of {"inputs": ..., "targets": ...} dicts.
  """
  # We only have one file for each split.
  del shuffle_files
  # Load lines from the text file as examples.
  ds = tf.data.TextLineDataset(path[split])
  # Split each "<input>\t<target>" example into
  # a (input, target) tuple.
  ds = ds.map(
      functools.partial(tf.io.decode_csv, record_defaults=["", ""],
                        field_delim="\t", use_quote_delim=False),
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  # reverse if necessary
  if reverse:
    ds = ds.map(lambda *ex: ex[::-1])
  # Map each tuple to a {"inputs": ... "targets": ...} dict.
  ds = ds.map(lambda *ex: dict(zip(["inputs", "targets"], ex)))
  return ds
de | f generic_preprocessor(ds, label):
"""Prepares text for input into model."""
def normalize_text(text):
"""Lowercase and remove quotes from a TensorFlow string."""
text = tf.strings.lower(text)
return text
def to_inputs_and_targets(ex):
"""apply preprocessing functions and add task label."" | "
return {
"inputs":
tf.strings.join(
[label, normalize_text(ex["inputs"])]),
"targets": normalize_text(ex["targets"])
}
return ds.map(to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def dataset_fn_wrapper(dataset):
  """Returns the dataset function for the desired dataset.

  Args:
    dataset: a string representing the desired dataset/task
      (rd_recommendations, ml_sequences, ml_tags_normal, ml_tags_reversed,
      ml_tags_masked, probe_1, or probe_2)

  Returns:
    a function that can be passed in as a T5 dataset function
    (split, shuffle_files) -> tf.data.dataset
  """
  paths = {
      "rd_recommendations": constants.RD_TSV_PATH,
      "ml_sequences": constants.ML_SEQ_TSV_PATH,
      "ml_tags_normal": constants.ML_TAGS_TSV_PATH,
      "ml_tags_reversed": constants.ML_TAGS_TSV_PATH,
      "ml_reviews": constants.ML_REVIEWS_TSV_PATH,
      "probe_1": constants.PROBE_1_TSV_PATH,
      "probe_1_sequences": constants.PROBE_1_SEQ_TSV_PATH,
      "probe_2": constants.PROBE_2_TSV_PATH,
      "probe_3": constants.PROBE_3_TSV_PATH,
      "probe_4": constants.PROBE_4_TSV_PATH
  }
  tsv_path = paths[dataset]
  # Only the reversed-tags task swaps (input, target) columns.
  reverse = (dataset == "ml_tags_reversed")

  def dataset_fn(split, shuffle_files=False):
    return generic_dataset_fn(split, tsv_path, reverse, shuffle_files)

  return dataset_fn
def preprocessor_wrapper(task):
  """Returns the preprocessing function for the desired task.

  Args:
    task: a string representing the desired task (rd_recommendations,
      ml_sequences, ml_tags)

  Returns:
    a function that can be passed in as a T5 dataset function
    (tf.data.dataset) -> tf.data.dataset
  """
  labels = {
      "rd_recommendations": "redial conversation: ",
      "ml_sequences": "movielens sequence: ",
      "ml_tags": "movielens tags: ",
      "ml_reviews": "movielens review: "
  }
  task_label = labels[task]

  def preprocessor(ds):
    return generic_preprocessor(ds, task_label)

  return preprocessor
|
# -*- coding: utf-8 -*-
"""
Amavis management frontend.
Provides:
* SQL quarantine management
* Per-domain settings
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy
from modoboa.admin.models import Domain
from modoboa.core.extensions import ModoExtension, exts_pool
from modoboa.parameters import tools as param_tools
from . import __version__, forms
from .lib import create_user_and_policy, create_user_and_use_policy
class Amavis(ModoExtension):
    """The Amavis extension."""
    name = "modoboa_amavis"
    label = ugettext_lazy("Amavis frontend")
    version = __version__
    description = ugettext_lazy("Simple amavis management frontend")
    url = "quarantine"
    available_for_topredirection = True
    def load(self):
        """Register Amavis parameter forms (admin-global and per-user)."""
        param_tools.registry.add("global", forms.ParametersForm, "Amavis")
        param_tools.registry.add(
            "user", forms.UserSettings, ugettext_lazy("Quarantine"))
    def load_initial_data(self):
        """Create records for existing domains and co."""
        # One amavis policy per domain; each domain alias reuses that policy.
        for dom in Domain.objects.all():
            policy = create_user_and_policy("@{0}".format(dom.name))
            for domalias in dom.domainalias_set.all():
                domalias_pattern = "@{0}".format(domalias.name)
                create_user_and_use_policy(domalias_pattern, policy)
exts_pool.register_extension(Amavis)
|
random
import mimetypes
import imghdr
import traceback
import json
import redis
import logging
import requests
from requests.exceptions import RequestException
TYPE_CAT = 'cats'
TYPE_DOG = 'dogs'
TYPE_OTHER = 'others'
SOURCE_ROOT = os.path.join('..', 'images')
TWO_HOUR_EXPIRE = 60*60*2 # in seconds
MEDIA_ID_EXPIRE = TWO_HOUR_EXPIRE * 35 # in seconds
ACCESS_TOKEN_KEY = 'wechat:token:v1:%s'
MEDIA_ID_KEY = 'wechat:media_ids:v1:%s'
MEDIA_ID_OUTPUT = 'data'
MEDIA_ID_USER_KEY = 'wechat:media_ids:user:v1:%s:%s'
MEDIA_ID_FILE = 'media_ids_v1_%s.txt'
UPLOAD_IMAGE_URL = 'https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=image'
GET_TOKEN_URL = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('MediaStore')
def get_wechat_access_token(app_id, app_secret):
    """Fetch a fresh access token from the WeChat token endpoint."""
    url = GET_TOKEN_URL % (app_id, app_secret)
    logger.info('get_wechat_access_token url=%s' % url)
    response = requests.get(url)
    response.encoding = 'utf-8'
    payload = response.json()
    logger.info('get_wechat_access_token result=%s' % payload)
    return payload['access_token']
class MediaStore(object):
    """Uploads images to the WeChat media API and caches media ids in redis."""
    _redis = redis.StrictRedis(decode_responses=True)
    def __init__(self, name, app_id, app_secret, r=_redis, expire=MEDIA_ID_EXPIRE):
        """Create a store for one WeChat app.

        :param name: human-readable store name (for logs only).
        :param app_id: WeChat app id.
        :param app_secret: WeChat app secret.
        :param r: redis client used for caching (shared default).
        :param expire: lifetime in seconds of cached media-id sets.
        """
        assert name, 'name can not be None'
        assert app_id, 'app_id can not be None'
        assert app_secret, 'app_secret can not be None'
        self.name = name
        self.app_id = app_id
        self.app_secret = app_secret
        self.expire = expire
        self.r = r
        logger.debug('__init__ name=%s app_id=%s, app_secret=%s' %
                     (name, app_id, app_secret))
    def _get_media_key(self, type_name=''):
        # Redis set holding all media ids of one image type.
        return MEDIA_ID_KEY % type_name
    def _get_media_file(self, type_name=''):
        # Local text file mirroring the uploaded media ids.
        return os.path.join(MEDIA_ID_OUTPUT, MEDIA_ID_FILE % type_name)
    def _get_user_key(self, user_id, type_name=''):
        # Redis set of media ids already served to one user.
        return MEDIA_ID_USER_KEY % (type_name, user_id)
    def _get_access_token(self):
        """Return a cached access token, fetching a new one when expired."""
        token = self.r.get(ACCESS_TOKEN_KEY % self.app_id)
        if not token:
            token = get_wechat_access_token(self.app_id, self.app_secret)
            logger.info('get_wechat_access_token token=%s' % token)
            if token:
                # WeChat tokens live ~2 hours; expire the cache to match.
                self.r.set(ACCESS_TOKEN_KEY % self.app_id, token)
                self.r.expire(ACCESS_TOKEN_KEY % self.app_id, TWO_HOUR_EXPIRE)
        return token
    def clear_media_ids(self, type_name=''):
        """Forget every cached media id of the given type."""
        logger.info('clear_media_ids type=%s' % type_name)
        self.r.delete(self._get_media_key(type_name))
    def save_media_ids(self, media_ids, type_name='', replace=True):
        """Persist media ids to disk and redis; optionally replace the old set."""
        if media_ids:
            with open(self._get_media_file(type_name), 'w') as f:
                f.write('\n'.join(media_ids))
            key = self._get_media_key(type_name)
            if replace:
                self.r.delete(key)
            rt = self.r.sadd(key, *media_ids)
            self.r.expire(key, self.expire)
            logger.info('save_media_ids %s media ids saved %s' %
                        (self.media_ids_length(type_name), rt))
        return media_ids
    def upload_image(self, filepath):
        """Upload one image; return its media_id, or None when the request fails.

        Raises IOError when no access token can be obtained.
        """
        token = self._get_access_token()
        if not token:
            raise IOError('token is None')
        url = UPLOAD_IMAGE_URL % token
        try:
            # BUG FIX: the file handle was previously opened inline and never
            # closed; the with-block guarantees it is released even on error.
            with open(filepath, 'rb') as image_file:
                response = requests.post(url, files={'media': image_file})
            response.encoding = 'utf-8'
            return response.json()['media_id']
        except RequestException as e:
            logger.error('upload_image error=%s' % e)
    def upload_images(self, source_dir, type_name='', max_count=100):
        """Upload up to max_count images from source_dir and record their ids."""
        if not source_dir or not os.path.isdir(source_dir):
            return
        logger.info('upload_images [%s] for type [%s]' % (source_dir, type_name))
        names = os.listdir(source_dir)
        if len(names) > max_count:
            names = random.sample(names, max_count)
        count = 0
        mids = []
        for name in names:
            filepath = os.path.abspath(os.path.join(source_dir, name))
            # Only accept common web image formats.
            mime_type, _ = mimetypes.guess_type(name)
            if mime_type not in ['image/jpeg', 'image/png', 'image/gif']:
                logger.warning('upload_images invalid=%s' % filepath)
                continue
            logger.info('upload_images file=%s' % filepath)
            media_id = self.upload_image(filepath)
            if media_id:
                logger.info('upload_images result=%s' % media_id)
                mids.append(media_id)
            # Counts attempts (not successes), matching the sampling cap above.
            count += 1
            if count > max_count:
                break
        self.save_media_ids(mids, type_name)
    def random_user_media_id(self, user_id=None, type_name=''):
        """Pick a media id this user has not seen yet; restart when exhausted."""
        if not user_id:
            return self.random_media_id(type_name)
        media_key = self._get_media_key(type_name)
        user_key = self._get_user_key(user_id, type_name)
        # Candidates = all ids minus the ones already served to this user.
        mids = self.r.sdiff(media_key, user_key)
        mid = None
        if mids:
            mid = random.choice(list(mids))
        if mid:
            self.r.sadd(user_key, mid)
            self.r.expire(user_key, self.expire)
        if not mid:
            # User has seen everything: clear their history and start over.
            self.r.delete(user_key)
            mid = self.random_media_id(type_name)
        logger.debug('random_user_media_id user_id=%s result=%s' %
                     (user_id, mid))
        return mid
    def all_media_ids(self, type_name=''):
        """Return every cached media id of the given type."""
        return self.r.smembers(self._get_media_key(type_name))
    def media_ids_length(self, type_name=''):
        """Return how many media ids are cached for the given type."""
        return self.r.scard(self._get_media_key(type_name))
    def random_media_id(self, type_name=''):
        """Return a random cached media id of the given type."""
        return self.r.srandmember(self._get_media_key(type_name))
from config import WECHAT_APPID, WECHAT_APPSECRET, WECHAT2_APPID, WECHAT2_APPSECRET
store1 = MediaStore('Cat', WECHAT_APPID, WECHAT_APPSECRET)
store2 = MediaStore('Miu', WECHAT2_APPID, WECHAT2_APPSECRET)
def update_app(store, root=SOURCE_ROOT):
    """Upload the images of every known type for one store."""
    for category in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
        store.upload_images(os.path.join(root, category), category)
def update_all(root=SOURCE_ROOT):
    """Validate the source tree, then upload images for both stores."""
    check_all(root)
    for store in (store1, store2):
        update_app(store, root)
def check_all(root=SOURCE_ROOT):
    """Exit the process unless every type directory exists and is non-empty."""
    # (predicate, error suffix, exit code) -- evaluated in order per directory.
    checks = (
        (os.path.exists, 'not exists', 1),
        (os.path.isdir, 'not directory', 2),
        (os.listdir, 'is empty', 2),
    )
    for category in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
        source_dir = os.path.abspath(os.path.join(root, category))
        for predicate, problem, code in checks:
            if not predicate(source_dir):
                print('ERROR: check_all source dir [%s] %s' % (source_dir, problem))
                exit(code)
    print('all directories exists, check passed.')
def test_all():
    """Smoke-test media-id reads for every store and image type."""
    for store in [store1, store2]:
        for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
            print('\n[Store:%s] found %s values for type %s, read test:'
                  % (store.name, store.media_ids_length(type_name), type_name))
            for i in range(0, 10):
                print(store.random_user_media_id('test', type_name))
            for i in range(0, 10):
                # BUG FIX: these checks used to call store1 unconditionally,
                # so the second store was never actually exercised.
                assert store.random_user_media_id('test', type_name), 'No media id found'
            assert store.random_media_id(type_name), 'No media id found'
    print('all tests passed.')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
prog='wechat_uploader', description='WeChat Images Uploader v0.1.0')
parser.add_argument('-c', '--check', action="store_true",
help='check source dir')
parser.add_argument('-t', '--test', action="store_true",
help='test read media id')
parser.add_argument('-u', '--upload', action="store_true",
help='upload all images')
parser.add_argument('-s', '--source', help='images source directory |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
import pickle
import json
from fanpy import Fanfou, FanfouHTTPError, NoAuth
from fanpy.api import FanfouDictResponse, FanfouListResponse, POST_ACTIONS, method_for_uri
noauth = NoAuth()
fanfou_na = Fanfou(auth=noauth)
AZaz = 'abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_random_str(length=10):
    """Return a random alphanumeric string.

    Args:
        length: number of characters to generate (default 10, matching the
            previous fixed-length behaviour).
    """
    return ''.join(random.choice(AZaz) for _ in range(length))
def test_FanfouHTTPError_raised_for_invalid_oauth():
    """An unauthenticated mentions call must raise FanfouHTTPError."""
    try:
        fanfou_na.statuses.mentions()
    except FanfouHTTPError:
        pass
    else:
        assert False, 'FanfouHTTPError was not raised'
def test_pickle_ability():
    """Responses must survive a pickle round-trip unchanged."""
    dict_res = FanfouDictResponse({'a': 'b'})
    restored = pickle.loads(pickle.dumps(dict_res))
    assert dict_res == restored
    assert restored['a'] == 'b'
    list_res = FanfouListResponse([1, 2, 3])
    restored = pickle.loads(pickle.dumps(list_res))
    assert list_res == restored
    assert restored[2] == 3
def test_json_ability():
    """Responses must serialize to JSON and compare equal after parsing."""
    dict_res = FanfouDictResponse({'a': 'b'})
    parsed = json.loads(json.dumps(dict_res))
    assert dict_res == parsed
    assert parsed['a'] == 'b'
    list_res = FanfouListResponse([1, 2, 3])
    parsed = json.loads(json.dumps(list_res))
    assert list_res == parsed
    assert parsed[2] == 3
def test_method_for_uri():
    """URIs ending in a POST action map to POST; everything else to GET."""
    for post_action in POST_ACTIONS:
        uri = '{}/{}'.format(get_random_str(), post_action)
        assert method_for_uri(uri) == 'POST'
    assert method_for_uri('statuses/home_timeline') == 'GET'
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_particl import (
ParticlTestFramework,
isclose,
)
from test_framework.messages import COIN
class SmsgPaidFeeExtTest(ParticlTestFramework):
    """Exercise paid-SMSG fee rate and difficulty adjustment via staking options."""
    def set_test_params(self):
        # Two fresh nodes; -txindex is needed for getrawtransaction below.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-debug', '-noacceptnonstdtxn', '-reservebalance=10000000', '-txindex'] for i in range(self.num_nodes)]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_network(self, split=False):
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        self.connect_nodes(0, 1)
        self.sync_all()
    def run_test(self):
        nodes = self.nodes
        self.import_genesis_coins_a(nodes[0])
        self.import_genesis_coins_b(nodes[1])
        address0 = nodes[0].getnewaddress()
        address1 = nodes[1].getnewaddress()
        # Share node0's public key so node1 can address messages to it.
        nodes[0].smsgaddlocaladdress(address0)
        nodes[1].smsgaddaddress(address0, nodes[0].smsglocalkeys()['wallet_keys'][0]['public_key'])
        text = 'Some text to test'
        # Test-only send ('Not Sent.') to check the quoted fee at the base rate.
        ro = nodes[1].smsgsend(address1, address0, text, True, 10, True)
        assert(ro['result'] == 'Not Sent.')
        assert(isclose(ro['fee'], 0.00159000))
        assert(nodes[0].smsggetfeerate() == 50000)
        assert(nodes[1].smsggetfeerate() == 50000)
        # Node0 stakes with a raised smsg fee-rate target.
        ro = nodes[0].walletsettings('stakingoptions', {'smsgfeeratetarget' : 0.001})
        assert(float(ro['stakingoptions']['smsgfeeratetarget']) == 0.001)
        self.stakeBlocks(49)
        # NOTE(review): rate is unchanged after 49 blocks and jumps after the
        # 50th -- presumably adjusted once per 50-block period; confirm.
        assert(nodes[0].smsggetfeerate() == 50000)
        # A real paid send still succeeds at the old rate.
        ro = nodes[1].smsgsend(address1, address0, text, True, 10)
        assert(ro['result'] == 'Sent.')
        assert('msgid' in ro)
        assert('txid' in ro)
        assert(isclose(ro['fee'], 0.00159000))
        self.stakeBlocks(1)
        assert(nodes[0].smsggetfeerate() == 61939)
        # Quoted fee rises with the new rate.
        ro = nodes[1].smsgsend(address1, address0, text, True, 10, True)
        assert(ro['result'] == 'Not Sent.')
        assert(isclose(ro['fee'], 0.00189080))
        # The coinstake output of block 50 carries the new rate/difficulty.
        ro = nodes[1].getblock(nodes[1].getblockhash(50), 2)
        assert(ro['tx'][0]['vout'][0]['smsgfeerate'] * COIN == 61939)
        assert(ro['tx'][0]['vout'][0]['smsgdifficulty'] == '1f0fffff')
        # Also vote to lower the smsg difficulty target.
        ro = nodes[0].walletsettings('stakingoptions', {'smsgdifficultytarget' : '000000000000bfffffffffffffffffffffffffffffffffffffffffffffffffff', 'smsgfeeratetarget' : 0.001})
        assert(float(ro['stakingoptions']['smsgfeeratetarget']) == 0.001)
        assert(ro['stakingoptions']['smsgdifficultytarget'] == '000000000000bfffffffffffffffffffffffffffffffffffffffffffffffffff')
        self.sync_all()
        self.stakeBlocks(1)
        assert(nodes[0].smsggetfeerate() == 61939)
        ro = nodes[1].getrawtransaction(nodes[1].getblockreward(51)['coinstake'], True)
        block_51_smsgfeerate = ro['vout'][0]['smsgfeerate'] * COIN
        block_51_smsgdifficulty = int(ro['vout'][0]['smsgdifficulty'], 16)
        assert(block_51_smsgfeerate > 61939)
        assert(block_51_smsgdifficulty < 0x1f0fffff)
        # The earlier paid message must arrive at node0.
        self.waitForSmsgExchange(1, 1, 0)
        ro = nodes[0].smsginbox('all')
        assert(len(ro['messages']) == 1)
        assert(ro['messages'][0]['text'] == text)
        self.log.info('Verify node settings survive a restart')
        self.stop_node(0)
        self.start_node(0, self.extra_args[0] + ['-wallet=default_wallet',])
        self.connect_nodes(0, 1)
        ro = nodes[0].walletsettings('stakingoptions')
        assert(float(ro['stakingoptions']['smsgfeeratetarget']) == 0.001)
        assert(ro['stakingoptions']['smsgdifficultytarget'] == '000000000000bfffffffffffffffffffffffffffffffffffffffffffffffffff')
        self.stakeBlocks(1)
        # Rate and difficulty keep moving toward the targets after restart.
        ro = nodes[1].getblock(nodes[1].getblockhash(52), 2)
        assert(ro['tx'][0]['vout'][0]['smsgfeerate'] * COIN > block_51_smsgfeerate)
        assert(int(ro['tx'][0]['vout'][0]['smsgdifficulty'], 16) < block_51_smsgdifficulty)
if __name__ == '__main__':
SmsgPaidFeeExtTest().main()
|
import numpy
from chainer import cuda
from cha | iner import function
from chainer.utils import type_check
def _transpose(xs, length):
    """Transpose a list of arrays sorted by non-increasing length.

    ``outs[p][i]`` holds ``xs[i][p]``; output ``p`` only has rows for the
    inputs that are at least ``p + 1`` elements long.
    """
    xp = cuda.get_array_module(*xs)
    lengths = numpy.zeros(length, dtype='i')
    for i, x in enumerate(xs):
        # After the loop lengths[p] == number of inputs longer than p
        # (valid because xs is sorted by non-increasing length).
        lengths[0:len(x)] = i + 1
    dtype = xs[0].dtype
    unit = xs[0].shape[1:]
    outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])
    for i, x in enumerate(xs):
        for p, xi in enumerate(x):
            outs[p][i] = xi
    return outs
class TransposeSequence(function.Function):
    """Function that transposes a list of Variables."""
    def check_type_forward(self, xs_type):
        # Inputs must be sorted by non-increasing first dimension and share
        # identical trailing dimensions.
        for p, n in zip(xs_type, xs_type[1:]):
            type_check.expect(
                p.shape[0] >= n.shape[0],
                p.shape[1:] == n.shape[1:],
            )
    def forward(self, xs):
        if len(xs) == 0:
            return ()
        return _transpose(xs, len(xs[0]))
    def backward(self, xs, gs):
        # Transposition is its own inverse, so the gradient is transposed back.
        return _transpose(gs, len(xs))
def transpose_sequence(xs):
    """Transpose a list of Variables.
    This function transposes a list of :class:`~chainer.Variable` s and returns
    a list of :class:`Variable` s.
    For example a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function
    returns ``[(0, 4, 6), (1, 5), (2), (3)]``.
    Note that a given list needs to be sorted by each length of
    :class:`~chainer.Variable`.
    Args:
        xs (list of ~chainer.Variable): Variables to transpose.
    Returns:
        tuple or Variable: Transposed list.
    """
    ys = TransposeSequence()(*xs)
    # A Function applied to a single output returns a bare Variable;
    # normalize so callers always receive a tuple.
    if not isinstance(ys, tuple):
        ys = (ys,)
    return ys
|
# -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = 'win_pkg'
def __virtual__():
    """Only load this audit module on Windows hosts."""
    if salt.utils.platform.is_windows():
        return True
    return False, 'This audit module only runs on windows'
def apply_labels(__data__, labels):
    '''
    Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
    '''
    if not labels:
        # No labels requested: audit everything.
        return __data__
    wanted = set(labels)
    labelled_data = {__virtualname__: {}}
    for topkey in ('blacklist', 'whitelist'):
        if topkey not in __data__.get(__virtualname__, {}):
            continue
        kept = []
        for test_case in __data__[__virtualname__].get(topkey, []):
            # Each test case is a dict with a single {test_name: test_body} pair.
            if not (isinstance(test_case, dict) and test_case):
                continue
            test_case_body = test_case.get(next(iter(test_case)))
            # Keep the test only if it carries every requested label.
            if wanted.issubset(set(test_case_body.get('labels', []))):
                kept.append(test_case)
        labelled_data[__virtualname__][topkey] = kept
    return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
    '''
    Runs auditpol on the local machine and audits the return data
    with the CIS yaml processed by __virtual__
    '''
    __data__ = {}
    try:
        __pkgdata__ = __salt__['pkg.list_pkgs']()
    except CommandExecutionError:
        # Package database unavailable: refresh it and retry once.
        __salt__['pkg.refresh_db']()
        __pkgdata__ = __salt__['pkg.list_pkgs']()
    # Merge every profile's yaml, filter by labels, then index by tag.
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __data__ = apply_labels(__data__, labels)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('package audit __data__:')
        log.debug(__data__)
        log.debug('package audit __tags__:')
        log.debug(__tags__)
    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                # Tests marked 'control' are acknowledged but not evaluated.
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audit_type = tag_data['type']
                match_output = tag_data['match_output'].lower()
                # Blacklisted audit (do not include)
                if 'blacklist' in audit_type:
                    if name not in __pkgdata__:
                        ret['Success'].append(tag_data)
                    else:
                        tag_data['failure_reason'] = "Blacklisted package '{0}' is installed " \
                                                     "on the system".format(name)
                        ret['Failure'].append(tag_data)
                # Whitelisted audit (must include)
                if 'whitelist' in audit_type:
                    if name in __pkgdata__:
                        audit_value = __pkgdata__[name]
                        tag_data['found_value'] = audit_value
                        # Compare the installed version against the expected one.
                        secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
                        if secret:
                            ret['Success'].append(tag_data)
                        else:
                            tag_data['failure_reason'] = "Version '{0}({1}) of the requisite" \
                                                         " package '{2}' is not installed on" \
                                                         " the system".format(match_output,
                                                                              tag_data['value_type'],
                                                                              name)
                            ret['Failure'].append(tag_data)
                    else:
                        tag_data['failure_reason'] = "Version '{0}({1}) of the requisite package" \
                                                     " '{2}' is not installed on the system" \
                                                     .format(match_output, tag_data['value_type'], name)
                        ret['Failure'].append(tag_data)
    return ret
def _merge_yaml(ret, data, profile=None):
    '''
    Merge two yaml dicts together at the secedit:blacklist and
    secedit:whitelist level
    '''
    if __virtualname__ not in ret:
        ret[__virtualname__] = {}
    for topkey in ('blacklist', 'whitelist'):
        if topkey in data.get(__virtualname__, {}):
            if topkey not in ret[__virtualname__]:
                ret[__virtualname__][topkey] = []
            # .items() works on py2 and py3; .iteritems() is py2-only and
            # raised AttributeError under Python 3.
            for key, val in data[__virtualname__][topkey].items():
                if profile and isinstance(val, dict):
                    # Record which profile supplied this test.
                    val['nova_profile'] = profile
                ret[__virtualname__][topkey].append({key: val})
    return ret
def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml
    '''
    ret = {}
    distro = __grains__.get('osfullname')
    # .items() instead of py2-only .iteritems() so the module runs on py3 too
    # (same fix applied to every iteration below).
    for toplist, toplevel in data.get(__virtualname__, {}).items():
        # secedit:whitelist
        for audit_dict in toplevel:
            for audit_id, audit_data in audit_dict.items():
                # secedit:whitelist:PasswordComplexity
                tags_dict = audit_data.get('data', {})
                # secedit:whitelist:PasswordComplexity:data
                tags = None
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                    for osfinger_glob in osfinger_list:
                        if fnmatch.fnmatch(distro, osfinger_glob):
                            tags = tags_dict.get(osfinger)
                            break
                    if tags is not None:
                        break
                # If we didn't find a match, check for a '*'
                if tags is None:
                    tags = tags_dict.get('*', [])
                # secedit:whitelist:PasswordComplexity:data:Windows 2012
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tmp = []
                    for name, tag in tags.items():
                        tmp.append({name: tag})
                    tags = tmp
                for item in tags:
                    for name, tag in item.items():
                        tag_data = {}
                        # Whitelist could have a dictionary, not a string
                        if isinstance(tag, dict):
                            tag_data = copy.deepcopy(tag)
                            tag = tag_data.pop('tag')
                        if tag not in ret:
                            ret[tag] = []
                        # NOTE(review): 'module' reads 'win_auditpol' although
                        # this file is win_pkg -- looks copy-pasted; confirm.
                        formatted_data = {'name': name,
                                          'tag': tag,
                                          'module': 'win_auditpol',
                                          'type': toplist}
                        formatted_data.update(tag_data)
                        formatted_data.update(audit_data)
                        formatted_data.pop('data')
                        ret[tag].append(formatted_data)
    return ret
def _translate_value_type(current, value, evaluator):
if 'equal' in value.lower() and LooseVersion(current) == LooseVersion(evaluator):
return True
if 'less' in value.lower() and LooseVersion(current) <= LooseVersion(evaluator):
return True
if 'more' in value.lower() and LooseVersion(current) >= LooseVersion(evaluator):
return True
return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.