| code (string, lengths 3–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
"""
Support gathering system information of hosts which are running glances.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.glances/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_USERNAME, CONF_PASSWORD, CONF_SSL,
CONF_VERIFY_SSL, CONF_RESOURCES, TEMP_CELSIUS)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['glances_api==0.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_VERSION = 'version'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Glances'
DEFAULT_PORT = '61208'
DEFAULT_VERSION = 2
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
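# Sensor type -> [friendly name, unit of measurement, mdi icon].
# 'processor_load' reports the 15-minute load average (see async_update below).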
SENSOR_TYPES = {
'disk_use_percent': ['Disk used', '%', 'mdi:harddisk'],
'disk_use': ['Disk used', 'GiB', 'mdi:harddisk'],
'disk_free': ['Disk free', 'GiB', 'mdi:harddisk'],
'memory_use_percent': ['RAM used', '%', 'mdi:memory'],
'memory_use': ['RAM used', 'MiB', 'mdi:memory'],
'memory_free': ['RAM free', 'MiB', 'mdi:memory'],
'swap_use_percent': ['Swap used', '%', 'mdi:memory'],
'swap_use': ['Swap used', 'GiB', 'mdi:memory'],
'swap_free': ['Swap free', 'GiB', 'mdi:memory'],
'processor_load': ['CPU load', '15 min', 'mdi:memory'],
'process_running': ['Running', 'Count', 'mdi:memory'],
'process_total': ['Total', 'Count', 'mdi:memory'],
'process_thread': ['Thread', 'Count', 'mdi:memory'],
'process_sleeping': ['Sleeping', 'Count', 'mdi:memory'],
'cpu_temp': ['CPU Temp', TEMP_CELSIUS, 'mdi:thermometer'],
'docker_active': ['Containers active', '', 'mdi:docker'],
'docker_cpu_use': ['Containers CPU used', '%', 'mdi:docker'],
'docker_memory_use': ['Containers RAM used', 'MiB', 'mdi:docker'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_RESOURCES, default=['disk_use']):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In([2, 3]),
})
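# An example configuration.yaml entry this schema would accept (values are
# illustrative, not from the source):
#
# sensor:
#   - platform: glances
#     host: 192.168.1.2
#     version: 3
#     resources:
#       - 'disk_use_percent'
#       - 'memory_use_percent'
#       - 'docker_active'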
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Glances sensors."""
from glances_api import Glances
name = config[CONF_NAME]
host = config[CONF_HOST]
port = config[CONF_PORT]
version = config[CONF_VERSION]
var_conf = config[CONF_RESOURCES]
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
ssl = config[CONF_SSL]
verify_ssl = config[CONF_VERIFY_SSL]
session = async_get_clientsession(hass, verify_ssl)
glances = GlancesData(
Glances(hass.loop, session, host=host, port=port, version=version,
username=username, password=password, ssl=ssl))
await glances.async_update()
if glances.api.data is None:
raise PlatformNotReady
dev = []
for resource in var_conf:
dev.append(GlancesSensor(glances, name, resource))
async_add_entities(dev, True)
class GlancesSensor(Entity):
"""Implementation of a Glances sensor."""
def __init__(self, glances, name, sensor_type):
"""Initialize the sensor."""
self.glances = glances
self._name = name
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0])
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self.glances.available
@property
def state(self):
"""Return the state of the resources."""
return self._state
async def async_update(self):
"""Get the latest data from REST API."""
await self.glances.async_update()
value = self.glances.api.data
if value is not None:
if self.type == 'disk_use_percent':
self._state = value['fs'][0]['percent']
elif self.type == 'disk_use':
self._state = round(value['fs'][0]['used'] / 1024**3, 1)
elif self.type == 'disk_free':
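                # Some Glances payloads omit 'free'; fall back to size - used.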
try:
self._state = round(value['fs'][0]['free'] / 1024**3, 1)
except KeyError:
self._state = round((value['fs'][0]['size'] -
value['fs'][0]['used']) / 1024**3, 1)
elif self.type == 'memory_use_percent':
self._state = value['mem']['percent']
elif self.type == 'memory_use':
self._state = round(value['mem']['used'] / 1024**2, 1)
elif self.type == 'memory_free':
self._state = round(value['mem']['free'] / 1024**2, 1)
elif self.type == 'swap_use_percent':
self._state = value['memswap']['percent']
elif self.type == 'swap_use':
self._state = round(value['memswap']['used'] / 1024**3, 1)
elif self.type == 'swap_free':
self._state = round(value['memswap']['free'] / 1024**3, 1)
elif self.type == 'processor_load':
# Windows systems don't provide load details
try:
self._state = value['load']['min15']
except KeyError:
self._state = value['cpu']['total']
elif self.type == 'process_running':
self._state = value['processcount']['running']
elif self.type == 'process_total':
self._state = value['processcount']['total']
elif self.type == 'process_thread':
self._state = value['processcount']['thread']
elif self.type == 'process_sleeping':
self._state = value['processcount']['sleeping']
elif self.type == 'cpu_temp':
for sensor in value['sensors']:
if sensor['label'] in ['CPU', "Package id 0",
"Physical id 0", "cpu-thermal 1",
"exynos-therm 1", "soc_thermal 1"]:
self._state = sensor['value']
elif self.type == 'docker_active':
count = 0
for container in value['docker']['containers']:
if container['Status'] == 'running' or \
'Up' in container['Status']:
count += 1
self._state = count
elif self.type == 'docker_cpu_use':
use = 0.0
for container in value['docker']['containers']:
use += container['cpu']['total']
self._state = round(use, 1)
elif self.type == 'docker_memory_use':
use = 0.0
for container in value['docker']['containers']:
use += container['memory']['usage']
self._state = round(use / 1024**2, 1)
class GlancesData:
"""The class for handling the data retrieval."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
self.available = True
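    # @Throttle allows at most one real API call per MIN_TIME_BETWEEN_UPDATES;
    # calls arriving inside the window return None without hitting the API.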
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from the Glances REST API."""
from glances_api.exceptions import GlancesApiError
try:
await self.api.get_data()
self.available = True
except GlancesApiError:
_LOGGER.error("Unable to fetch data from Glances")
self.available = False
| PetePriority/home-assistant | homeassistant/components/sensor/glances.py | Python | apache-2.0 | 8,615 |
################################################################################
#
# Copyright 2014-2016 Eric Lacombe <eric.lacombe@security-labs.org>
#
################################################################################
#
# This file is part of fuddly.
#
# fuddly is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fuddly is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fuddly. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
import sys
import random
import string
import array
from framework.global_resources import convert_to_internal_repr
def rand_string(size=None, min=1, max=10, str_set=string.printable):
out = ""
if size is None:
size = random.randint(min, max)
else:
# if size is not an int, TypeError is raised with python3, but not
# with python2 where the loop condition is always evaluated to True
assert isinstance(size, int)
while len(out) < size:
val = random.choice(str_set)
out += val
return out
def corrupt_bytes(s, p=0.01, n=None, ctrl_char=False):
"""Corrupt a given percentage or number of bytes from a string"""
s = bytearray(s)
l = len(s)
if n is None:
n = max(1,int(l*p))
for i in random.sample(range(l), n):
if ctrl_char:
s[i] = random.choice([x for x in range(0,32)] + [0x7f])
else:
s[i] = (s[i]+random.randint(1,255))%256
return bytes(s)
def corrupt_bits(s, p=0.01, n=None, ascii=False):
"""Flip a given percentage or number of bits from a string"""
s = bytearray(s)
l = len(s)*8
if n is None:
n = max(1,int(l*p))
for i in random.sample(range(l), n):
s[i//8] ^= 1 << (i%8)
if ascii:
s[i//8] &= 0x7f
return bytes(s)
def calc_parity_bit(x):
    """Return 0 if the number of set bits in x is even, otherwise 1."""
bit = 0
num_bits = 0
while x:
bitmask = 1 << bit
bit += 1
if x & bitmask:
num_bits += 1
x &= ~bitmask
return num_bits % 2
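# For non-negative x the above is equivalent to bin(x).count('1') % 2,
# noted here for reference only.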
if __name__ == "__main__":
for i in range(10):
print(corrupt_bits(b'testing', p=0.05))
for i in range(10):
print(corrupt_bytes(b'testing', p=0.05))
for i in range(4):
print(rand_string(10))
print(rand_string(min=15, max=30))
print(rand_string())
print(rand_string(min=15, max=30, str_set='RXVZ'))
| k0retux/fuddly | framework/basic_primitives.py | Python | gpl-3.0 | 2,943 |
#!/usr/bin/python2.3
from OpenGL.GL import *
from OpenGL.GLU import *
import config
import utilities
from graph import Graph, Vertex, DummyVertex, Edge, SuperEdge, X_AXIS, Y_AXIS, Z_AXIS
import sys
from sys import stderr
from math import acos, atan2, pi, log
class GraphGL:
"""
A wrapper class to hold textures and methods used to draw graphs in
the GL context.
"""
def __init__(self, graph):
"""
Initialize a GraphGL object and bind it to the specified graph.
Also initialize the display list - won't actually be created
until the first time we draw the graph.
"""
self.graph = graph
self.displaylist = None
self.quad = gluNewQuadric()
def init(self):
"""
Should be called after OpenGL is initialized but before calling draw().
"""
# Initialize the vertex texture
self.vertexTex = None
bitmap = ''
w = 64
h = 64
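        # Build a 64x64, 3-bytes-per-texel bitmap: texels stay at 255 on every
        # 4th row/column and are dimmed to 207 elsewhere, giving a faint grid.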
for i in range(w):
for j in range(h):
c = chr(255 - (0, 48)[bool(i & 0x3) & bool(j & 0x3)])
bitmap = bitmap + c + c + c
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
self.vertexTex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.vertexTex)
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, w, h, 0, GL_RGB,
GL_UNSIGNED_BYTE, bitmap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
# Initialize the edge texture
self.edgeTex = None
bitmap = ''
w = 64
h = 64
for i in range(w):
for j in range(h):
c = chr(255 - (0, 48)[bool(i & 0x4) ^ bool(j & 0x4)])
bitmap = bitmap + c + c + c
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
self.edgeTex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.edgeTex)
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, w, h, 0, GL_RGB,
GL_UNSIGNED_BYTE, bitmap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
def drawEdge(self, edge, alpha=1.0):
"""
draw an edge in 3D.
"""
# Superedge drawing is a no-op - all the parts will already have been
# drawn.
if isinstance(edge, SuperEdge):
return
if config.current['global:enable-anaglyph']:
pigment = (1.0, 1.0, 1.0, alpha)
else:
pigment = edge.color + (alpha,)
glColor4f(*pigment)
# For Netcli (has no parent)
ctx = self.graph.parent
showG = False
        if ctx is not None and ctx.showGrid:
            showG = True
if showG:
srcpos = utilities.mult3D(edge.source.pos, ctx.spacing)
tgtpos = utilities.mult3D(edge.target.pos, ctx.spacing)
else:
srcpos = edge.source.pos
tgtpos = edge.target.pos
if config.current['global:draw-edge-cylinders']:
diff = utilities.diff3D(srcpos, tgtpos)
dist = utilities.dist3D(c=diff)
# Avoid dividing by zero -
# don't draw anything for zero-length edges
if dist <= 0:
return
glPushMatrix()
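            # Orient the cylinder: rotate theta degrees about Z, then phi
            # about Y, so its +Z axis points from source toward target.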
phi = acos(diff[Z_AXIS] / dist) * 180 / pi
theta = atan2(diff[Y_AXIS], diff[X_AXIS]) * 180 / pi
glTranslatef(*srcpos)
glRotatef(theta, 0.0, 0.0, 1.0)
glRotatef(phi, 0.0, 1.0, 0.0)
gluQuadricOrientation(self.quad, GLU_OUTSIDE)
gluQuadricTexture(self.quad, GL_TRUE)
gluQuadricDrawStyle(self.quad, GLU_FILL)
gluQuadricNormals(self.quad, GLU_SMOOTH)
#glRotated(90, 1, 0, 0)
gluCylinder(self.quad, edge.radius, edge.radius, dist, config.current['global:draw-edge-sides'],
config.current['global:draw-edge-sides'])
glPopMatrix()
else: # not drawing edges as cylinders
glDisable(GL_LIGHTING)
glEnable(GL_LINE_SMOOTH)
glLineWidth(config.current['global:draw-edge-linewidth'])
glBegin(GL_LINES)
glVertex3f(srcpos[0], srcpos[1], srcpos[2])
glVertex3f(tgtpos[0], tgtpos[1], tgtpos[2])
glEnd()
glEnable(GL_LIGHTING)
def drawEdgeSelection(self, edge, alpha=0.4):
"""Draw a hilight around the edge."""
if isinstance(edge, SuperEdge):
for subedge in edge.edgeOrder():
self.drawEdgeSelection(subedge)
for vx in edge.bends:
self.drawVertexSelection(vx)
return
ctx = self.graph.parent
# Multiply points if snapping to grid
# for Netcli (has no parent)
showG = False
        if ctx is not None and ctx.showGrid:
            showG = True
if showG:
srcpos = utilities.mult3D(edge.source.pos, ctx.spacing)
tgtpos = utilities.mult3D(edge.target.pos, ctx.spacing)
else:
srcpos = edge.source.pos
tgtpos = edge.target.pos
diff = utilities.diff3D(srcpos, tgtpos)
dist = utilities.dist3D(c=diff)
# Avoid dividing by zero -
# don't draw anything for zero-length edges
if dist <= 0:
return
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPushMatrix()
pigment = edge.color + (alpha,)
glColor4f(*pigment)
# Figure out radius boost factor
x = max(log(40 * edge.radius, 10), 0)
phi = acos(diff[Z_AXIS] / dist) * 180 / pi
theta = atan2(diff[Y_AXIS], diff[X_AXIS]) * 180 / pi
glTranslatef(*srcpos)
glRotatef(theta, 0.0, 0.0, 1.0)
glRotatef(phi, 0.0, 1.0, 0.0)
gluQuadricOrientation(self.quad, GLU_OUTSIDE)
gluQuadricTexture(self.quad, GL_TRUE)
gluQuadricDrawStyle(self.quad, GLU_FILL)
gluQuadricNormals(self.quad, GLU_SMOOTH)
# glRotated(90, 1, 0, 0)
gluCylinder(self.quad, edge.radius + x, edge.radius + x, dist, config.current['global:draw-edge-sides'],
config.current['global:draw-edge-sides'])
glPopMatrix()
glDisable(GL_BLEND)
def drawVertex(self, vertex, alpha=1.0):
"""
draw a vertex in 3D
"""
glPushMatrix()
if config.current['global:enable-anaglyph']:
pigment = (1.0, 1.0, 1.0, 1.0)
else:
pigment = vertex.color + (alpha,)
glColor4f(*pigment)
# For Netcli (has no parent)
showG = False
ctx = self.graph.parent
        if ctx is not None and ctx.showGrid:
            showG = True
if showG:
vrtpos = utilities.mult3D(vertex.pos, ctx.spacing)
else:
vrtpos = vertex.pos
glTranslatef(vrtpos[0], vrtpos[1], vrtpos[2])
# glutSolidSphere(vertex.radius, config.current['global:draw-sphere-slices'], config.current['global:draw-sphere-stacks'])
gluQuadricOrientation(self.quad, GLU_OUTSIDE)
gluQuadricTexture(self.quad, GL_TRUE)
gluQuadricDrawStyle(self.quad, GLU_FILL)
gluQuadricNormals(self.quad, GLU_SMOOTH)
glRotated(90, 1, 0, 0)
gluSphere(self.quad, vertex.radius, config.current['global:draw-sphere-slices'],
config.current['global:draw-sphere-stacks'])
glPopMatrix()
def drawVertexSelection(self, vertex, alpha=0.6):
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPushMatrix()
pigment = vertex.color + (alpha,)
glColor4f(*pigment)
ctx = self.graph.parent
# For Netcli (has no parent)
showG = False
        if ctx is not None and ctx.showGrid:
            showG = True
if showG:
vrtpos = utilities.mult3D(vertex.pos, ctx.spacing)
else:
vrtpos = vertex.pos
glTranslatef(vrtpos[0], vrtpos[1], vrtpos[2])
if vertex.radius > 0:
x = max(log(40 * vertex.radius, 10), 0)
else:
x = 1
# glutSolidSphere(vertex.radius + x, config.current['global:draw-sphere-slices'], config.current['global:draw-sphere-stacks'])
gluQuadricOrientation(self.quad, GLU_OUTSIDE)
gluQuadricTexture(self.quad, GL_TRUE)
gluQuadricDrawStyle(self.quad, GLU_FILL)
gluQuadricNormals(self.quad, GLU_SMOOTH)
glRotated(90, 1, 0, 0)
gluSphere(self.quad, vertex.radius + x, config.current['global:draw-sphere-slices'],
config.current['global:draw-sphere-stacks'])
glPopMatrix()
glDisable(GL_BLEND)
def draw(self, edgeNameBase=None, vertNameBase=None):
"""
draw this graph in 3D
"""
makingList = False
renderMode = glGetInteger(GL_RENDER_MODE)
if renderMode == GL_SELECT:
# Rendering for selection
glInitNames()
            if edgeNameBase is None:
edgeNameBase = 0
            if vertNameBase is None:
vertNameBase = len(self.graph.edges)
else:
# Only do display list stuff if we're not in select mode
            if self.displaylist is None:
# No display list found
# set up a new display list - this might fail
self.displaylist = glGenLists(1)
if self.displaylist < 1:
# This indicates failure to initialize a display list.
# We will now do things the hard way.
self.displaylist = False
else:
# Initializing display list
makingList = True
glNewList(self.displaylist, GL_COMPILE)
elif self.graph.dirty and self.displaylist:
# Need to rebuild the display list
makingList = True
glNewList(self.displaylist, GL_COMPILE)
if self.displaylist and not makingList and renderMode == GL_RENDER:
# Executing display list
glCallList(self.displaylist)
else:
#
# Protect us from buggy drawing code.
#
try:
#
# Draw the polygons now
#
glDisable(GL_POLYGON_SMOOTH)
glDisable(GL_CULL_FACE)
# Draw all the edges
glBindTexture(GL_TEXTURE_2D, self.edgeTex)
for e_num in range(len(self.graph.edges)):
if self.graph.edges[e_num].hidden:
continue
if renderMode == GL_SELECT:
glPushName(edgeNameBase + e_num)
self.drawEdge(self.graph.edges[e_num])
if renderMode == GL_SELECT:
glPopName()
# Draw all the vertices
glBindTexture(GL_TEXTURE_2D, self.vertexTex)
for v_num in range(len(self.graph.vertices)):
if self.graph.vertices[v_num].hidden:
continue
if renderMode == GL_SELECT:
glPushName(vertNameBase + v_num)
self.drawVertex(self.graph.vertices[v_num])
if renderMode == GL_SELECT:
glPopName()
glEnable(GL_CULL_FACE)
except:
if makingList:
glEndList()
# self.displaylist = False
print "Unexpected error:", sys.exc_info()[0]
raise
if makingList:
# End display list
glEndList()
# Call it now
glCallList(self.displaylist)
self.graph.clean()
# Only draw selections if we're in render mode -
# not in selection mode.
if renderMode == GL_RENDER:
glEnable(GL_CULL_FACE)
# Draw selections around selected edges.
glDisable(GL_TEXTURE_2D)
for e in self.graph.edges_selected:
if e.hidden:
continue
self.drawEdgeSelection(e)
# Draw selection spheres around selected vertices.
for v in self.graph.vertices_selected:
if v.hidden:
continue
self.drawVertexSelection(v)
glDisable(GL_CULL_FACE)
| ulethHCI/GLuskap | graphgl.py | Python | gpl-2.0 | 13,203 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django import forms
from .models import Editorials
class EditorialsModelForm(forms.ModelForm):
class Meta:
model = Editorials
fields = '__all__'
widgets = {
'content': forms.Textarea(attrs={'cols': 80, 'rows': 20}),
'subtitle': forms.Textarea(attrs={'cols': 80, 'rows': 3}),
}
class EditorialsAdmin(admin.ModelAdmin):
form = EditorialsModelForm
list_display = ('title', 'published_date', 'news_paper')
list_filter = ['published_date']
search_fields = ['title']
admin.site.register(Editorials, EditorialsAdmin)
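
# Equivalent registration using the decorator form (Django >= 1.7), shown for
# reference:
#
# @admin.register(Editorials)
# class EditorialsAdmin(admin.ModelAdmin):
#     form = EditorialsModelForm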
| radheygupta/editorialsnow | editorials/admin.py | Python | apache-2.0 | 703 |
from neomodel import (Property, StructuredNode, StringProperty, DateProperty, AliasProperty, UniqueProperty,
DateTimeProperty, RelationshipFrom, BooleanProperty, Relationship, DoesNotExist, ZeroOrOne,
DeflateError, One, ZeroOrMore, OneOrMore, AttemptedCardinalityViolation, MultipleNodesReturned,
StructuredRel)
from py2neo.cypher.error.statement import ParameterMissing
import os
import http_error_codes
from flask import jsonify, make_response
from neomodel import db
import application_codes
from .errors import WrongTypeError, ParameterNotSupported
from datetime import datetime
import hashlib
base_url = os.environ.get('BASE_API_URL', 'http://localhost:10200/v1')
CONTENT_TYPE = "application/vnd.api+json; charset=utf-8"
class SerializableStructuredNode(StructuredNode):
"""
This class extends NeoModel's StructuredNode class. It adds a series of functions in order to allow for \
creation of json responses that conform to the jsonapi specification found at http://jsonapi.org/
"""
hashed = []
secret = []
dates = []
enums = dict()
    updated = DateTimeProperty(default=datetime.now)
    created = DateTimeProperty(default=datetime.now)
active = BooleanProperty(default=True)
type = StringProperty(default='serializable_structured_nodes')
id = StringProperty(required=True, unique_index=True)
def get_self_link(self):
return '{base_url}/{type}/{id}'.format(base_url=base_url, type=self.type, id=self.id)
@classmethod
def get_class_link(cls):
return '{base_url}/{type}'.format(base_url=base_url, type=cls.__type__)
@classmethod
def resource_collection_response(cls, offset=0, limit=20):
"""
This method is deprecated for version 1.1.0. Please use get_collection
"""
request_args = {'page[offset]': offset, 'page[limit]': limit}
return cls.get_collection(request_args)
def individual_resource_response(self, included=[]):
data = dict()
data['data'] = self.get_resource_object()
data['links'] = {'self': self.get_self_link()}
data['included'] = self.get_included_from_list(included)
r = make_response(jsonify(data))
r.status_code = http_error_codes.OK
r.headers['Content-Type'] = CONTENT_TYPE
return r
def get_path_resources(self, path):
response = list()
if path:
nodes = eval('self.{part}.all()'.format(part=path[0]))
for n in nodes:
if n.get_resource_object() not in response:
response.append(n.get_resource_object())
response += n.get_path_resources(path[1:])
return response
def get_included_from_list(self, included):
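        # 'included' carries jsonapi include paths, e.g. ['author', 'author.posts'];
        # each dotted path is split and walked one relationship at a time.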
response = list()
props = self.defined_properties()
included = [x.split('.') for x in included]
for attr_name in props.keys():
if not isinstance(props[attr_name], Property): # is attribute
for path in included:
if attr_name == path[0]:
response += self.get_path_resources(path)
return response
def get_resource_object(self):
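        # Builds a jsonapi resource object; illustrative shape (not verbatim
        # from the source):
        # {'id': ..., 'type': ..., 'attributes': {...},
        #  'relationships': {name: {'links': {'self': ..., 'related': ...},
        #                           'data': [...] or {...} or None}}}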
response = dict()
response['id'] = self.id
response['type'] = self.type
response['attributes'] = dict()
response['relationships'] = dict()
props = self.defined_properties()
for attr_name in props.keys():
if isinstance(props[attr_name], Property): # is attribute
if attr_name not in self.secret:
response['attributes'][attr_name] = getattr(self, attr_name)
else: # is relationship
response['relationships'][attr_name] = dict()
# links
response['relationships'][attr_name]['links'] = {
'self': '{base_url}/{type}/{id}/relationships/{attr_name}'.format(
base_url=base_url,
type=self.type,
id=self.id,
attr_name=attr_name),
'related': '{base_url}/{type}/{id}/{attr_name}'.format(
base_url=base_url,
type=self.type,
id=self.id,
attr_name=attr_name)
}
# data
related_node_or_nodes = eval('self.{attr_name}.all()'.format(attr_name=attr_name))
if not eval("type(self.{related_collection_type})".format(related_collection_type=attr_name)) == ZeroOrOne:
response['relationships'][attr_name]['data'] = list()
for the_node in related_node_or_nodes:
if the_node.active:
# TODO: Decide whether or not to include relationship meta info
# x = getattr(self, attr_name)
# rsrc_identifier = x.relationship(the_node).get_resource_identifier_object(the_node)
rsrc_identifier = {'id': the_node.id, 'type': the_node.type}
response['relationships'][attr_name]['data'].append(rsrc_identifier)
elif related_node_or_nodes:
the_node = related_node_or_nodes[0]
# x = getattr(self, attr_name)
# rsrc_identifier = x.relationship(the_node).get_resource_identifier_object(the_node)
rsrc_identifier = {'type': the_node.type, 'id': the_node.id}
response['relationships'][attr_name]['data'] = rsrc_identifier
else:
response['relationships'][attr_name]['data'] = None
return response
def relationship_collection_response(self, related_collection_type, offset=0, limit=20):
try:
response = dict()
response['included'] = list()
total_length = eval('len(self.{related_collection_type})'.format(
related_collection_type=related_collection_type)
)
response['links'] = {
'self': '{base_url}/{type}/{id}/relationships/{related_collection_type}?page[offset]={offset}&page[limit]={limit}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=offset,
limit=limit
),
'related': '{base_url}/{type}/{id}/{related_collection_type}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type),
'first': '{base_url}/{type}/{id}/relationships/{related_collection_type}?page[offset]={offset}&page[limit]={limit}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=0,
limit=limit
),
'last': "{base_url}/{type}/{id}/relationships/{related_collection_type}?page[offset]={offset}&page[limit]={limit}".format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=total_length - (total_length % int(limit)),
limit=limit
)
}
if int(offset) - int(limit) > 0:
response['links']['prev'] = "{base_url}/{type}/{id}/relationships/{related_collection_type}?page[offset]={offset}&page[limit]={limit}".format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=int(offset) - int(limit),
limit=limit
)
if total_length > int(offset) + int(limit):
response['links']['next'] = "{base_url}/{type}/{id}/relationships/{related_collection_type}?page[offset]={offset}&page[limit]={limit}".format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=int(offset) + int(limit),
limit=limit
)
# data
relation_type = eval('self.{related_collection_type}.definition'.format(
related_collection_type=related_collection_type)).get('relation_type')
results, columns = self.cypher(
"START a=node({self}) MATCH a-[rel:{relation_type}]-(end_node) RETURN rel,end_node SKIP {offset} LIMIT {limit}".format(
self=self._id, relation_type=relation_type, offset=offset, limit=limit
)
)
# TODO: For line below SerializableStructuredRel must be set to specific rel model type
relclass = SerializableStructuredRel.get_relclass_from_type(results[0][0]['type'])
relationships = [relclass.inflate(row["rel"]) for row in results]
related_node_or_nodes = [self.inflate(row["end_node"]) for row in results]
if not type(getattr(self, related_collection_type)) == ZeroOrOne:
response['data'] = list()
for i, the_node in enumerate(related_node_or_nodes):
if the_node.active:
response['data'].append(relationships[i].get_resource_identifier_object(the_node))
response['included'].append(the_node.get_resource_object())
            elif related_node_or_nodes:  # The collection contains 1 item
                the_node = related_node_or_nodes[0]
                response['data'] = relationships[0].get_resource_identifier_object(the_node)
                response['included'].append(the_node.get_resource_object())
            else:  # The collection has cardinality ZeroOrOne and is empty, so null
                response['data'] = None
r = make_response(jsonify(response))
r.status_code = http_error_codes.OK
r.headers['Content-Type'] = CONTENT_TYPE
except AttributeError:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def set_related_resources_collection_inactive(self, related_collection_type):
try:
# data
relation_type = eval('self.{related_collection_type}.definition'.format(
related_collection_type=related_collection_type)).get('relation_type')
results, columns = self.cypher(
"START a=node({self}) MATCH a-[:{relation_type}]-(b) RETURN b".format(
self=self._id, relation_type=relation_type
)
)
related_node_or_nodes = [self.inflate(row[0]) for row in results]
for n in related_node_or_nodes:
n.deactivate()
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except AttributeError:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def set_individual_related_resource_inactive(self, related_collection_type, related_resource):
# data
related_node_or_nodes = eval('self.{related_collection_type}.search(id=related_resource)'.format(related_collection_type=related_collection_type), )
if len(related_node_or_nodes) == 1:
the_node = related_node_or_nodes[0]
the_node.deactivate()
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
else:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def related_resources_collection_response(self, related_collection_type, included, offset=0, limit=20):
try:
response = dict()
response['included'] = list()
total_length = eval('len(self.{related_collection_type})'.format(
related_collection_type=related_collection_type)
)
response['links'] = {
'self': '{base_url}/{type}/{id}/{related_collection_type}?page[offset]={offset}&page[limit]={limit}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=offset,
limit=limit
),
'first': '{base_url}/{type}/{id}/{related_collection_type}?page[offset]={offset}&page[limit]={limit}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=0,
limit=limit
),
'last': "{base_url}/{type}/{id}/{related_collection_type}?page[offset]={offset}&page[limit]={limit}".format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=total_length - (total_length % int(limit)),
limit=limit
)
}
if int(offset) - int(limit) > 0:
response['links']['prev'] = "{base_url}/{type}/{id}/{related_collection_type}?page[offset]={offset}&page[limit]={limit}".format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=int(offset) - int(limit),
limit=limit
)
if total_length > int(offset) + int(limit):
response['links']['next'] = "{base_url}/{type}/{id}/{related_collection_type}?page[offset]={offset}&page[limit]={limit}".format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
offset=int(offset) + int(limit),
limit=limit
)
# data
relation_type = eval('self.{related_collection_type}.definition'.format(
related_collection_type=related_collection_type)).get('relation_type')
results, columns = self.cypher(
"START a=node({self}) MATCH a-[:{relation_type}]-(b) RETURN b SKIP {offset} LIMIT {limit}".format(
self=self._id, relation_type=relation_type, offset=offset, limit=limit
)
)
related_node_or_nodes = [self.inflate(row[0]) for row in results]
if not eval("type(self.{related_collection_type})".format(related_collection_type=related_collection_type)) == ZeroOrOne:
response['data'] = list()
for the_node in related_node_or_nodes:
if the_node.active:
response['data'].append(the_node.get_resource_object())
for n in the_node.get_included_from_list(included):
if n not in response['included']:
response['included'].append(n)
            elif related_node_or_nodes:
                the_node = related_node_or_nodes[0]
                response['data'] = the_node.get_resource_object()
else:
response['data'] = None
r = make_response(jsonify(response))
r.status_code = http_error_codes.OK
r.headers['Content-Type'] = CONTENT_TYPE
except AttributeError:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except SyntaxError:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def related_resources_individual_response(self, related_collection_type, related_resource, included=[]):
response = dict()
response['links'] = {
'self': '{base_url}/{type}/{id}/{related_collection_type}/{related_resource}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
related_resource=related_resource),
}
# data
related_node_or_nodes = eval('self.{related_collection_type}.search(id=related_resource)'.format(related_collection_type=related_collection_type), )
if len(related_node_or_nodes) == 1:
the_node = related_node_or_nodes[0]
response['data'] = the_node.get_resource_object()
response['included'] = the_node.get_included_from_list(included)
r = make_response(jsonify(response))
r.status_code = http_error_codes.OK
r.headers['Content-Type'] = CONTENT_TYPE
else:
response['data'] = None
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def delete_relationship_collection(self, related_collection_type):
related_node_or_nodes = eval('self.{related_collection_type}.all()'.format(
related_collection_type=related_collection_type
))
for node in related_node_or_nodes:
eval('self.{related_collection_type}.disconnect(node)'.format(
related_collection_type=related_collection_type
))
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
return r
def delete_individual_relationship(self, related_collection_type, related_resource):
related_node_or_nodes = eval('self.{related_collection_type}.search(id=related_resource)'.format(
related_collection_type=related_collection_type
))
for node in related_node_or_nodes:
eval('self.{related_collection_type}.disconnect(node)'.format(
related_collection_type=related_collection_type
))
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
return r
def individual_relationship_response(self, related_collection_type, related_resource, included=[]):
try:
response = dict()
response['data'] = dict()
response['included'] = list()
response['links'] = {
'self': '{base_url}/{type}/{id}/relationships/{related_collection_type}/{related_resource}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
related_resource=related_resource),
'related': '{base_url}/{type}/{id}/{related_collection_type}/{related_resource}'.format(
base_url=base_url,
type=self.type,
id=self.id,
related_collection_type=related_collection_type,
related_resource=related_resource)
}
# data
relation_type = eval('self.{related_collection_type}.definition'.format(
related_collection_type=related_collection_type)).get('relation_type')
results, columns = self.cypher(
"START a=node({self}) MATCH a-[rel:{relation_type}]-(end_node) RETURN rel, end_node".format(
self=self._id, relation_type=relation_type
)
)
if len(results) == 1:
relationship = results[0]["rel"]
the_node = self.inflate(results[0]["end_node"])
if the_node.active:
response['data'] = relationship.get_resource_identifier_object(the_node)
response['included'].append(the_node.get_resource_object())
else:
raise DoesNotExist
r = make_response(jsonify(response))
r.status_code = http_error_codes.OK
r.headers['Content-Type'] = CONTENT_TYPE
except (AttributeError, DoesNotExist):
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
def deactivate(self):
self.active = False
self.save()
@classmethod
def get_collection(cls, request_args):
r"""
Used to fetch a collection of resource object of type 'cls' in response to a GET request\
. get_resource_or_collection should only be invoked on a resource when the client specifies a GET request.
:param request_args: The query parameters supplied with the request. currently supports page[offset], and \
page[limit]. Pagination only applies to collection requests. See http://jsonapi.org/format/#fetching-pagination.
:return: An HTTP response object in accordance with the specification at \
http://jsonapi.org/format/#fetching-resources
"""
try:
if request_args.get('include'):
raise ParameterNotSupported
offset = request_args.get('page[offset]', 0)
limit = request_args.get('page[limit]', 20)
query = "MATCH (n) WHERE n:{label} AND n.active RETURN n ORDER BY n.id SKIP {offset} LIMIT {limit}".format(
label=cls.__name__,
offset=offset,
limit=limit)
results, meta = db.cypher_query(query)
data = dict()
data['data'] = list()
data['links'] = dict()
data['links']['self'] = "{class_link}?page[offset]={offset}&page[limit]={limit}".format(
class_link=cls.get_class_link(),
offset=offset,
limit=limit
)
data['links']['first'] = "{class_link}?page[offset]={offset}&page[limit]={limit}".format(
class_link=cls.get_class_link(),
offset=0,
limit=limit
)
if int(offset) - int(limit) > 0:
data['links']['prev'] = "{class_link}?page[offset]={offset}&page[limit]={limit}".format(
class_link=cls.get_class_link(),
offset=int(offset)-int(limit),
limit=limit
)
if len(cls.nodes) > int(offset) + int(limit):
data['links']['next'] = "{class_link}?page[offset]={offset}&page[limit]={limit}".format(
class_link=cls.get_class_link(),
offset=int(offset)+int(limit),
limit=limit
)
data['links']['last'] = "{class_link}?page[offset]={offset}&page[limit]={limit}".format(
class_link=cls.get_class_link(),
offset=len(cls.nodes.filter(active=True)) - (len(cls.nodes.filter(active=True)) % int(limit))-1,
limit=limit
)
list_of_nodes = [cls.inflate(row[0]) for row in results]
for this_node in list_of_nodes:
data['data'].append(this_node.get_resource_object())
r = make_response(jsonify(data))
r.status_code = http_error_codes.OK
r.headers['Content-Type'] = CONTENT_TYPE
return r
except ParameterNotSupported:
return application_codes.error_response([application_codes.PARAMETER_NOT_SUPPORTED_VIOLATION])
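    # Hypothetical Flask wiring (route and model name are illustrative, and
    # 'from flask import request' is assumed):
    #
    # @app.route('/v1/people')
    # def people_collection():
    #     return Person.get_collection(request.args)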
@classmethod
def get_resource(cls, request_args, id):
r"""
Used to fetch a single resource object with the given id in response to a GET request.\
get_resource should only be invoked on a resource when the client specifies a GET request.
:param request_args:
:return: The query parameters supplied with the request. currently supports include. See \
http://jsonapi.org/format/#fetching-includes
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
try:
included = request_args.get('include').split(',')
except AttributeError:
included = []
r = this_resource.individual_resource_response(included)
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
@classmethod
def get_resource_or_collection(cls, request_args, id=None):
r"""
Deprecated for version 1.1.0. Please use get_resource or get_collection.
This function has multiple behaviors.
With id specified: Used to fetch a single resource object with the given id in response to a GET request.\
get_resource_or_collection should only be invoked on a resource when the client specifies a GET request.
With id not specified: Used to fetch a collection of resource object of type 'cls' in response to a GET request\
. get_resource_or_collection should only be invoked on a resource when the client specifies a GET request.
:param request_args: The query parameters supplied with the request. currently supports include, page[offset], \
and page[limit]. Pagination only applies to collection requests. See http://jsonapi.org/format/#fetching-pagination and \
http://jsonapi.org/format/#fetching-includes
:param id: The 'id' field of the node to fetch in the database. The id field must be set in the model -- it \
is not the same as the node id. If the id is not supplied the full collection will be returned.
:return: An HTTP response object in accordance with the specification at \
http://jsonapi.org/format/#fetching-resources
"""
if id:
try:
                r = cls.get_resource(request_args, id)
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
else:
try:
r = cls.get_collection(request_args)
except Exception as e:
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
return r
@classmethod
def create_resource(cls, request_json):
r"""
Used to create a node in the database of type 'cls' in response to a POST request. create_resource should only \
be invoked on a resource when the client specifies a POST request.
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-creating
:return: An HTTP response object in accordance with the same specification
"""
response = dict()
new_resource, location = None, None
try:
data = request_json['data']
if data['type'] != cls.__type__:
raise WrongTypeError('type must match the type of the resource being created.')
attributes = data.get('attributes')
if attributes:
for x in attributes.keys():
if x in cls.dates:
dt = datetime.strptime(attributes[x], '%Y-%m-%d')
attributes[x] = dt
new_resource = cls(**attributes)
new_resource.save()
enum_keys = new_resource.enums.keys()
for key in attributes.keys():
if key in enum_keys:
if attributes[key] in new_resource.enums[key]:
setattr(new_resource, key, attributes[key])
else:
raise EnumeratedTypeError
else:
setattr(new_resource, key, attributes[key])
new_resource.save()
for r in new_resource.hashed:
unhashed = getattr(new_resource, r)
if unhashed:
setattr(new_resource, r, hashlib.sha256(unhashed).hexdigest())
new_resource.save()
relationships = data.get('relationships')
if relationships:
for relation_name in relationships.keys():
relations = relationships.get(relation_name)
if relations:
relations = relations['data']
if isinstance(relations, list):
for relation in relations:
the_type = relation['type'] # must translate type to cls
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
eval('new_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
new_resource.save()
else:
relation = relations
the_type = relation['type']
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
eval('new_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
new_resource.save()
response['data'] = new_resource.get_resource_object()
response['links'] = {'self': new_resource.get_self_link()}
status_code = http_error_codes.CREATED
location = new_resource.get_self_link()
r = make_response(jsonify(response))
r.headers['Content-Type'] = "application/vnd.api+json; charset=utf-8"
if location and new_resource:
r.headers['Location'] = location
r.status_code = status_code
except UniqueProperty:
r = application_codes.error_response([application_codes.UNIQUE_KEY_VIOLATION])
try:
new_resource.delete()
except:
pass
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
try:
new_resource.delete()
except:
pass
except WrongTypeError as e:
r = application_codes.error_response([application_codes.WRONG_TYPE_VIOLATION])
try:
new_resource.delete()
except:
pass
except KeyError as e:
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
print e
try:
new_resource.delete()
except:
pass
except EnumeratedTypeError:
r = application_codes.error_response([application_codes.ENUMERATED_TYPE_VIOLATION])
try:
new_resource.delete()
except:
pass
except ParameterMissing:
r = application_codes.error_response([application_codes.BAD_PARAMETER_VIOLATION])
try:
new_resource.delete()
except:
pass
return r
@classmethod
def update_resource(cls, request_json, id):
r"""
Used to update a node in the database of type 'cls' in response to a PATCH request. update_resource should only \
be invoked on a resource when the client specifies a PATCH request.
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating
:param id: The 'id' field of the node to update in the database. The id field must be set in the model -- it \
is not the same as the node id
:return: An HTTP response object in accordance with the same specification
"""
response = dict()
try:
this_resource = cls.nodes.get(id=id, active=True)
data = request_json['data']
if data['type'] != cls.__type__:
raise WrongTypeError('type must match the type of the resource being updated.')
attributes = data.get('attributes')
if attributes:
for x in attributes.keys():
if x in cls.dates:
dt = datetime.strptime(attributes[x], '%Y-%m-%d')
attributes[x] = dt
this_resource.updated = datetime.now()
this_resource.save()
enum_keys = this_resource.enums.keys()
for key in attributes.keys():
if key in enum_keys:
if attributes[key] in this_resource.enums[key]:
setattr(this_resource, key, attributes[key])
else:
raise EnumeratedTypeError
else:
setattr(this_resource, key, attributes[key])
this_resource.save()
for r in this_resource.hashed:
unhashed = getattr(this_resource, r)
setattr(this_resource, r, hashlib.sha256(unhashed).hexdigest())
this_resource.save()
relationships = data.get('relationships')
if relationships:
for relation_name in relationships.keys():
relations = relationships.get(relation_name)
for related_resource in eval('this_resource.{relation_name}.all()'.format(relation_name=relation_name)):
eval('this_resource.{relation_name}.disconnect(related_resource)'.
format(relation_name=relation_name))
if relations:
relations = relations['data']
if isinstance(relations, list):
for relation in relations:
the_type = relation['type']
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
the_rel = eval(
'this_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name
)
)
else:
relation = relations
meta = relation.get('meta')
the_type = relation['type']
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
eval('this_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
this_resource.updated = datetime.now()
this_resource.save()
response['data'] = this_resource.get_resource_object()
response['links'] = {'self': this_resource.get_self_link()}
status_code = http_error_codes.OK
location = this_resource.get_self_link()
r = make_response(jsonify(response))
r.headers['Content-Type'] = "application/vnd.api+json; charset=utf-8"
if location and this_resource:
r.headers['Location'] = location
r.status_code = status_code
except UniqueProperty as e:
print str(e)
r = application_codes.error_response([application_codes.UNIQUE_KEY_VIOLATION])
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except WrongTypeError as e:
r = application_codes.error_response([application_codes.WRONG_TYPE_VIOLATION])
except KeyError as e:
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
except EnumeratedTypeError:
r = application_codes.error_response([application_codes.ENUMERATED_TYPE_VIOLATION])
except ParameterMissing:
r = application_codes.error_response([application_codes.BAD_PARAMETER_VIOLATION])
return r
@classmethod
def set_resource_inactive(cls, id):
"""This method is deprecated for version 1.1.0. Please use deactivate_resource"""
        return cls.deactivate_resource(id)
@classmethod
def deactivate_resource(cls, id):
r"""
Used to deactivate a node of type 'cls' in response to a DELETE request. deactivate_resource should only \
be invoked on a resource when the client specifies a DELETE request.
:param id: The 'id' field of the node to update in the database. The id field must be set in the model -- it \
is not the same as the node id
:return: An HTTP response object in accordance with the specification at \
http://jsonapi.org/format/#crud-deleting
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
this_resource.deactivate()
r = make_response('')
r.headers['Content-Type'] = "application/vnd.api+json; charset=utf-8"
r.status_code = http_error_codes.NO_CONTENT
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
@classmethod
def get_relationship(cls, request_args, id, related_collection_name, related_resource=None):
"""
Get a relationship
:param request_args:
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param related_resource: Deprecated for version 1.1.0
:return: A response according to the specification at http://jsonapi.org/format/#fetching-relationships
"""
try:
included = request_args.get('include').split(',')
except (SyntaxError, AttributeError):
included = []
try:
offset = request_args.get('page[offset]', 0)
limit = request_args.get('page[limit]', 20)
this_resource = cls.nodes.get(id=id, active=True)
if not related_resource:
if request_args.get('include'):
r = application_codes.error_response([application_codes.PARAMETER_NOT_SUPPORTED_VIOLATION])
else:
r = this_resource.relationship_collection_response(related_collection_name, offset, limit)
else: # deprecated for version 1.1.0
r = this_resource.individual_relationship_response(related_collection_name, related_resource, included)
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
@classmethod
def create_relationships(cls, id, related_collection_name, request_json):
r"""
Used to create relationship(s) between the id node and the nodes identified in the included resource \
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
        :param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
if type(related_collection) in (One, ZeroOrOne): # Cardinality <= 1 so update_relationship should be used
r = application_codes.error_response([application_codes.FORBIDDEN_VIOLATION])
else:
data = request_json['data']
for rsrc_identifier in data:
the_new_node = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(id=rsrc_identifier['id'])
rel_attrs = rsrc_identifier.get('meta')
if not rel_attrs or isinstance(rel_attrs, dict):
related_collection.connect(the_new_node, rel_attrs)
else:
raise WrongTypeError
#r = this_resource.relationship_collection_response(related_collection_name)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, TypeError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
except AttemptedCardinalityViolation:
r = application_codes.error_response([application_codes.ATTEMPTED_CARDINALITY_VIOLATION])
except MultipleNodesReturned:
r = application_codes.error_response([application_codes.MULTIPLE_NODES_WITH_ID_VIOLATION])
return r
@classmethod
def disconnect_relationship(cls, id, related_collection_name, request_json):
"""
        Disconnect one or more relationships in a collection with cardinality 'Many'.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
rsrc_identifier_list = request_json['data']
if not isinstance(rsrc_identifier_list, list):
raise WrongTypeError
for rsrc_identifier in rsrc_identifier_list:
connected_resource = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(
id=rsrc_identifier['id']
)
related_collection.disconnect(connected_resource)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
return r
@classmethod
def delete_relationship(cls, id, related_collection_name, related_resource=None):
"""
Deprecated for version 1.1.0. Please use update_relationship
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
if not related_resource:
r = this_resource.delete_relationship_collection(related_collection_name)
else:
r = this_resource.delete_individual_relationship(related_collection_name, related_resource)
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
@classmethod
def update_relationship(cls, id, related_collection_name, request_json):
r"""
Used to completely replace all the existing relationships with new ones.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
data = request_json['data']
if type(related_collection) in (One, ZeroOrOne): # Cardinality <= 1 so is a single obj
if not data and related_collection.single(): # disconnect the resource
related_collection.disconnect(related_collection.single())
elif not data:
pass # There is already no connected resource
else:
the_new_node = cls.get_class_from_type(data['type']).nodes.get(id=data['id'])
if related_collection.single(): # update the relationship
related_collection.reconnect(related_collection.single(), the_new_node)
                        the_rel = related_collection.relationship(the_new_node)
meta = data.get('meta')
if meta:
for k in meta.keys():
setattr(the_rel, k, meta[k])
the_rel.save()
else: # create the relationship
related_collection.connect(the_new_node, data.get('meta'))
else: # Cardinality > 1 so this is a collection of objects
old_nodes = related_collection.all()
for item in old_nodes: # removes all old connections
related_collection.disconnect(item)
for identifier in data: # adds all new connections
the_new_node = cls.get_class_from_type(identifier['type']).nodes.get(id=identifier['id'])
the_rel = related_collection.connect(the_new_node)
meta = identifier.get('meta')
if meta:
for k in meta.keys():
setattr(the_rel, k, meta[k])
the_rel.save()
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, TypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
except AttemptedCardinalityViolation:
r = application_codes.error_response([application_codes.ATTEMPTED_CARDINALITY_VIOLATION])
except MultipleNodesReturned:
r = application_codes.error_response([application_codes.MULTIPLE_NODES_WITH_ID_VIOLATION])
return r
@classmethod
def get_related_resources(cls, request_args, id, related_collection_name, related_resource=None):
try:
included = request_args.get('include').split(',')
        except AttributeError:
included = []
try:
this_resource = cls.nodes.get(id=id, active=True)
if not related_resource:
offset = request_args.get('page[offset]', 0)
limit = request_args.get('page[limit]', 20)
r = this_resource.related_resources_collection_response(related_collection_name, included, offset, limit)
else:
r = this_resource.related_resources_individual_response(related_collection_name, related_resource, included)
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
@classmethod
def set_related_resources_inactive(cls, id, related_collection_name, related_resource=None):
try:
this_resource = cls.nodes.get(id=id, active=True)
if not related_resource:
r = this_resource.set_related_resources_collection_inactive(related_collection_name)
else:
r = this_resource.set_individual_related_resource_inactive(related_collection_name, related_resource)
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
return r
@classmethod
def get_class_from_type(cls, the_type):
for the_cls in cls.__base__.__subclasses__():
if the_cls.__type__ == the_type:
return the_cls
return None
class EnumeratedTypeError(Exception):
pass
class SerializableStructuredRel(StructuredRel):
r"""
The Base Relationship that all Structured Relationships must inherit from. All relationships should be structured \
starting version 1.1.0 -- okay to use model=SerializableStructuredRel
"""
secret = []
updated = DateTimeProperty(default=datetime.now())
created = DateTimeProperty(default=datetime.now())
type = StringProperty(default="serializable_structured_rel")
def get_resource_identifier_object(self, end_node):
try:
response = dict()
response['id'] = end_node.id
response['type'] = end_node.type
response['meta'] = dict()
props = self.defined_properties()
            for attr_name in props.keys():
if attr_name not in self.secret:
response['meta'][attr_name] = getattr(self, attr_name)
return response
except Exception as e:
print type(e), e
raise
@classmethod
def get_relclass_from_type(cls, the_type):
for the_cls in cls.__subclasses__():
if the_cls.__type__ == the_type:
return the_cls
return None
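# --- Usage sketch (illustrative; not part of the library) -------------------
# A minimal, hypothetical example of replacing a to-one relationship through
# update_relationship. ``Article`` and its ``author`` relationship are
# assumptions for illustration; the payload shape follows
# http://jsonapi.org/format/#crud-updating-relationships
#
#     request_json = {
#         'data': {'type': 'people', 'id': '9', 'meta': {'role': 'editor'}}
#     }
#     response = Article.update_relationship('article-1', 'author', request_json)
#     # success returns 204 NO CONTENT; sending {'data': None} instead
#     # disconnects the currently related node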
|
buckmaxwell/neoapi
|
neoapi/serializable_structured_node.py
|
Python
|
mit
| 53,407
|
# Welcome spoke classes
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
# Vratislav Podzimek <vpodzime@redhat.com>
#
import sys
import re
import langtable
import os
from pyanaconda.ui.gui.hubs.summary import SummaryHub
from pyanaconda.ui.gui.spokes import StandaloneSpoke
from pyanaconda.ui.gui.utils import setup_gtk_direction, escape_markup, gtk_action_wait
from pyanaconda.ui.gui.xkl_wrapper import XklWrapper
from pyanaconda.ui.gui.spokes.lib.lang_locale_handler import LangLocaleHandler
from pyanaconda import localization
from pyanaconda.product import distributionText, isFinal, productName, productVersion
from pyanaconda import keyboard
from pyanaconda import flags
from pyanaconda import geoloc
from pyanaconda.i18n import _, C_
from pyanaconda.iutil import is_unsupported_hw, ipmi_report
from pyanaconda.constants import DEFAULT_LANG, DEFAULT_KEYBOARD, IPMI_ABORTED
import logging
log = logging.getLogger("anaconda")
__all__ = ["WelcomeLanguageSpoke"]
class WelcomeLanguageSpoke(LangLocaleHandler, StandaloneSpoke):
"""
.. inheritance-diagram:: WelcomeLanguageSpoke
:parts: 3
"""
mainWidgetName = "welcomeWindow"
focusWidgetName = "languageEntry"
uiFile = "spokes/welcome.glade"
helpFile = "WelcomeSpoke.xml"
builderObjects = ["languageStore", "languageStoreFilter", "localeStore",
"welcomeWindow", "betaWarnDialog", "unsupportedHardwareDialog"]
preForHub = SummaryHub
priority = 0
def __init__(self, *args, **kwargs):
StandaloneSpoke.__init__(self, *args, **kwargs)
LangLocaleHandler.__init__(self)
self._xklwrapper = XklWrapper.get_instance()
self._origStrings = {}
def apply(self):
(store, itr) = self._localeSelection.get_selected()
locale = store[itr][1]
self._set_lang(locale)
localization.setup_locale(locale, self.data.lang, text_mode=False)
# Skip timezone and keyboard default setting for kickstart installs.
# The user may have provided these values via kickstart and if not, we
# need to prompt for them.
if flags.flags.automatedInstall:
return
geoloc_timezone = geoloc.get_timezone()
loc_timezones = localization.get_locale_timezones(self.data.lang.lang)
if geoloc_timezone:
# (the geolocation module makes sure that the returned timezone is
# either a valid timezone or None)
self.data.timezone.timezone = geoloc_timezone
elif loc_timezones and not self.data.timezone.timezone:
# no data is provided by Geolocation, try to get timezone from the
# current language
self.data.timezone.timezone = loc_timezones[0]
self._set_keyboard_defaults(self.data.lang.lang)
def _set_keyboard_defaults(self, locale):
"""
Set default keyboard settings (layouts, layout switching).
:param locale: locale string (see localization.LANGCODE_RE)
:type locale: str
:return: list of preferred keyboard layouts
:rtype: list of strings
:raise InvalidLocaleSpec: if an invalid locale is given (see
localization.LANGCODE_RE)
"""
#remove all X layouts that are not valid X layouts (unsupported)
#from the ksdata
#XXX: could go somewhere else, but we need X running and we have
# XklWrapper instance here
        # iterate over a copy since we remove items from the original list
        for layout in self.data.keyboard.x_layouts[:]:
if not self._xklwrapper.is_valid_layout(layout):
self.data.keyboard.x_layouts.remove(layout)
if self.data.keyboard.x_layouts:
#do not add layouts if there are any specified in the kickstart
return
layouts = localization.get_locale_keyboards(locale)
if layouts:
# take the first locale (with highest rank) from the list and
# store it normalized
new_layouts = [keyboard.normalize_layout_variant(layouts[0])]
if not langtable.supports_ascii(layouts[0]):
# does not support typing ASCII chars, append the default layout
new_layouts.append(DEFAULT_KEYBOARD)
else:
log.error("Failed to get layout for chosen locale '%s'", locale)
new_layouts = [DEFAULT_KEYBOARD]
self.data.keyboard.x_layouts = new_layouts
if flags.can_touch_runtime_system("replace runtime X layouts", touch_live=True):
self._xklwrapper.replace_layouts(new_layouts)
if len(new_layouts) >= 2 and not self.data.keyboard.switch_options:
#initialize layout switching if needed
self.data.keyboard.switch_options = ["grp:alt_shift_toggle"]
if flags.can_touch_runtime_system("init layout switching", touch_live=True):
self._xklwrapper.set_switching_options(["grp:alt_shift_toggle"])
# activate the language-default layout instead of the additional
# one
self._xklwrapper.activate_default_layout()
@property
def completed(self):
if flags.flags.automatedInstall and self.data.lang.seen:
return self.data.lang.lang and self.data.lang.lang != ""
else:
return False
def _row_is_separator(self, model, itr, *args):
return model[itr][3]
def initialize(self):
self._languageStore = self.builder.get_object("languageStore")
self._languageStoreFilter = self.builder.get_object("languageStoreFilter")
self._languageEntry = self.builder.get_object("languageEntry")
self._langSelection = self.builder.get_object("languageViewSelection")
self._langSelectedRenderer = self.builder.get_object("langSelectedRenderer")
self._langSelectedColumn = self.builder.get_object("langSelectedColumn")
self._langView = self.builder.get_object("languageView")
self._localeView = self.builder.get_object("localeView")
self._localeStore = self.builder.get_object("localeStore")
self._localeSelection = self.builder.get_object("localeViewSelection")
LangLocaleHandler.initialize(self)
# We need to tell the view whether something is a separator or not.
self._langView.set_row_separator_func(self._row_is_separator, None)
# We can use the territory from geolocation here
# to preselect the translation, when it's available.
territory = geoloc.get_territory_code(wait=True)
# bootopts and kickstart have priority over geoip
if self.data.lang.lang and self.data.lang.seen:
locales = [self.data.lang.lang]
else:
locales = localization.get_territory_locales(territory) or [DEFAULT_LANG]
# get the data models
filter_store = self._languageStoreFilter
store = filter_store.get_model()
# get language codes for the locales
langs = [localization.parse_langcode(locale)['language'] for locale in locales]
# check which of the geolocated languages have translations
# and store the iterators for those languages in a dictionary
langs_with_translations = {}
itr = store.get_iter_first()
while itr:
row_lang = store[itr][2]
if row_lang in langs:
langs_with_translations[row_lang] = itr
itr = store.iter_next(itr)
# if there are no translations for the given locales,
# use default
if not langs_with_translations:
self._set_lang(DEFAULT_LANG)
localization.setup_locale(DEFAULT_LANG, self.data.lang, text_mode=False)
lang_itr, _locale_itr = self._select_locale(self.data.lang.lang)
langs_with_translations[DEFAULT_LANG] = lang_itr
locales = [DEFAULT_LANG]
# go over all geolocated languages in reverse order
# and move those we have translation for to the top of the
# list, above the separator
for lang in reversed(langs):
itr = langs_with_translations.get(lang)
if itr:
store.move_after(itr, None)
else:
# we don't have translation for this language,
# so dump all locales for it
locales = [l for l in locales
if localization.parse_langcode(l)['language'] != lang]
# And then we add a separator after the selected best language
# and any additional languages (that have translations) from geoip
newItr = store.insert(len(langs_with_translations))
store.set(newItr, 0, "", 1, "", 2, "", 3, True)
# setup the "best" locale
self._set_lang(locales[0])
localization.setup_locale(locales[0], self.data.lang)
self._select_locale(self.data.lang.lang)
def _retranslate_one(self, widgetName, context=None):
widget = self.builder.get_object(widgetName)
if not widget:
return
        if widget not in self._origStrings:
self._origStrings[widget] = widget.get_label()
before = self._origStrings[widget]
if context is not None:
widget.set_label(C_(context, before))
else:
widget.set_label(_(before))
def retranslate(self):
# Change the translations on labels and buttons that do not have
# substitution text.
for name in ["pickLanguageLabel", "betaWarnTitle", "betaWarnDesc"]:
self._retranslate_one(name)
# It would be nice to be able to read the translation context from the
# widget, but we live in an imperfect world.
# See also: https://bugzilla.gnome.org/show_bug.cgi?id=729066
for name in ["quitButton", "continueButton"]:
self._retranslate_one(name, "GUI|Welcome|Beta Warn Dialog")
# The welcome label is special - it has text that needs to be
# substituted.
welcomeLabel = self.builder.get_object("welcomeLabel")
welcomeLabel.set_text(_("WELCOME TO %(name)s %(version)s.") %
{"name" : productName.upper(), "version" : productVersion})
# Retranslate the language (filtering) entry's placeholder text
languageEntry = self.builder.get_object("languageEntry")
        if languageEntry not in self._origStrings:
self._origStrings[languageEntry] = languageEntry.get_placeholder_text()
languageEntry.set_placeholder_text(_(self._origStrings[languageEntry]))
# And of course, don't forget the underlying window.
self.window.set_property("distribution", distributionText().upper())
self.window.retranslate()
def refresh(self):
self._select_locale(self.data.lang.lang)
self._languageEntry.set_text("")
self._languageStoreFilter.refilter()
def _add_language(self, store, native, english, lang):
native_span = '<span lang="%s">%s</span>' % \
(escape_markup(lang),
escape_markup(native))
store.append([native_span, english, lang, False])
def _add_locale(self, store, native, locale):
native_span = '<span lang="%s">%s</span>' % \
(escape_markup(re.sub(r'\..*', '', locale)),
escape_markup(native))
store.append([native_span, locale])
# Signal handlers.
def on_lang_selection_changed(self, selection):
(_store, selected) = selection.get_selected_rows()
LangLocaleHandler.on_lang_selection_changed(self, selection)
if not selected and hasattr(self.window, "set_may_continue"):
self.window.set_may_continue(False)
def on_locale_selection_changed(self, selection):
(store, selected) = selection.get_selected_rows()
if hasattr(self.window, "set_may_continue"):
self.window.set_may_continue(len(selected) > 0)
if selected:
lang = store[selected[0]][1]
self._set_lang(lang)
localization.setup_locale(lang)
self.retranslate()
# Reset the text direction
setup_gtk_direction()
# Redraw the window to reset the sidebar to where it needs to be
self.window.queue_draw()
# Override the default in StandaloneSpoke so we can display the beta
# warning dialog first.
def _on_continue_clicked(self, window, user_data=None):
# Don't display the betanag dialog if this is the final release or
# when autostep has been requested as betanag breaks the autostep logic.
if not isFinal and not self.data.autostep.seen:
dlg = self.builder.get_object("betaWarnDialog")
with self.main_window.enlightbox(dlg):
rc = dlg.run()
dlg.hide()
if rc != 1:
ipmi_report(IPMI_ABORTED)
sys.exit(0)
if productName.startswith("Red Hat ") and \
is_unsupported_hw() and not self.data.unsupportedhardware.unsupported_hardware:
dlg = self.builder.get_object("unsupportedHardwareDialog")
with self.main_window.enlightbox(dlg):
rc = dlg.run()
dlg.destroy()
if rc != 1:
ipmi_report(IPMI_ABORTED)
sys.exit(0)
StandaloneSpoke._on_continue_clicked(self, window, user_data)
@gtk_action_wait
def _set_lang(self, lang):
# This is *hopefully* safe. The only threads that might be running
# outside of the GIL are those doing file operations, the Gio dbus
# proxy thread, and calls from the Gtk main loop. The file operations
# won't be doing things that may access the environment, fingers
# crossed, the GDbus thread shouldn't be doing anything weird since all
# of our dbus calls are from python and synchronous. Using
# gtk_action_wait ensures that this is Gtk main loop thread, and it's
# holding the GIL.
#
        # There is a lot of uncertainty and weaseliness in those statements.
# This is not good code.
#
        # We cannot get around setting $LANG. Python's gettext implementation
        # differs from C in that it consults only the environment for the
        # current language, not the data set via setlocale. If we want
        # translations from python modules to work, something needs to be set
        # in the environment when the language changes.
# pylint: disable=environment-modify
os.environ["LANG"] = lang
|
marmarek/anaconda
|
pyanaconda/ui/gui/spokes/welcome.py
|
Python
|
gpl-2.0
| 15,586
|
# http://www.codewars.com/kata/52ad1db4b2651f744d000394
import math
def reindeer(presents):
return 2 + math.ceil(float(presents) / 30)
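# Illustrative checks (an addition, not part of the kata submission): two
# reindeer pull the sleigh and each block of up to 30 presents adds one more.
if __name__ == '__main__':
    assert reindeer(0) == 2
    assert reindeer(1) == 3
    assert reindeer(30) == 3
    assert reindeer(31) == 4
    assert reindeer(180) == 8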
|
Bodigrim/katas
|
src/python/6-How-Many-Reindeers.py
|
Python
|
bsd-2-clause
| 137
|
#!/usr/bin/env python3
# -*- mode: python; indent-tabs-mode: nil; c-basic-offset: 4; tab-width: 4; -*-
# vim: set shiftwidth=4 softtabstop=4 expandtab:
"""Support for reading from an NCAR EOL RAF PostgreSQL database of
real-time flight data.
2014 Copyright University Corporation for Atmospheric Research
This file is part of the "django-ncharts" package.
The license and distribution terms for this file may be found in the
file LICENSE in this package.
"""
from datetime import datetime
import logging
import sys
import threading
import pytz
import numpy as np
import psycopg2
from ncharts import exceptions as nc_exc
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RAFDatabase(object):
"""Support for reading time series from NCAR EOL RAF PostgreSQL database.
"""
__cached_connections = {}
__cache_lock = threading.Lock()
@staticmethod
def get_connection(
database="real-time-GV",
user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
password=None):
"""Return a psycopg2 database connection.
The returned connection can be shared between threads.
If the connection is kept open, then for a given
database, user, host and port, this method
will always return the same connection.
Args:
            database, user, host, port, password: Parameters needed
to establish a connection to the PostgreSQL database.
Returns:
A psycopg2.connection
Raises:
psycopg2.Error
"""
hashval = hash(database + user + host + str(port))
with RAFDatabase.__cache_lock:
conn = None
if hashval in RAFDatabase.__cached_connections:
conn = RAFDatabase.__cached_connections[hashval]
# connection closed: nonzero if it is closed or broken.
# Mainly just checking here if it is broken, in which
# case, close and attempt a re-connect.
if conn.closed:
try:
conn.rollback()
except psycopg2.Error as exc:
_logger.warning("%s rollback: %s", conn, exc)
try:
conn.close()
except psycopg2.Error as exc:
_logger.warning("%s close: %s", conn, exc)
del RAFDatabase.__cached_connections[hashval]
conn = None
if not conn:
conn = psycopg2.connect(
database=database, user=user,
host=host, port=port, password=password)
conn.set_session(
isolation_level="READ COMMITTED",
readonly=True)
RAFDatabase.__cached_connections[hashval] = conn
return conn
@staticmethod
def close_connection(conn):
"""Close a psycopg2 database connection.
Args:
conn: connection to close.
Raises:
nothing
According to http://initd.org/psycopg/docs/connection.html:
Changed in version 2.5: if the connection is used in a with
statement, the (rollback) method is automatically called if
an exception is raised in the with block.
All connections here are used in a with statement, so we
don't have to call rollback() before close.
"""
with RAFDatabase.__cache_lock:
for (hashval, cconn) in RAFDatabase.__cached_connections.items():
if conn == cconn:
try:
conn.close()
except psycopg2.Error as exc:
_logger.warning("%s close: %s", conn, exc)
del RAFDatabase.__cached_connections[hashval]
break
def __init__(
self,
database="real-time-GV",
user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
password=None,
table="raf_lrt"):
"""Construct an instance of RAF database connection.
Args:
database, user, host, port, password: Usual parameters
needed to create a PostgreSQL connection.
table: name of table in the database which contains
the time-series data to be read.
Raises:
nc_exc.NoDataException
"""
try:
self.conn = RAFDatabase.get_connection(
database=database, user=user,
host=host, port=port, password=password)
self.database = database
self.user = user
self.host = host
self.port = port
self.password = password
self.table = table
except psycopg2.Error as exc:
raise nc_exc.NoDataException(
"Database not available: {}".format(exc))
def get_variables(self):
"""Fetch pertinent fields from the 'variable_list' table in
the RAF database, such as the list of variable names, their units, and
missing values.
Raises:
nc_exc.NoDataException
"""
try:
with self.conn as conn:
with conn.cursor() as cur:
cur.execute("\
SELECT name, units, long_name, ndims, dims, missing_value from variable_list;")
variables = {}
for var in cur:
dimnames = ["time"]
# make a bold assumption that a second dimension
# is a particle-probe bin number
if var[3] > 1:
dimnames.append("bin")
variables[var[0]] = {
"units": var[1],
"long_name": var[2],
"dimnames": dimnames,
"shape": var[4]
}
return variables
except psycopg2.Error as exc:
# psycopg.connections are thread safe
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
"No variables found: {}".format(exc))
def read_times(
self,
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max)):
"""Read datetimes from the table within a range.
Raises:
nc_exc.NoDataException
"""
start_time = start_time.replace(tzinfo=None)
end_time = end_time.replace(tzinfo=None)
# _logger.debug("read_times, table=%s", self.table)
vname = "datetime"
try:
with self.conn as conn:
with conn.cursor() as cur:
# datetimes in database are returned to python as timezone naive.
cur.execute(
"SELECT {} FROM {} WHERE {} >= %s AND {} < %s;"
.format(vname, self.table, vname, vname),
(start_time, end_time))
return [pytz.utc.localize(x[0]).timestamp() for x in cur]
except psycopg2.Error as exc:
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
"read {}: {}".format(vname, exc))
def get_start_time(self):
"""Read first datatime from the database table.
Raises:
nc_exc.NoDataException
"""
vname = "datetime"
try:
with self.conn as conn:
with conn.cursor() as cur:
# datetimes in database are returned to python as timezone naive.
cur.execute(
"SELECT {} FROM {} FETCH FIRST 1 ROW ONLY;"
.format(vname, self.table))
start_time = cur.fetchone()
if not start_time:
_logger.warning("%s: read %s: no data", conn, vname)
raise nc_exc.NoDataException("read {}".format(vname))
return pytz.utc.localize(start_time[0])
except psycopg2.Error as exc:
_logger.warning("%s: read %s: %s", conn, vname, exc)
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException("read {}: {}".format(vname, exc))
def read_time_series(
self,
variables=(),
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max),
size_limit=1000 * 1000 * 1000):
"""Read times and variables from the table within a time period.
For each variable, its missing_value will be read from the
variable_list table. Values read from the time series table
which match the missing_value will be set to float('nan').
Args:
variables: list or tuple of variable names to read.
start_time: starting datetime of data to be read.
end_time: ending datetime of data to be read.
size_limit: attempt to screen outrageous requests.
Returns:
A one element dict, compatible with that returned by
netcdf.read_time_series(), containing for a series_name of '':
{
'time' : list of UTC timestamps,
'data': lists of numpy.ndarray containing
the data for each variable,
'vmap': dict by variable name,
containing the index into the series data for the variable,
'dim2': dict by variable name, of values for second
dimension of the data, such as height.
}
Raises:
nc_exc.NoDataException
"""
total_size = 0
start_time = start_time.replace(tzinfo=None)
end_time = end_time.replace(tzinfo=None)
vtime = self.read_times(start_time=start_time, end_time=end_time)
# _logger.debug("read_times, len=%d", len(vtime))
total_size += sys.getsizeof(vtime)
if total_size > size_limit:
raise nc_exc.TooMuchDataException(
"too many time values requested, size={0} MB".\
format(total_size/(1000 * 1000)))
vdata = []
vmap = {}
vdim2 = {}
try:
with self.conn as conn:
with conn.cursor() as cur:
for vname in variables:
operation = "read variable_list"
# _logger.debug("vname=%s",vname)
cur.execute(
"SELECT dims, missing_value from variable_list where name=%s;",
(vname,))
vinfo = cur.fetchall()
# _logger.debug("vinfo=%s",vinfo)
dims = vinfo[0][0]
dims[0] = len(vtime)
missval = vinfo[0][1]
if len(dims) > 1:
# In initial CSET data, dims for CUHSAS_RWOOU
# in variable_list was [1,99]
# Seems that the 99 should have been 100,
# which is what is returned by this:
operation = "read dimension of {}".format(vname)
cur.execute("\
SELECT array_upper({},1) FROM {} FETCH FIRST 1 ROW ONLY;\
".format(vname, self.table))
dimsx = cur.fetchall()[0]
dims[1] = dimsx[0]
# _logger.debug("vname=%s, dims=%s, dimsx=%s", vname, dims, dimsx)
operation = "read {}".format(vname)
cur.execute("\
SELECT {} FROM {} WHERE datetime >= %s AND datetime < %s;\
".format(vname, self.table), (start_time, end_time))
cdata = np.ma.masked_values(np.ndarray(
shape=dims, buffer=np.array(
[v for v in cur], dtype=float)), value=missval)
if isinstance(cdata, np.ma.core.MaskedArray):
# _logger.debug("is MaskedArray")
cdata = cdata.filled(fill_value=float('nan'))
total_size += sys.getsizeof(cdata)
if total_size > size_limit:
raise nc_exc.TooMuchDataException(
"too many values requested, size={0} MB".\
format(total_size/(1000 * 1000)))
vindex = len(vdata)
vdata.append(cdata)
vmap[vname] = vindex
if len(dims) > 1:
vdim2[vname] = {
"data": [i for i in range(dims[1])],
"name": "bin",
"units": ""
}
return {
'': {
'time': vtime,
'data': vdata,
'vmap': vmap,
'dim2': vdim2,
}
}
except psycopg2.Error as exc:
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
(operation + ": {}").format(exc))
def test_func():
""" """
db = RAFDatabase(
database="real-time-GV", user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
table="raf_lrt")
variables = db.get_variables()
time0 = db.get_start_time()
_logger.debug("time0=%s", time0)
# times = db.read_times()
# _logger.debug("all times=%s",times)
t1 = pytz.utc.localize(datetime(2015, 6, 29, 15, 10, 0))
t2 = pytz.utc.localize(datetime(2015, 6, 29, 15, 11, 0))
times = db.read_times(start_time=t1, end_time=t2)
_logger.debug("times=%s", times)
data = db.read_time_series(("TASX",), start_time=t1, end_time=t2)
_logger.debug("data=%s", data)
data = db.read_time_series(("CUHSAS_RWOOU",), start_time=t1, end_time=t2)
_logger.debug("data=%s", data)
    RAFDatabase.close_connection(db.conn)  # close_connection() expects the psycopg2 connection, not the wrapper
if __name__ == '__main__':
test_func()
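# --- Consuming read_time_series output (illustrative sketch) ----------------
# The returned dict maps the series name '' to time, data, vmap and dim2;
# a hypothetical caller indexes a variable's ndarray through vmap:
#
#     result = db.read_time_series(("TASX",), start_time=t1, end_time=t2)
#     series = result['']
#     tas = series['data'][series['vmap']['TASX']]   # numpy.ndarray of values
#     times = series['time']                         # list of UTC timestamps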
|
ncareol/ncharts
|
ncharts/raf_database.py
|
Python
|
bsd-2-clause
| 14,670
|
# pylint: disable=E1101
'''
Utilities for contentstore tests
'''
import json
from django.test.client import Client
from django.contrib.auth.models import User
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.xml_importer import import_from_xml
from student.models import Registration
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
from contentstore.utils import reverse_url
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
def parse_json(response):
"""Parse response, which is assumed to be json"""
return json.loads(response.content)
def user(email):
"""look up a user by email"""
return User.objects.get(email=email)
def registration(email):
"""look up registration object by email"""
return Registration.objects.get(user__email=email)
class AjaxEnabledTestClient(Client):
"""
Convenience class to make testing easier.
"""
def ajax_post(self, path, data=None, content_type="application/json", **kwargs):
"""
Convenience method for client post which serializes the data into json and sets the accept type
to json
"""
if not isinstance(data, basestring):
data = json.dumps(data or {})
kwargs.setdefault("HTTP_X_REQUESTED_WITH", "XMLHttpRequest")
kwargs.setdefault("HTTP_ACCEPT", "application/json")
return self.post(path=path, data=data, content_type=content_type, **kwargs)
def get_html(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to html
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="text/html", **extra)
def get_json(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to json
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="application/json", **extra)
class CourseTestCase(ModuleStoreTestCase):
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client can log them in.
The test user is created in the ModuleStoreTestCase setUp method.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
user_password = super(CourseTestCase, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=user_password)
self.course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
)
def create_non_staff_authed_user_client(self, authenticate=True):
"""
        Create a non-staff user, log them in (if authenticate=True), and return the (client, user) pair to use for testing.
"""
nonstaff, password = self.create_non_staff_user()
client = Client()
if authenticate:
client.login(username=nonstaff.username, password=password)
nonstaff.is_authenticated = True
return client, nonstaff
def populate_course(self):
"""
Add 2 chapters, 4 sections, 8 verticals, 16 problems to self.course (branching 2)
"""
user_id = self.user.id
def descend(parent, stack):
xblock_type = stack.pop(0)
for _ in range(2):
child = ItemFactory.create(category=xblock_type, parent_location=parent.location, user_id=user_id)
if stack:
descend(child, stack)
descend(self.course, ['chapter', 'sequential', 'vertical', 'problem'])
def reload_course(self):
"""
Reloads the course object from the database
"""
self.course = self.store.get_course(self.course.id)
def save_course(self):
"""
Updates the course object in the database
"""
self.course.save()
self.store.update_item(self.course, self.user.id)
TEST_VERTICAL = 'vertical_test'
PRIVATE_VERTICAL = 'a_private_vertical'
PUBLISHED_VERTICAL = 'a_published_vertical'
SEQUENTIAL = 'vertical_sequential'
LOCKED_ASSET_KEY = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/sample_static.txt')
def import_and_populate_course(self):
"""
Imports the test toy course and populates it with additional test data
"""
content_store = contentstore()
import_from_xml(self.store, self.user.id, 'common/test/data/', ['toy'], static_content_store=content_store)
course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
# create an Orphan
# We had a bug where orphaned draft nodes caused export to fail. This is here to cover that case.
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
vertical.location = vertical.location.replace(name='no_references')
self.store.update_item(vertical, self.user.id, allow_not_found=True)
orphan_vertical = self.store.get_item(vertical.location)
self.assertEqual(orphan_vertical.location.name, 'no_references')
self.assertEqual(len(orphan_vertical.children), len(vertical.children))
# create a Draft vertical
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
draft_vertical = self.store.convert_to_draft(vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(draft_vertical))
# create a Private (draft only) vertical
private_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PRIVATE_VERTICAL)
self.assertFalse(self.store.has_published_version(private_vertical))
# create a Published (no draft) vertical
public_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PUBLISHED_VERTICAL)
public_vertical = self.store.publish(public_vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(public_vertical))
# add the new private and new public as children of the sequential
sequential = self.store.get_item(course_id.make_usage_key('sequential', self.SEQUENTIAL))
sequential.children.append(private_vertical.location)
sequential.children.append(public_vertical.location)
self.store.update_item(sequential, self.user.id)
# lock an asset
content_store.set_attr(self.LOCKED_ASSET_KEY, 'locked', True)
# create a non-portable link - should be rewritten in new courses
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
new_data = html_module.data = html_module.data.replace(
'/static/',
'/c4x/{0}/{1}/asset/'.format(course_id.org, course_id.course)
)
self.store.update_item(html_module, self.user.id)
html_module = self.store.get_item(html_module.location)
self.assertEqual(new_data, html_module.data)
return course_id
def check_populated_course(self, course_id):
"""
Verifies the content of the given course, per data that was populated in import_and_populate_course
"""
items = self.store.get_items(
course_id,
qualifiers={'category': 'vertical'},
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.check_verticals(items)
def verify_item_publish_state(item, publish_state):
"""Verifies the publish state of the item is as expected."""
self.assertEqual(self.store.has_published_version(item), publish_state)
def get_and_verify_publish_state(item_type, item_name, publish_state):
"""Gets the given item from the store and verifies the publish state of the item is as expected."""
item = self.store.get_item(course_id.make_usage_key(item_type, item_name))
verify_item_publish_state(item, publish_state)
return item
# verify that the draft vertical is draft
vertical = get_and_verify_publish_state('vertical', self.TEST_VERTICAL, True)
for child in vertical.get_children():
verify_item_publish_state(child, True)
# make sure that we don't have a sequential that is not in draft mode
sequential = get_and_verify_publish_state('sequential', self.SEQUENTIAL, True)
# verify that we have the private vertical
private_vertical = get_and_verify_publish_state('vertical', self.PRIVATE_VERTICAL, False)
# verify that we have the public vertical
public_vertical = get_and_verify_publish_state('vertical', self.PUBLISHED_VERTICAL, True)
# verify verticals are children of sequential
for vert in [vertical, private_vertical, public_vertical]:
self.assertIn(vert.location, sequential.children)
# verify textbook exists
course = self.store.get_course(course_id)
self.assertGreater(len(course.textbooks), 0)
# verify asset attributes of locked asset key
self.assertAssetsEqual(self.LOCKED_ASSET_KEY, self.LOCKED_ASSET_KEY.course_key, course_id)
# verify non-portable links are rewritten
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
self.assertIn('/static/foo.jpg', html_module.data)
return course
def assertCoursesEqual(self, course1_id, course2_id):
"""
Verifies the content of the two given courses are equal
"""
course1_items = self.store.get_items(course1_id)
course2_items = self.store.get_items(course2_id)
self.assertGreater(len(course1_items), 0) # ensure it found content instead of [] == []
if len(course1_items) != len(course2_items):
course1_block_ids = set([item.location.block_id for item in course1_items])
course2_block_ids = set([item.location.block_id for item in course2_items])
raise AssertionError(
u"Course1 extra blocks: {}; course2 extra blocks: {}".format(
course1_block_ids - course2_block_ids, course2_block_ids - course1_block_ids
)
)
for course1_item in course1_items:
course1_item_loc = course1_item.location
course2_item_loc = course2_id.make_usage_key(course1_item_loc.block_type, course1_item_loc.block_id)
if course1_item_loc.block_type == 'course':
# mongo uses the run as the name, split uses 'course'
store = self.store._get_modulestore_for_courseid(course2_id) # pylint: disable=protected-access
new_name = 'course' if isinstance(store, SplitMongoModuleStore) else course2_item_loc.run
course2_item_loc = course2_item_loc.replace(name=new_name)
course2_item = self.store.get_item(course2_item_loc)
# compare published state
self.assertEqual(
self.store.has_published_version(course1_item),
self.store.has_published_version(course2_item)
)
# compare data
self.assertEqual(hasattr(course1_item, 'data'), hasattr(course2_item, 'data'))
if hasattr(course1_item, 'data'):
self.assertEqual(course1_item.data, course2_item.data)
# compare meta-data
self.assertEqual(own_metadata(course1_item), own_metadata(course2_item))
# compare children
self.assertEqual(course1_item.has_children, course2_item.has_children)
if course1_item.has_children:
expected_children = []
for course1_item_child in course1_item.children:
expected_children.append(
course2_id.make_usage_key(course1_item_child.block_type, course1_item_child.block_id)
)
self.assertEqual(expected_children, course2_item.children)
# compare assets
content_store = self.store.contentstore
course1_assets, count_course1_assets = content_store.get_all_content_for_course(course1_id)
_, count_course2_assets = content_store.get_all_content_for_course(course2_id)
self.assertEqual(count_course1_assets, count_course2_assets)
for asset in course1_assets:
asset_son = asset.get('content_son', asset['_id'])
self.assertAssetsEqual(asset_son, course1_id, course2_id)
def check_verticals(self, items):
""" Test getting the editing HTML for each vertical. """
# assert is here to make sure that the course being tested actually has verticals (units) to check.
self.assertGreater(len(items), 0, "Course has no verticals (units) to check")
for descriptor in items:
resp = self.client.get_html(get_url('container_handler', descriptor.location))
self.assertEqual(resp.status_code, 200)
def assertAssetsEqual(self, asset_son, course1_id, course2_id):
"""Verifies the asset of the given key has the same attributes in both given courses."""
content_store = contentstore()
category = asset_son.block_type if hasattr(asset_son, 'block_type') else asset_son['category']
filename = asset_son.block_id if hasattr(asset_son, 'block_id') else asset_son['name']
course1_asset_attrs = content_store.get_attrs(course1_id.make_asset_key(category, filename))
course2_asset_attrs = content_store.get_attrs(course2_id.make_asset_key(category, filename))
self.assertEqual(len(course1_asset_attrs), len(course2_asset_attrs))
for key, value in course1_asset_attrs.iteritems():
if key in ['_id', 'filename', 'uploadDate', 'content_son', 'thumbnail_location']:
pass
else:
self.assertEqual(value, course2_asset_attrs[key])
def get_url(handler_name, key_value, key_name='usage_key_string', kwargs=None):
"""
Helper function for getting HTML for a page in Studio and checking that it does not error.
"""
return reverse_url(handler_name, key_name, key_value, kwargs)
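# --- Usage sketch (illustrative; not part of the test suite) ----------------
# A hypothetical test built on the helpers above; the test name is an
# assumption for illustration:
#
#     class MyContainerTest(CourseTestCase):
#         def test_container_pages_render(self):
#             self.populate_course()
#             verticals = self.store.get_items(
#                 self.course.id, qualifiers={'category': 'vertical'})
#             self.check_verticals(verticals)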
|
epam-mooc/edx-platform
|
cms/djangoapps/contentstore/tests/utils.py
|
Python
|
agpl-3.0
| 14,639
|
from gluon.contrib.appconfig import AppConfig
myconf = AppConfig(reload=True)
response.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.take('forms.separator')
db = DAL('sqlite://storage.sqlite')
from gluon.tools import Auth, Service, PluginManager
auth = Auth(db)
service = Service()
plugins = PluginManager()
db.define_table(auth.settings.table_user_name,
Field('first_name', length=128, default=''),
Field('last_name', length=128, default=''),
Field('username', length=128, default='',unique=True),
Field('email', length=128, default='', unique=True),
Field('password', 'password', length=512,readable=False, label='Password'),
Field('registration_key', length=512,writable=False, readable=False, default=''),
Field('reset_password_key', length=512,writable=False, readable=False, default=''),
Field('registration_id', length=512,writable=False, readable=False, default=''))
custom_auth_table = db[auth.settings.table_user_name] # get the custom_auth_table
custom_auth_table.first_name.requires = \
IS_NOT_EMPTY(error_message=auth.messages.is_empty)
custom_auth_table.last_name.requires = \
IS_NOT_EMPTY(error_message=auth.messages.is_empty)
custom_auth_table.password.requires = [IS_STRONG(min=8,upper=1,lower=1,special=1), CRYPT()]
custom_auth_table.username.requires = [IS_NOT_EMPTY(),IS_NOT_IN_DB(db,custom_auth_table.username)]
custom_auth_table.email.requires = [IS_EMAIL(error_message=auth.messages.invalid_email),IS_NOT_IN_DB(db, custom_auth_table.email)]
auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table
## create all tables needed by auth if not custom tables
auth.define_tables(username=True)
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')
mail.settings.sender = myconf.take('smtp.sender')
mail.settings.login = myconf.take('smtp.login')
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
db.define_table('image',
    Field('title', unique=True),
    Field('author'),
    Field('date', 'date'),
    Field('email'),
    Field('description', 'text'),
    Field('file', 'upload'),
    format='%(title)s')
db.image.title.requires = IS_NOT_IN_DB(db, db.image.title)
db.image.description.requires = IS_NOT_EMPTY()
db.image.date.requires = IS_NOT_EMPTY()
db.image.author.requires = IS_NOT_EMPTY()
db.image.email.requires = [IS_NOT_EMPTY(), IS_EMAIL()]  # a list applies both validators; 'and' would keep only one
db.image.file.requires = IS_IMAGE()  # the image validator belongs on the upload field, not on email
db.define_table('post',
Field('recipe', 'reference image'),
Field('visitor'),
Field('email'),
Field('comment', 'text'))
db.post.recipe.requires = IS_IN_DB(db, db.image.id, '%(title)s')
db.post.visitor.requires = IS_NOT_EMPTY()
db.post.email.requires = IS_EMAIL()
db.post.comment.requires = IS_NOT_EMPTY()
db.post.recipe.writable = db.post.recipe.readable = False
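# --- Illustrative controller sketch (an assumption, not part of this model) --
# In a hypothetical controllers/default.py, the tables above would typically
# be exposed through SQLFORM, e.g.:
#
#     def add_image():
#         form = SQLFORM(db.image)
#         if form.process().accepted:
#             redirect(URL('index'))
#         return dict(form=form)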
|
Priyansh2/test
|
web2py/cookbook/models/db.py
|
Python
|
gpl-3.0
| 3,292
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Collections of messages and their translations, called cliques. Also
collections of cliques (uber-cliques).
'''
import re
import types
from grit import constants
from grit import exception
from grit import lazy_re
from grit import pseudo
from grit import pseudo_rtl
from grit import tclib
class UberClique(object):
'''A factory (NOT a singleton factory) for making cliques. It has several
methods for working with the cliques created using the factory.
'''
def __init__(self):
# A map from message ID to list of cliques whose source messages have
# that ID. This will contain all cliques created using this factory.
# Different messages can have the same ID because they have the
# same translateable portion and placeholder names, but occur in different
# places in the resource tree.
#
# Each list of cliques is kept sorted by description, to achieve
# stable results from the BestClique method, see below.
self.cliques_ = {}
# A map of clique IDs to list of languages to indicate translations where we
# fell back to English.
self.fallback_translations_ = {}
# A map of clique IDs to list of languages to indicate missing translations.
self.missing_translations_ = {}
def _AddMissingTranslation(self, lang, clique, is_error):
tl = self.fallback_translations_
if is_error:
tl = self.missing_translations_
id = clique.GetId()
if id not in tl:
tl[id] = {}
if lang not in tl[id]:
tl[id][lang] = 1
def HasMissingTranslations(self):
return len(self.missing_translations_) > 0
def MissingTranslationsReport(self):
'''Returns a string suitable for printing to report missing
and fallback translations to the user.
'''
def ReportTranslation(clique, langs):
text = clique.GetMessage().GetPresentableContent()
# The text 'error' (usually 'Error:' but we are conservative)
# can trigger some build environments (Visual Studio, we're
# looking at you) to consider invocation of grit to have failed,
# so we make sure never to output that word.
extract = re.sub('(?i)error', 'REDACTED', text[0:40])[0:40]
ellipsis = ''
if len(text) > 40:
ellipsis = '...'
langs_extract = langs[0:6]
describe_langs = ','.join(langs_extract)
if len(langs) > 6:
describe_langs += " and %d more" % (len(langs) - 6)
return " %s \"%s%s\" %s" % (clique.GetId(), extract, ellipsis,
describe_langs)
lines = []
if len(self.fallback_translations_):
lines.append(
"WARNING: Fell back to English for the following translations:")
for (id, langs) in self.fallback_translations_.items():
lines.append(ReportTranslation(self.cliques_[id][0], langs.keys()))
if len(self.missing_translations_):
lines.append("ERROR: The following translations are MISSING:")
for (id, langs) in self.missing_translations_.items():
lines.append(ReportTranslation(self.cliques_[id][0], langs.keys()))
return '\n'.join(lines)
def MakeClique(self, message, translateable=True):
'''Create a new clique initialized with a message.
Args:
message: tclib.Message()
translateable: True | False
'''
clique = MessageClique(self, message, translateable)
# Enable others to find this clique by its message ID
if message.GetId() in self.cliques_:
presentable_text = clique.GetMessage().GetPresentableContent()
if not message.HasAssignedId():
for c in self.cliques_[message.GetId()]:
assert c.GetMessage().GetPresentableContent() == presentable_text
self.cliques_[message.GetId()].append(clique)
# We need to keep each list of cliques sorted by description, to
# achieve stable results from the BestClique method, see below.
self.cliques_[message.GetId()].sort(
key=lambda c:c.GetMessage().GetDescription())
else:
self.cliques_[message.GetId()] = [clique]
return clique
def FindCliqueAndAddTranslation(self, translation, language):
'''Adds the specified translation to the clique with the source message
it is a translation of.
Args:
translation: tclib.Translation()
language: 'en' | 'fr' ...
Return:
True if the source message was found, otherwise false.
'''
if translation.GetId() in self.cliques_:
for clique in self.cliques_[translation.GetId()]:
clique.AddTranslation(translation, language)
return True
else:
return False
def BestClique(self, id):
'''Returns the "best" clique from a list of cliques. All the cliques
must have the same ID. The "best" clique is chosen in the following
order of preference:
- The first clique that has a non-ID-based description.
- If no such clique found, the first clique with an ID-based description.
- Otherwise the first clique.
This method is stable in terms of always returning a clique with
an identical description (on different runs of GRIT on the same
data) because self.cliques_ is sorted by description.
'''
clique_list = self.cliques_[id]
clique_with_id = None
clique_default = None
for clique in clique_list:
if not clique_default:
clique_default = clique
description = clique.GetMessage().GetDescription()
if description and len(description) > 0:
if not description.startswith('ID:'):
# this is the preferred case so we exit right away
return clique
elif not clique_with_id:
clique_with_id = clique
if clique_with_id:
return clique_with_id
else:
return clique_default
def BestCliquePerId(self):
'''Iterates over the list of all cliques and returns the best clique for
each ID. This will be the first clique with a source message that has a
non-empty description, or an arbitrary clique if none of them has a
description.
'''
for id in self.cliques_:
yield self.BestClique(id)
def BestCliqueByOriginalText(self, text, meaning):
'''Finds the "best" (as in BestClique()) clique that has original text
'text' and meaning 'meaning'. Returns None if there is no such clique.
'''
# If needed, this can be optimized by maintaining a map of
# fingerprints of original text+meaning to cliques.
for c in self.BestCliquePerId():
msg = c.GetMessage()
if msg.GetRealContent() == text and msg.GetMeaning() == meaning:
return msg
return None
def AllMessageIds(self):
'''Returns a list of all defined message IDs.
'''
return self.cliques_.keys()
def AllCliques(self):
'''Iterates over all cliques. Note that this can return multiple cliques
with the same ID.
'''
for cliques in self.cliques_.values():
for c in cliques:
yield c
def GenerateXtbParserCallback(self, lang, debug=False):
'''Creates a callback function as required by grit.xtb_reader.Parse().
This callback will create Translation objects for each message from
the XTB that exists in this uberclique, and add them as translations for
the relevant cliques. The callback will add translations to the language
specified by 'lang'
Args:
lang: 'fr'
debug: True | False
'''
def Callback(id, structure):
if id not in self.cliques_:
if debug: print "Ignoring translation #%s" % id
return
if debug: print "Adding translation #%s" % id
# We fetch placeholder information from the original message (the XTB file
# only contains placeholder names).
original_msg = self.BestClique(id).GetMessage()
translation = tclib.Translation(id=id)
for is_ph,text in structure:
if not is_ph:
translation.AppendText(text)
else:
found_placeholder = False
for ph in original_msg.GetPlaceholders():
if ph.GetPresentation() == text:
translation.AppendPlaceholder(tclib.Placeholder(
ph.GetPresentation(), ph.GetOriginal(), ph.GetExample()))
found_placeholder = True
break
if not found_placeholder:
raise exception.MismatchingPlaceholders(
'Translation for message ID %s had <ph name="%s"/>, no match\n'
'in original message' % (id, text))
self.FindCliqueAndAddTranslation(translation, lang)
return Callback
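    # Illustrative (hypothetical) wiring of GenerateXtbParserCallback with
    # grit's XTB reader; the factory variable and .xtb path are assumptions:
    #
    #     from grit import xtb_reader
    #     with open('strings_fr.xtb') as xtb:
    #         xtb_reader.Parse(xtb, factory.GenerateXtbParserCallback('fr'))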
class CustomType(object):
'''A base class you should implement if you wish to specify a custom type
for a message clique (i.e. custom validation and optional modification of
translations).'''
def Validate(self, message):
'''Returns true if the message (a tclib.Message object) is valid,
otherwise false.
'''
raise NotImplementedError()
def ValidateAndModify(self, lang, translation):
'''Returns true if the translation (a tclib.Translation object) is valid,
otherwise false. The language is also passed in. This method may modify
the translation that is passed in, if it so wishes.
'''
raise NotImplementedError()
def ModifyTextPart(self, lang, text):
'''If you call ModifyEachTextPart, it will turn around and call this method
for each text part of the translation. You should return the modified
version of the text, or just the original text to not change anything.
'''
raise NotImplementedError()
def ModifyEachTextPart(self, lang, translation):
'''Call this to easily modify one or more of the textual parts of a
translation. It will call ModifyTextPart for each part of the
translation.
'''
contents = translation.GetContent()
for ix in range(len(contents)):
if (isinstance(contents[ix], types.StringTypes)):
contents[ix] = self.ModifyTextPart(lang, contents[ix])
class OneOffCustomType(CustomType):
'''A very simple custom type that performs the validation expressed by
the input expression on all languages including the source language.
The expression can access the variables 'lang', 'msg' and 'text()' where 'lang'
is the language of 'msg', 'msg' is the message or translation being
validated and 'text()' returns the real contents of 'msg' (for shorthand).
'''
def __init__(self, expression):
self.expr = expression
def Validate(self, message):
return self.ValidateAndModify(MessageClique.source_language, message)
def ValidateAndModify(self, lang, msg):
def text():
return msg.GetRealContent()
return eval(self.expr, {},
{'lang' : lang,
'text' : text,
'msg' : msg,
})
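# Illustrative (hypothetical) one-off rule built on OneOffCustomType: require
# that every message and translation keeps a trailing colon:
#
#     rule = OneOffCustomType("text().endswith(':')")
#     some_clique.SetCustomType(rule)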
class MessageClique(object):
'''A message along with all of its translations. Also code to bring
translations together with their original message.'''
# change this to the language code of Messages you add to cliques_.
# TODO(joi) Actually change this based on the <grit> node's source language
source_language = 'en'
# A constant translation we use when asked for a translation into the
# special language constants.CONSTANT_LANGUAGE.
CONSTANT_TRANSLATION = tclib.Translation(text='TTTTTT')
# A pattern to match messages that are empty or whitespace only.
WHITESPACE_MESSAGE = lazy_re.compile(u'^\s*$')
def __init__(self, uber_clique, message, translateable=True, custom_type=None):
'''Create a new clique initialized with just a message.
Note that messages with a body comprised only of whitespace will implicitly
be marked non-translatable.
Args:
uber_clique: Our uber-clique (collection of cliques)
message: tclib.Message()
translateable: True | False
custom_type: instance of clique.CustomType interface
'''
# Our parent
self.uber_clique = uber_clique
# If not translateable, we only store the original message.
self.translateable = translateable
# We implicitly mark messages that have a whitespace-only body as
# non-translateable.
if MessageClique.WHITESPACE_MESSAGE.match(message.GetRealContent()):
self.translateable = False
# A mapping of language identifiers to tclib.BaseMessage and its
# subclasses (i.e. tclib.Message and tclib.Translation).
self.clique = { MessageClique.source_language : message }
# A list of the "shortcut groups" this clique is
# part of. Within any given shortcut group, no shortcut key (e.g. &J)
# must appear more than once in each language for all cliques that
# belong to the group.
self.shortcut_groups = []
# An instance of the CustomType interface, or None. If this is set, it will
# be used to validate the original message and translations thereof, and
# will also get a chance to modify translations of the message.
self.SetCustomType(custom_type)
def GetMessage(self):
'''Retrieves the tclib.Message that is the source for this clique.'''
return self.clique[MessageClique.source_language]
def GetId(self):
'''Retrieves the message ID of the messages in this clique.'''
return self.GetMessage().GetId()
def IsTranslateable(self):
return self.translateable
def AddToShortcutGroup(self, group):
self.shortcut_groups.append(group)
def SetCustomType(self, custom_type):
'''Makes this clique use custom_type for validating messages and
translations, and optionally modifying translations.
'''
self.custom_type = custom_type
if custom_type and not custom_type.Validate(self.GetMessage()):
raise exception.InvalidMessage(self.GetMessage().GetRealContent())
def MessageForLanguage(self, lang, pseudo_if_no_match=True, fallback_to_english=False):
'''Returns the message/translation for the specified language, providing
a pseudotranslation if there is no available translation and a pseudo-
translation is requested.
The translation of any message whatsoever in the special language
'x_constant' is the message "TTTTTT".
Args:
lang: 'en'
pseudo_if_no_match: True
fallback_to_english: False
Return:
tclib.BaseMessage
'''
if not self.translateable:
return self.GetMessage()
if lang == constants.CONSTANT_LANGUAGE:
return self.CONSTANT_TRANSLATION
for msglang in self.clique.keys():
if lang == msglang:
return self.clique[msglang]
if lang == constants.FAKE_BIDI:
return pseudo_rtl.PseudoRTLMessage(self.GetMessage())
if fallback_to_english:
self.uber_clique._AddMissingTranslation(lang, self, is_error=False)
return self.GetMessage()
# If we're not supposed to generate pseudotranslations, we add an error
# report to a list of errors, then fail at a higher level, so that we
# get a list of all messages that are missing translations.
if not pseudo_if_no_match:
self.uber_clique._AddMissingTranslation(lang, self, is_error=True)
return pseudo.PseudoMessage(self.GetMessage())
def AllMessagesThatMatch(self, lang_re, include_pseudo = True):
'''Returns a map of all messages that match 'lang', including the pseudo
translation if requested.
Args:
lang_re: re.compile('fr|en')
include_pseudo: True
Return:
{ 'en' : tclib.Message,
'fr' : tclib.Translation,
pseudo.PSEUDO_LANG : tclib.Translation }
'''
if not self.translateable:
return [self.GetMessage()]
matches = {}
for msglang in self.clique:
if lang_re.match(msglang):
matches[msglang] = self.clique[msglang]
if include_pseudo:
matches[pseudo.PSEUDO_LANG] = pseudo.PseudoMessage(self.GetMessage())
return matches
def AddTranslation(self, translation, language):
'''Add a translation to this clique. The translation must have the same
ID as the message that is the source for this clique.
If this clique is not translateable, the function just returns.
Args:
translation: tclib.Translation()
language: 'en'
Throws:
grit.exception.InvalidTranslation if the translation you're trying to add
doesn't have the same message ID as the source message of this clique.
'''
if not self.translateable:
return
if translation.GetId() != self.GetId():
raise exception.InvalidTranslation(
'Msg ID %s, transl ID %s' % (self.GetId(), translation.GetId()))
if language in self.clique:
raise exception.InvalidTranslation('Duplicate msg ID %s' % translation.GetId())
# Because two messages can differ in the original content of their
# placeholders yet share the same ID (because they are otherwise the
# same), the translation we are getting may have different original
# content for placeholders than our message, yet it is still the right
# translation for our message (because it is for the same ID). We must
# therefore fetch the original content of placeholders from our original
# English message.
#
# See grit.clique_unittest.MessageCliqueUnittest.testSemiIdenticalCliques
# for a concrete explanation of why this is necessary.
original = self.MessageForLanguage(self.source_language, False)
if len(original.GetPlaceholders()) != len(translation.GetPlaceholders()):
print ("ERROR: '%s' translation of message id %s does not match" %
(language, translation.GetId()))
assert False
transl_msg = tclib.Translation(id=self.GetId(),
text=translation.GetPresentableContent(),
placeholders=original.GetPlaceholders())
if self.custom_type and not self.custom_type.ValidateAndModify(language, transl_msg):
print "WARNING: %s translation failed validation: %s" % (
language, transl_msg.GetId())
self.clique[language] = transl_msg
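# A minimal standalone sketch (not part of grit) of the lookup precedence
# implemented by MessageForLanguage above: constant language, exact match,
# English fallback, then pseudotranslation (FAKE_BIDI omitted). All names
# below are illustrative only.
def _lookup_sketch(lang, translations, source_msg, missing,
                   pseudo_if_no_match=True, fallback_to_english=False):
  if lang == 'x_constant':
    return 'TTTTTT'
  if lang in translations:
    return translations[lang]
  if fallback_to_english:
    return source_msg
  if not pseudo_if_no_match:
    missing.append(lang)  # reported later, at a higher level
  return 'PSEUDO(%s)' % source_msg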
|
CrankWheel/grit-i18n
|
grit/clique.py
|
Python
|
bsd-2-clause
| 18,037
|
# Progress hub classes
#
# Copyright (C) 2011-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from __future__ import division
from gi.repository import GLib, Gtk
import itertools
import os
import sys
import glob
from pyanaconda.i18n import _, C_
from pyanaconda.localization import find_best_locale_match
from pyanaconda.product import productName
from pyanaconda.flags import flags
from pyanaconda import iutil
from pyanaconda.constants import THREAD_INSTALL, THREAD_CONFIGURATION, DEFAULT_LANG, IPMI_FINISHED
from pykickstart.constants import KS_SHUTDOWN, KS_REBOOT
from pyanaconda.ui.gui.hubs import Hub
from pyanaconda.ui.gui.utils import gtk_action_nowait, gtk_call_once
__all__ = ["ProgressHub"]
class ProgressHub(Hub):
builderObjects = ["progressWindow"]
mainWidgetName = "progressWindow"
uiFile = "hubs/progress.glade"
helpFile = "ProgressHub.xml"
def __init__(self, data, storage, payload, instclass):
Hub.__init__(self, data, storage, payload, instclass)
self._totalSteps = 0
self._currentStep = 0
self._configurationDone = False
self._rnotes_id = None
def _do_configuration(self, widget = None, reenable_ransom = True):
from pyanaconda.install import doConfiguration
from pyanaconda.threads import threadMgr, AnacondaThread
assert self._configurationDone == False
self._configurationDone = True
# Disable all personalization spokes
self.builder.get_object("progressWindow-scroll").set_sensitive(False)
if reenable_ransom:
self._start_ransom_notes()
self._restart_spinner()
GLib.timeout_add(250, self._update_progress, self._configuration_done)
threadMgr.add(AnacondaThread(name=THREAD_CONFIGURATION, target=doConfiguration,
args=(self.storage, self.payload, self.data, self.instclass)))
def _start_ransom_notes(self):
# Adding this as a timeout below means it'll get called after 60
# seconds, so we need to do the first call manually.
self._cycle_rnotes()
self._rnotes_id = GLib.timeout_add_seconds(60, self._cycle_rnotes)
def _update_progress(self, callback = None):
from pyanaconda.progress import progressQ
import Queue
q = progressQ.q
        # Grab all messages that may have appeared since the last time this method ran.
while True:
# Attempt to get a message out of the queue for how we should update
# the progress bar. If there's no message, don't error out.
try:
(code, args) = q.get(False)
except Queue.Empty:
break
if code == progressQ.PROGRESS_CODE_INIT:
self._init_progress_bar(args[0])
elif code == progressQ.PROGRESS_CODE_STEP:
self._step_progress_bar()
elif code == progressQ.PROGRESS_CODE_MESSAGE:
self._update_progress_message(args[0])
elif code == progressQ.PROGRESS_CODE_COMPLETE:
q.task_done()
# we are done, stop the progress indication
gtk_call_once(self._progressBar.set_fraction, 1.0)
gtk_call_once(self._progressLabel.set_text, _("Complete!"))
gtk_call_once(self._spinner.stop)
gtk_call_once(self._spinner.hide)
if callback:
callback()
# There shouldn't be any more progress bar updates, so return False
# to indicate this method should be removed from the idle loop.
return False
elif code == progressQ.PROGRESS_CODE_QUIT:
sys.exit(args[0])
q.task_done()
return True
def _configuration_done(self):
# Configuration done, remove ransom notes timer
# and switch to the Reboot page
GLib.source_remove(self._rnotes_id)
self._progressNotebook.set_current_page(1)
iutil.ipmi_report(IPMI_FINISHED)
# kickstart install, continue automatically if reboot or shutdown selected
if flags.automatedInstall and self.data.reboot.action in [KS_REBOOT, KS_SHUTDOWN]:
self.window.emit("continue-clicked")
def _install_done(self):
# package installation done, check personalization spokes
# and start the configuration step if all is ready
if not self._inSpoke and self.continuePossible:
self._do_configuration(reenable_ransom = False)
else:
# some mandatory spokes are not ready
# switch to configure and finish page
GLib.source_remove(self._rnotes_id)
self._progressNotebook.set_current_page(0)
def _do_globs(self, path):
return glob.glob(path + "/*.png") + \
glob.glob(path + "/*.jpg") + \
glob.glob(path + "/*.svg")
def _get_rnotes(self):
# We first look for rnotes in paths containing the language, then in
# directories without the language component. You know, just in case.
paths = ["/tmp/updates/pixmaps/rnotes/",
"/tmp/product/pixmaps/rnotes/",
"/usr/share/anaconda/pixmaps/rnotes/"]
all_lang_pixmaps = []
for path in paths:
all_lang_pixmaps += self._do_globs(path + "/*")
pixmap_langs = [pixmap.split(os.path.sep)[-2] for pixmap in all_lang_pixmaps]
best_lang = find_best_locale_match(os.environ["LANG"], pixmap_langs)
if not best_lang:
# nothing found, try the default language
best_lang = find_best_locale_match(DEFAULT_LANG, pixmap_langs)
if not best_lang:
# nothing found even for the default language, try non-localized rnotes
non_localized = []
for path in paths:
non_localized += self._do_globs(path)
return non_localized
best_lang_pixmaps = []
for path in paths:
best_lang_pixmaps += self._do_globs(path + best_lang)
return best_lang_pixmaps
def _cycle_rnotes(self):
# Change the ransom notes image every minute by grabbing the next
# image's filename. Note that self._rnotesPages is an infinite list,
# so this will cycle through the images indefinitely.
try:
nxt = next(self._rnotesPages)
except StopIteration:
# there are no rnotes
pass
else:
self._progressNotebook.set_current_page(nxt)
return True
def initialize(self):
Hub.initialize(self)
if flags.livecdInstall:
continueText = self.builder.get_object("rebootLabel")
continueText.set_text(_("%s is now successfully installed on your system and ready "
"for you to use! When you are ready, reboot your system to start using it!"))
continueText.set_line_wrap(True)
self.window.get_continue_button().set_label(C_("GUI|Progress", "_Quit"))
self._progressBar = self.builder.get_object("progressBar")
self._progressLabel = self.builder.get_object("progressLabel")
self._progressNotebook = self.builder.get_object("progressNotebook")
self._spinner = self.builder.get_object("progressSpinner")
lbl = self.builder.get_object("configurationLabel")
lbl.set_text(_("%s is now successfully installed, but some configuration still needs to be done.\n"
"Finish it and then click the Finish configuration button please.") %
productName)
lbl = self.builder.get_object("rebootLabel")
lbl.set_text(_("%s is now successfully installed and ready for you to use!\n"
"Go ahead and reboot to start using it!") % productName)
rnotes = self._get_rnotes()
# Get the start of the pages we're about to add to the notebook
rnotes_start = self._progressNotebook.get_n_pages()
if rnotes:
# Add a new page in the notebook for each ransom note image.
for f in rnotes:
img = Gtk.Image.new_from_file(f)
img.show()
self._progressNotebook.append_page(img, None)
# An infinite list of the page numbers containing ransom notes images.
self._rnotesPages = itertools.cycle(range(rnotes_start,
self._progressNotebook.get_n_pages()))
else:
# Add a blank page to the notebook and we'll just cycle to that
# over and over again.
blank = Gtk.Box()
blank.show()
self._progressNotebook.append_page(blank, None)
self._rnotesPages = itertools.cycle([rnotes_start])
def refresh(self):
from pyanaconda.install import doInstall
from pyanaconda.threads import threadMgr, AnacondaThread
Hub.refresh(self)
self._start_ransom_notes()
GLib.timeout_add(250, self._update_progress, self._install_done)
threadMgr.add(AnacondaThread(name=THREAD_INSTALL, target=doInstall,
args=(self.storage, self.payload, self.data, self.instclass)))
def _updateContinueButton(self):
if self._configurationDone:
self.window.set_may_continue(self.continuePossible)
else:
self.builder.get_object("configureButton").set_sensitive(self.continuePossible)
def _init_progress_bar(self, steps):
self._totalSteps = steps
self._currentStep = 0
gtk_call_once(self._progressBar.set_fraction, 0.0)
def _step_progress_bar(self):
if not self._totalSteps:
return
self._currentStep += 1
gtk_call_once(self._progressBar.set_fraction, self._currentStep/self._totalSteps)
def _update_progress_message(self, message):
if not self._totalSteps:
return
gtk_call_once(self._progressLabel.set_text, message)
@gtk_action_nowait
def _restart_spinner(self):
self._spinner.show()
self._spinner.start()
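# A minimal standalone sketch (not pyanaconda API) of the non-blocking queue
# drain used by _update_progress above; (code, args) tuples stand in for the
# progressQ message protocol.
def _drain_sketch(q):
    import Queue  # Python 2, matching this module
    handled = []
    while True:
        try:
            (code, args) = q.get(False)
        except Queue.Empty:
            break
        handled.append((code, args))
        q.task_done()
    return handled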
|
snbueno/anaconda
|
pyanaconda/ui/gui/hubs/progress.py
|
Python
|
gpl-2.0
| 11,117
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from oslo_config import cfg
from oslo_utils import importutils
from neutron.agent.common import config as agent_config
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
NS_MANGLING_PATTERN = ('(%s|%s)' % (dhcp.NS_PREFIX, l3_agent.NS_PREFIX) +
attributes.UUID_PATTERN)
class FakeDhcpPlugin(object):
"""Fake RPC plugin to bypass any RPC calls."""
def __getattribute__(self, name):
def fake_method(*args):
pass
return fake_method
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
cli_opts = [
cfg.BoolOpt('force',
default=False,
help=_('Delete the namespace by removing all devices.')),
]
conf = cfg.CONF
conf.register_cli_opts(cli_opts)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_use_namespaces_opts_helper(conf)
conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
conf.register_opts(dhcp_config.DHCP_OPTS)
conf.register_opts(dhcp_config.DNSMASQ_OPTS)
conf.register_opts(interface.OPTS)
return conf
def _get_dhcp_process_monitor(config):
return external_process.ProcessMonitor(
config=config,
resource_type='dhcp')
def kill_dhcp(conf, namespace):
"""Disable DHCP for a network if DHCP is still active."""
network_id = namespace.replace(dhcp.NS_PREFIX, '')
dhcp_driver = importutils.import_object(
conf.dhcp_driver,
conf=conf,
process_monitor=_get_dhcp_process_monitor(conf),
network=dhcp.NetModel(conf.use_namespaces, {'id': network_id}),
plugin=FakeDhcpPlugin())
if dhcp_driver.active:
dhcp_driver.disable()
def eligible_for_deletion(conf, namespace, force=False):
"""Determine whether a namespace is eligible for deletion.
Eligibility is determined by having only the lo device or if force
is passed as a parameter.
"""
# filter out namespaces without UUID as the name
if not re.match(NS_MANGLING_PATTERN, namespace):
return False
ip = ip_lib.IPWrapper(namespace=namespace)
return force or ip.namespace_is_empty()
def unplug_device(conf, device):
try:
device.link.delete()
except RuntimeError:
        # Maybe the device is an OVS port, so try to delete it
ovs = ovs_lib.BaseOVS()
bridge_name = ovs.get_bridge_for_iface(device.name)
if bridge_name:
bridge = ovs_lib.OVSBridge(bridge_name)
bridge.delete_port(device.name)
else:
LOG.debug('Unable to find bridge for device: %s', device.name)
def destroy_namespace(conf, namespace, force=False):
"""Destroy a given namespace.
If force is True, then dhcp (if it exists) will be disabled and all
devices will be forcibly removed.
"""
try:
ip = ip_lib.IPWrapper(namespace=namespace)
if force:
kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if it is empty,
# so a second check is required here.
if ip.netns.exists(namespace):
for device in ip.get_devices(exclude_loopback=True):
unplug_device(conf, device)
ip.garbage_collect_namespace()
except Exception:
LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
def main():
"""Main method for cleaning up network namespaces.
This method will make two passes checking for namespaces to delete. The
process will identify candidates, sleep, and call garbage collect. The
garbage collection will re-verify that the namespace meets the criteria for
    deletion (i.e. it is empty). The period of sleep and the second pass allow
    time for the namespace state to settle, so that the check prior to deletion
will re-confirm the namespace is empty.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --force flag should only be used as part of the cleanup of a devstack
installation as it will blindly purge namespaces and their devices. This
option also kills any lingering DHCP instances.
"""
conf = setup_conf()
conf()
config.setup_logging()
# Identify namespaces that are candidates for deletion.
candidates = [ns for ns in
ip_lib.IPWrapper.get_namespaces()
if eligible_for_deletion(conf, ns, conf.force)]
if candidates:
time.sleep(2)
for namespace in candidates:
destroy_namespace(conf, namespace, conf.force)
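# An illustrative check of the namespace-name filter used by
# eligible_for_deletion above; the prefixes and UUID pattern here are
# simplified stand-ins for dhcp.NS_PREFIX, l3_agent.NS_PREFIX and
# attributes.UUID_PATTERN.
#   >>> import re
#   >>> uuid = '[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}'
#   >>> pattern = '(qdhcp-|qrouter-)' + uuid
#   >>> bool(re.match(pattern, 'qdhcp-12345678-1234-1234-1234-123456789012'))
#   True
#   >>> bool(re.match(pattern, 'my-namespace'))
#   False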
|
cloudbase/neutron-virtualbox
|
neutron/cmd/netns_cleanup.py
|
Python
|
apache-2.0
| 5,850
|
# -*- coding: utf-8 -*-
"""
sphinx.pygments_styles
~~~~~~~~~~~~~~~~~~~~~~
Sphinx theme specific highlighting styles.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.styles.friendly import FriendlyStyle
from pygments.token import Generic, Comment, Number, Whitespace, Keyword, \
Operator, Name, String, Error
class NoneStyle(Style):
"""Style without any styling."""
class SphinxStyle(Style):
"""
Like friendly, but a bit darker to enhance contrast on the green
background.
"""
background_color = '#eeffcc'
default_style = ''
styles = FriendlyStyle.styles
styles.update({
Generic.Output: '#333',
Comment: 'italic #408090',
Number: '#208050',
})
class PyramidStyle(Style):
"""
Pylons/pyramid pygments style based on friendly style, by Blaise Laflamme.
"""
# work in progress...
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #60a0b0",
Comment.Preproc: "noitalic #007020",
Comment.Special: "noitalic bg:#fff0f0",
Keyword: "bold #007020",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold #902000",
Operator: "#666666",
Operator.Word: "bold #007020",
Name.Builtin: "#007020",
Name.Function: "#06287e",
Name.Class: "bold #0e84b5",
Name.Namespace: "bold #0e84b5",
Name.Exception: "#007020",
Name.Variable: "#bb60d5",
Name.Constant: "#60add5",
Name.Label: "bold #002070",
Name.Entity: "bold #d55537",
Name.Attribute: "#0e84b5",
Name.Tag: "bold #062873",
Name.Decorator: "bold #555555",
String: "#4070a0",
String.Doc: "italic",
String.Interpol: "italic #70a0d0",
String.Escape: "bold #4070a0",
String.Regex: "#235388",
String.Symbol: "#517918",
String.Other: "#c65d09",
Number: "#40a070",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "#a40000 bg:#fbe3e4"
}
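# A short usage sketch (not part of this module): rendering a snippet with
# SphinxStyle through the standard Pygments API.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html = highlight('print("hi")', PythonLexer(), HtmlFormatter(style=SphinxStyle))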
|
nirmeshk/oh-mainline
|
vendor/packages/sphinx/sphinx/pygments_styles.py
|
Python
|
agpl-3.0
| 3,025
|
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/index')
def index():
    # jsonObj = request.json_get('txt')  # does not work because the server-side script can only be executed once
return render_template('index.html')
@app.route('/process', methods = ['GET', 'POST'])
def processInformation():
data = request.get_json()
result = 'Your data is: ' + str(data)
return result
if __name__ == '__main__':
app.run(host = 'localhost', port = 8080, debug = True)
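# A minimal client-side sketch (assuming the third-party `requests` package)
# for exercising the /process endpoint above:
#
#   import requests
#   resp = requests.post('http://localhost:8080/process', json={'txt': 'hello'})
#   print(resp.text)  # -> "Your data is: {u'txt': u'hello'}" (repr may vary)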
|
wibox/Posting-JSON-data-to-Flask-server
|
__init__.py
|
Python
|
mit
| 541
|
from __future__ import unicode_literals, division, absolute_import
from argparse import ArgumentParser, ArgumentTypeError
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from sqlalchemy.orm.exc import NoResultFound
from flexget import options
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.plugin import PluginError, DependencyError
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console
from flexget.utils.tools import split_title_year
from . import db
from .movie_list import MovieListBase
def lookup_movie(title, session, identifiers=None):
try:
imdb_lookup = plugin.get('imdb_lookup', 'movie_list').lookup
except DependencyError:
imdb_lookup = None
try:
tmdb_lookup = plugin.get('tmdb_lookup', 'movie_list').lookup
except DependencyError:
tmdb_lookup = None
if not (imdb_lookup or tmdb_lookup):
return
entry = Entry(title=title)
if identifiers:
for identifier in identifiers:
for key, value in identifier.items():
entry[key] = value
try:
imdb_lookup(entry, session=session)
# IMDB lookup raises PluginError instead of the normal ValueError
except PluginError:
tmdb_lookup(entry)
# Return only if lookup was successful
if entry.get('movie_name'):
return entry
return
def movie_list_keyword_type(identifier):
if identifier.count('=') != 1:
raise ArgumentTypeError(
'Received identifier in wrong format: {}, '
' should be in keyword format like `imdb_id=tt1234567`'.format(identifier)
)
    name, value = identifier.split('=', 1)
if name not in MovieListBase().supported_ids:
raise ArgumentTypeError(
'Received unsupported identifier ID {}. Should be one of {}'.format(
                identifier, ', '.join(MovieListBase().supported_ids)
)
)
return {name: value}
def do_cli(manager, options):
"""Handle movie-list subcommand"""
if options.list_action == 'all':
movie_list_lists(options)
return
if options.list_action == 'list':
movie_list_list(options)
return
if options.list_action == 'add':
movie_list_add(options)
return
if options.list_action == 'del':
movie_list_del(options)
return
if options.list_action == 'purge':
movie_list_purge(options)
return
def movie_list_lists(options):
""" Show all movie lists """
lists = db.get_movie_lists()
header = ['#', 'List Name']
table_data = [header]
for movie_list in lists:
table_data.append([movie_list.id, movie_list.name])
try:
table = TerminalTable(options.table_type, table_data)
except TerminalTableError as e:
console('ERROR: {}'.format(e))
else:
console(table.output)
def movie_list_list(options):
"""List movie list"""
with Session() as session:
try:
movie_list = db.get_list_by_exact_name(options.list_name)
except NoResultFound:
console('Could not find movie list with name {}'.format(options.list_name))
return
header = ['#', 'Movie Name', 'Movie year']
header += db.MovieListBase().supported_ids
table_data = [header]
movies = db.get_movies_by_list_id(
movie_list.id, order_by='added', descending=True, session=session
)
for movie in movies:
movie_row = [movie.id, movie.title, movie.year or '']
for identifier in db.MovieListBase().supported_ids:
movie_row.append(movie.identifiers.get(identifier, ''))
table_data.append(movie_row)
title = '{} Movies in movie list: `{}`'.format(len(movies), options.list_name)
try:
table = TerminalTable(options.table_type, table_data, title, drop_columns=[5, 2, 4])
except TerminalTableError as e:
console('ERROR: {}'.format(e))
else:
console(table.output)
def movie_list_add(options):
with Session() as session:
try:
movie_list = db.get_list_by_exact_name(options.list_name, session=session)
except NoResultFound:
console('Could not find movie list with name {}, creating'.format(options.list_name))
movie_list = db.MovieListList(name=options.list_name)
session.add(movie_list)
session.commit()
title, year = split_title_year(options.movie_title)
console('Trying to lookup movie title: `{}`'.format(title))
movie_lookup = lookup_movie(title=title, session=session, identifiers=options.identifiers)
if not movie_lookup:
console(
'ERROR: movie lookup failed for movie {}, aborting'.format(options.movie_title)
)
return
title = movie_lookup['movie_name']
movie = db.get_movie_by_title_and_year(
list_id=movie_list.id, title=title, year=year, session=session
)
if not movie:
console("Adding movie with title {} to list {}".format(title, movie_list.name))
movie = db.MovieListMovie(title=title, year=year, list_id=movie_list.id)
else:
console("Movie with title {} already exist in list {}".format(title, movie_list.name))
id_list = []
if options.identifiers:
id_list = options.identifiers
else:
for _id in db.MovieListBase().supported_ids:
if movie_lookup.get(_id):
id_list.append({_id: movie_lookup.get(_id)})
if id_list:
console('Setting movie identifiers:')
for ident in id_list:
for key in ident:
console('{}: {}'.format(key, ident[key]))
movie.ids = db.get_db_movie_identifiers(identifier_list=id_list, session=session)
session.merge(movie)
console('Successfully added movie {} to movie list {} '.format(title, movie_list.name))
def movie_list_del(options):
with Session() as session:
try:
movie_list = db.get_list_by_exact_name(options.list_name)
except NoResultFound:
console('Could not find movie list with name {}'.format(options.list_name))
return
try:
movie_exist = db.get_movie_by_id(
list_id=movie_list.id, movie_id=int(options.movie), session=session
)
except NoResultFound:
console(
'Could not find movie with ID {} in list `{}`'.format(
int(options.movie), options.list_name
)
)
return
except ValueError:
title, year = split_title_year(options.movie)
movie_exist = db.get_movie_by_title_and_year(
list_id=movie_list.id, title=title, year=year, session=session
)
if not movie_exist:
console(
'Could not find movie with title {} in list {}'.format(
options.movie, options.list_name
)
)
return
else:
console('Removing movie {} from list {}'.format(movie_exist.title, options.list_name))
session.delete(movie_exist)
def movie_list_purge(options):
with Session() as session:
try:
movie_list = db.get_list_by_exact_name(options.list_name)
except NoResultFound:
console('Could not find movie list with name {}'.format(options.list_name))
return
console('Deleting list {}'.format(options.list_name))
session.delete(movie_list)
@event('options.register')
def register_parser_arguments():
# Common option to be used in multiple subparsers
movie_parser = ArgumentParser(add_help=False)
movie_parser.add_argument('movie_title', metavar='<MOVIE TITLE>', help="Title of the movie")
name_or_id_parser = ArgumentParser(add_help=False)
name_or_id_parser.add_argument(
'movie', metavar='<NAME or ID>', help="Title or ID of the movie"
)
identifiers_parser = ArgumentParser(add_help=False)
identifiers_parser.add_argument(
'-i',
'--identifiers',
metavar='<identifiers>',
nargs='+',
type=movie_list_keyword_type,
        help='Can be a string or a list of strings in the format imdb_id=XXX,'
' tmdb_id=XXX, etc',
)
list_name_parser = ArgumentParser(add_help=False)
list_name_parser.add_argument(
'list_name',
nargs='?',
metavar='<LIST NAME>',
default='movies',
help='Name of movie list to operate on (Default is \'movies\')',
)
# Register subcommand
parser = options.register_command('movie-list', do_cli, help='View and manage movie lists')
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='list_action')
subparsers.add_parser('all', parents=[table_parser], help='Shows all existing movie lists')
subparsers.add_parser(
'list', parents=[list_name_parser, table_parser], help='List movies from a list'
)
subparsers.add_parser(
'add',
parents=[list_name_parser, movie_parser, identifiers_parser],
help='Add a movie to a list',
)
subparsers.add_parser(
'del',
parents=[list_name_parser, name_or_id_parser],
help='Remove a movie from a list using its title or ID',
)
subparsers.add_parser(
'purge',
parents=[list_name_parser],
help='Removes an entire list with all of its movies. Use this with caution',
)
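# Illustrative behaviour of movie_list_keyword_type above (assuming
# 'imdb_id' is among MovieListBase().supported_ids):
#   movie_list_keyword_type('imdb_id=tt1234567') -> {'imdb_id': 'tt1234567'}
#   movie_list_keyword_type('tt1234567')         -> raises ArgumentTypeError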
|
gazpachoking/Flexget
|
flexget/components/managed_lists/lists/movie_list/cli.py
|
Python
|
mit
| 9,783
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from django.conf import settings
from opps.contrib.feeds.views import (ContainerFeed, ChannelFeed,
ContainerAtomFeed, ChannelAtomFeed)
from opps.core.tags.views import TagList
from opps.core.cache import cache_page
from .views import ContainerList, ContainerDetail
from .views import Search
urlpatterns = patterns(
'',
url(r'^$', ContainerList.as_view(), name='home'),
url(r'^(rss|feed)$', cache_page(settings.OPPS_CACHE_EXPIRE)(
ContainerFeed()), name='feed'),
url(r'^atom$', cache_page(settings.OPPS_CACHE_EXPIRE)(
ContainerAtomFeed()), name='atom_feed'),
url(r'^search/', Search(), name='search'),
url(r'^tag/(?P<tag>[\w//-]+)$',
cache_page(settings.OPPS_CACHE_EXPIRE)(
TagList.as_view()), name='tag_open'),
url(r'^(?P<long_slug>[\w\b//-]+)/(rss|feed)$',
cache_page(settings.OPPS_CACHE_EXPIRE)(
ChannelFeed()), name='channel_feed'),
url(r'^(?P<long_slug>[\w\b//-]+)/atom$',
cache_page(settings.OPPS_CACHE_EXPIRE)(
ChannelAtomFeed()), name='channel_atom_feed'),
url(r'^(?P<channel__long_slug>[\w//-]+)/(?P<slug>[\w-]+)\.html$',
cache_page(settings.OPPS_CACHE_EXPIRE_DETAIL)(
ContainerDetail.as_view()), name='open'),
url(r'^(?P<channel__long_slug>[\w\b//-]+)/$',
cache_page(settings.OPPS_CACHE_EXPIRE_LIST)(
ContainerList.as_view()), name='channel'),
url(r'^(?P<slug>[\w-]+)\.html$',
cache_page(settings.OPPS_CACHE_EXPIRE_DETAIL)(
ContainerDetail.as_view()), name='homepage_open'),
)
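# Illustrative reversal of the named routes above (Django-era
# django.core.urlresolvers; the slug values are hypothetical and this
# assumes the urlconf is mounted at the site root):
#   from django.core.urlresolvers import reverse
#   reverse('open', kwargs={'channel__long_slug': 'news', 'slug': 'hello'})
#   # -> '/news/hello.html'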
|
laborautonomo/opps
|
opps/containers/urls.py
|
Python
|
mit
| 1,707
|
# -*- coding: utf-8 -*-
## Part of the pyprimes.py package.
##
## Copyright © 2014 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""The module implements various prime generating and testing functions using
the Strategy design pattern, allowing the caller to easily experiment with
different algorithms and implementations.
The functions in this module will take at least one mandatory argument,
usually named either ``strategy`` or ``prover``.
strategy:
The ``strategy`` argument is used to delegate to a prime generator.
It must be a function which takes no arguments and returns an
iterator that yields primes. (A generator function is a convenient
way to manage this.)
This module makes no check that the strategy function actually
yields prime numbers. It is the caller's responsibility to ensure
that is the case.
prover:
The ``prover`` argument is used to delegate to a primality testing
function. It must be a function which takes a single argument, an
integer, and returns one of the following flags:
        0 or False   Number is definitely nonprime.
        1 or True    Number is definitely prime.
        2            Number is a probable prime or pseudoprime.
Any other result will raise TypeError or ValueError.
This module makes no check to confirm that the prover function
actually tests for primality. It is the caller's responsibility to
ensure that is the case.
"""
from __future__ import division
from pyprimes.compat23 import next
__all__ = ['is_prime', 'next_prime', 'prev_prime', 'primes',
'trial_division',
]
# === Primality testing ===
def is_prime(prover, n):
"""Perform a primality test on n using the given prover.
See the docstring for this module for specifications for
the ``prover`` function.
>>> import pyprimes.awful
>>> is_prime(pyprimes.awful.isprime, 103)
True
>>> is_prime(pyprimes.awful.isprime, 105)
False
"""
flag = prover(n)
if flag is True or flag is False:
return flag
# Check for actual ints, not subclasses. Gosh this takes me back to
# Python 1.5 days...
if type(flag) is int:
if flag in (0, 1, 2):
return flag
raise ValueError('prover returned invalid int flag %d' % flag)
raise TypeError('expected bool or int but prover returned %r' % flag)
def trial_division(strategy, n):
"""Perform a trial division primality test using the given strategy.
See this module's docstring for specifications for the ``strategy``
function.
This performs an exact but slow primality test using trial division
by dividing by primes only. It returns True if the argument is a
prime number, otherwise False.
>>> import pyprimes.awful
>>> trial_division(pyprimes.awful.primes0, 11)
True
>>> trial_division(pyprimes.awful.primes0, 12)
False
For large values of n, this may be slow or run out of memory.
"""
if n < 2:
return False
if n == 2:
return True
if n % 2 == 0:
return False
limit = n**0.5 # FIXME: should use exact isqrt
for divisor in strategy():
if divisor > limit: break
if n % divisor == 0: return False
return True
# === Prime generators ===
def primes(strategy, start=None, end=None):
"""Yield primes using the given strategy function.
See this module's docstring for specifications for the ``strategy``
function.
If the optional arguments ``start`` and ``end`` are given, they must be
either None or an integer. Only primes in the half-open range ``start``
(inclusive) to ``end`` (exclusive) are yielded. If ``start`` is None,
the range begins at the lowest prime (namely 2), if ``end`` is None,
the range has no upper limit.
>>> from pyprimes.awful import turner
>>> list(primes(turner, 6, 30))
[7, 11, 13, 17, 19, 23, 29]
"""
#return filter_between(gen(), start, end)
it = strategy()
p = next(it)
if start is not None:
# Drop the primes below start as fast as possible, then yield.
while p < start:
p = next(it)
assert start is None or p >= start
    # Then yield until end.
    if end is not None:
while p < end:
yield p
p = next(it)
else:
while True:
yield p
p = next(it)
def next_prime(prover, n):
"""Return the first prime number strictly greater than n.
See the docstring for this module for specifications for
the ``prover`` function.
>>> import pyprimes.awful
>>> next_prime(pyprimes.awful.isprime, 97)
101
"""
if n < 2:
return 2
# Advance to the next odd number.
if n % 2 == 0: n += 1
else: n += 2
assert n%2 == 1
while not is_prime(prover, n):
n += 2
return n
def prev_prime(prover, n):
"""Return the first prime number strictly less than n.
See the docstring for this module for specifications for
the ``prover`` function.
>>> import pyprimes.awful
>>> prev_prime(pyprimes.awful.isprime, 100)
97
If there are no primes less than n, raises ValueError.
"""
if n <= 2:
raise ValueError('There are no smaller primes than 2.')
# Retreat to the previous odd number.
if n % 2 == 1: n -= 2
else: n -= 1
assert n%2 == 1
while not is_prime(prover, n):
n -= 2
return n
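# A minimal example strategy (an intentionally naive prime generator, not
# part of this package) and how it plugs into primes() above:
def _naive_strategy():
    n = 2
    while True:
        if all(n % d for d in range(2, n)):
            yield n
        n += 1

# list(primes(_naive_strategy, 10, 30)) -> [11, 13, 17, 19, 23, 29]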
|
skilledindia/pyprimes
|
src/pyprimes/strategic.py
|
Python
|
mit
| 5,593
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011,2015 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Pango
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.plug import Gramplet
from gramps.gui.widgets import Photo
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.utils.file import media_path_full
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.const import COLON, GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
class PlaceDetails(Gramplet):
"""
Displays details for a place.
"""
def init(self):
self.gui.WIDGET = self.build_gui()
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add(self.gui.WIDGET)
def build_gui(self):
"""
Build the GUI interface.
"""
self.top = Gtk.Box()
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.photo = Photo(self.uistate.screen_height() < 1000)
self.title = Gtk.Label(halign=Gtk.Align.START)
self.title.override_font(Pango.FontDescription('sans bold 12'))
self.title.set_selectable(True)
vbox.pack_start(self.title, False, True, 7)
self.grid = Gtk.Grid(orientation=Gtk.Orientation.VERTICAL)
self.grid.set_column_spacing(10)
vbox.pack_start(self.grid, False, True, 0)
self.top.pack_start(self.photo, False, True, 5)
self.top.pack_start(vbox, False, True, 10)
self.top.show_all()
return self.top
def add_row(self, title, value):
"""
Add a row to the table.
"""
label = Gtk.Label(label=title + COLON, halign=Gtk.Align.END,
valign=Gtk.Align.START)
label.set_selectable(True)
label.show()
value = Gtk.Label(label=value, halign=Gtk.Align.START)
value.set_selectable(True)
value.show()
self.grid.add(label)
self.grid.attach_next_to(value, label, Gtk.PositionType.RIGHT, 1, 1)
def clear_grid(self):
"""
Remove all the rows from the grid.
"""
list(map(self.grid.remove, self.grid.get_children()))
def db_changed(self):
self.connect(self.dbstate.db, 'place-update', self.update)
self.connect_signal('Place', self.update)
def update_has_data(self):
        active_handle = self.get_active('Place')
        if active_handle:
            active_place = self.dbstate.db.get_place_from_handle(active_handle)
            self.set_has_data(active_place is not None)
else:
self.set_has_data(False)
def main(self):
self.display_empty()
active_handle = self.get_active('Place')
if active_handle:
place = self.dbstate.db.get_place_from_handle(active_handle)
self.top.hide()
if place:
self.display_place(place)
self.set_has_data(True)
else:
self.set_has_data(False)
self.top.show()
else:
self.set_has_data(False)
def display_place(self, place):
"""
Display details of the active place.
"""
self.load_place_image(place)
title = place_displayer.display(self.dbstate.db, place)
self.title.set_text(title)
self.clear_grid()
self.add_row(_('Name'), place.get_name().get_value())
self.add_row(_('Type'), place.get_type())
self.display_separator()
self.display_alt_names(place)
self.display_separator()
lat, lon = conv_lat_lon(place.get_latitude(),
place.get_longitude(),
format='DEG')
if lat:
self.add_row(_('Latitude'), lat)
if lon:
self.add_row(_('Longitude'), lon)
def display_alt_names(self, place):
"""
Display alternative names for the place.
"""
alt_names = ["%s (%s)" % (name.get_value(), name.get_language())
if name.get_language() else name.get_value()
for name in place.get_alternative_names()]
if len(alt_names) > 0:
self.add_row(_('Alternative Names'), '\n'.join(alt_names))
def display_empty(self):
"""
        Display empty details when no place is selected.
"""
self.photo.set_image(None)
self.photo.set_uistate(None, None)
self.title.set_text('')
self.clear_grid()
def display_separator(self):
"""
        Display an empty row to separate groups of entries.
"""
label = Gtk.Label(label='')
label.override_font(Pango.FontDescription('sans 4'))
label.set_selectable(True)
label.show()
self.grid.add(label)
def load_place_image(self, place):
"""
Load the primary image if it exists.
"""
media_list = place.get_media_list()
if media_list:
media_ref = media_list[0]
object_handle = media_ref.get_reference_handle()
obj = self.dbstate.db.get_media_from_handle(object_handle)
full_path = media_path_full(self.dbstate.db, obj.get_path())
mime_type = obj.get_mime_type()
if mime_type and mime_type.startswith("image"):
self.photo.set_image(full_path, mime_type,
media_ref.get_rectangle())
self.photo.set_uistate(self.uistate, object_handle)
else:
self.photo.set_image(None)
self.photo.set_uistate(None, None)
else:
self.photo.set_image(None)
self.photo.set_uistate(None, None)
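    # Illustrative use of conv_lat_lon as called in display_place above
    # (exact output formatting may vary by locale):
    #   conv_lat_lon('50.8503', '4.3517', format='DEG')
    #   -> ('50°51\'1.08"N', '4°21\'6.12"E')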
|
prculley/gramps
|
gramps/plugins/gramplet/placedetails.py
|
Python
|
gpl-2.0
| 6,794
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from contextlib import contextmanager
import psutil
class ProcessStillRunning(AssertionError):
"""Raised when a process shouldn't be running but is."""
def _safe_iter_matching_processes(name):
for proc in psutil.process_iter():
try:
if name in ''.join([part.decode('utf-8') for part in proc.cmdline()]):
yield proc
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
def _make_process_table(processes):
line_tmpl = '{0:>7} {1:>7} {2}'
proc_tuples = [(p.pid, p.ppid(), ''.join(p.cmdline())) for p in processes]
return '\n'.join(
[
      line_tmpl.format('PID', 'PPID', 'CMDLINE')
] + [
line_tmpl.format(*t) for t in sorted(proc_tuples)
]
)
@contextmanager
def no_lingering_process_by_command(name):
"""Asserts that no process exists for a given command with a helpful error, excluding
existing processes outside of the scope of the contextmanager."""
before_processes = set(_safe_iter_matching_processes(name))
yield
after_processes = set(_safe_iter_matching_processes(name))
delta_processes = after_processes.difference(before_processes)
if delta_processes:
raise ProcessStillRunning(
'{} {} processes lingered after tests:\n{}'
.format(len(delta_processes), name, _make_process_table(delta_processes))
)
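# A short usage sketch: assert that no matching processes outlive the
# block ('pantsd' and the test body are illustrative).
def _example_usage():
  with no_lingering_process_by_command('pantsd'):
    pass  # run the code under test here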
|
UnrememberMe/pants
|
tests/python/pants_test/testutils/process_test_util.py
|
Python
|
apache-2.0
| 1,611
|
import json
import os
from django.test import TestCase
from corehq.apps.commtrack.tests.util import bootstrap_domain as initial_bootstrap
from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.users.models import CommCareUser
from custom.ewsghana.api import EWSApi, SMSUser
from custom.ilsgateway.tests.mock_endpoint import MockEndpoint
TEST_DOMAIN = 'ewsghana-commtrack-smsusers-test'
class SMSUsersSyncTest(TestCase):
def setUp(self):
self.endpoint = MockEndpoint('http://test-api.com/', 'dummy', 'dummy')
self.api_object = EWSApi(TEST_DOMAIN, self.endpoint)
self.datapath = os.path.join(os.path.dirname(__file__), 'data')
initial_bootstrap(TEST_DOMAIN)
self.api_object.prepare_commtrack_config()
self.api_object.create_or_edit_roles()
for user in CommCareUser.by_domain(TEST_DOMAIN):
user.delete()
for verified_number in VerifiedNumber.by_domain(TEST_DOMAIN):
verified_number.delete()
def test_create_smsuser(self):
with open(os.path.join(self.datapath, 'sample_smsusers.json')) as f:
smsuser = SMSUser(json.loads(f.read())[0])
self.assertEqual(0, len(CommCareUser.by_domain(TEST_DOMAIN)))
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
self.assertIsNotNone(ewsghana_smsuser.get_id)
username_part = "%s%d" % (ewsghana_smsuser.name.strip().replace(' ', '.').lower(), smsuser.id)
username = "%s@%s.commcarehq.org" % (username_part, TEST_DOMAIN)
self.assertEqual(username, ewsghana_smsuser.username)
self.assertEqual(smsuser.is_active, str(ewsghana_smsuser.is_active))
self.assertEqual(False, ewsghana_smsuser.is_superuser)
self.assertEqual(False, ewsghana_smsuser.is_staff)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '2222222222')
domain_name = ewsghana_smsuser.get_domains()[0]
self.assertEqual(TEST_DOMAIN, domain_name)
def test_edit_smsuser(self):
with open(os.path.join(self.datapath, 'sample_smsusers.json')) as f:
smsuser = SMSUser(json.loads(f.read())[0])
self.assertEqual(0, len(CommCareUser.by_domain(TEST_DOMAIN)))
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
user_id = ewsghana_smsuser.get_id
self.assertIsNotNone(user_id)
smsuser.phone_numbers = ['111111111']
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
self.assertEqual(user_id, ewsghana_smsuser.get_id)
self.assertEqual(ewsghana_smsuser.default_phone_number, '111111111')
self.assertListEqual(ewsghana_smsuser.phone_numbers, ['111111111'])
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '111111111')
def test_edit_phone_number1(self):
"""
When phone number is deleted on EWS side it also should be deleted in HQ
:return:
"""
with open(os.path.join(self.datapath, 'sample_smsusers.json')) as f:
smsuser = SMSUser(json.loads(f.read())[0])
self.assertEqual(0, len(CommCareUser.by_domain(TEST_DOMAIN)))
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
smsuser.phone_numbers = []
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
self.assertIsNone(ewsghana_smsuser.default_phone_number)
self.assertListEqual(ewsghana_smsuser.phone_numbers, [])
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNone(verified_number)
def test_edit_phone_number2(self):
"""
When phone number is added on EWS side it also should be added in HQ
"""
with open(os.path.join(self.datapath, 'sample_smsusers.json')) as f:
smsuser = SMSUser(json.loads(f.read())[0])
smsuser.phone_numbers = []
self.assertEqual(0, len(CommCareUser.by_domain(TEST_DOMAIN)))
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNone(verified_number)
smsuser.phone_numbers = ['111111111']
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
self.assertIsNotNone(ewsghana_smsuser.default_phone_number)
self.assertListEqual(ewsghana_smsuser.phone_numbers, ['111111111'])
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '111111111')
def test_edit_phone_number3(self):
"""
When phone number is edited on EWS side it also should be edited in HQ
"""
with open(os.path.join(self.datapath, 'sample_smsusers.json')) as f:
smsuser = SMSUser(json.loads(f.read())[0])
self.assertEqual(0, len(CommCareUser.by_domain(TEST_DOMAIN)))
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '2222222222')
smsuser.phone_numbers = ['111111111']
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
self.assertIsNotNone(ewsghana_smsuser.default_phone_number)
self.assertListEqual(ewsghana_smsuser.phone_numbers, ['111111111'])
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '111111111')
def test_edit_phone_number4(self):
"""
Number shouldn't be changed when is not edited on EWS side.
"""
with open(os.path.join(self.datapath, 'sample_smsusers.json')) as f:
smsuser = SMSUser(json.loads(f.read())[0])
self.assertEqual(0, len(CommCareUser.by_domain(TEST_DOMAIN)))
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '2222222222')
ewsghana_smsuser = self.api_object.sms_user_sync(smsuser)
verified_number = ewsghana_smsuser.get_verified_number()
self.assertIsNotNone(verified_number)
self.assertEqual(verified_number.phone_number, '2222222222')
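    # Illustration of the username pattern asserted in test_create_smsuser:
    # a user named 'John Doe' with EWS id 42 in this domain would sync as
    #   'john.doe42@ewsghana-commtrack-smsusers-test.commcarehq.org'
    # (the name and id here are hypothetical).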
|
puttarajubr/commcare-hq
|
custom/ewsghana/tests/test_smsusers_sync.py
|
Python
|
bsd-3-clause
| 6,801
|
{
'name': 'Invoice Discount',
'version': '1.0',
'category': 'Accounting',
'sequence': 1,
'summary': "Show Discount Total and Total before Discount on Invoices. ",
'description':"Show Discount Total and Total before Discount on Invoices.",
'author': 'M.Hagag@DVIT.ME',
'website': 'http://dvit.me',
'website': 'http://www.dvit.me',
'depends': ['account_voucher'],
'data': [
'discount_view.xml',
'views/report_discount.xml',
],
'installable': True,
'auto_install': False,
}
|
hubercinux/dvit-odoo8
|
invoice_discount/__openerp__.py
|
Python
|
agpl-3.0
| 549
|
#-----------------------------------------------------------------------------
# Name: ExplorerShell.py
# Product: ClamWin Free Antivirus
#
# Author: alch [alch at users dot sourceforge dot net]
#
# Created: 2004/19/03
# Copyright: Copyright alch (c) 2004
# Licence:
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#-----------------------------------------------------------------------------
# This code is based on context_menu.py demo from Mark Hammond's win32 Extensions
import pythoncom
from win32com.shell import shell, shellcon
import win32gui, win32con, win32api
import Process
import os, sys, time
import RedirectStd
IContextMenu_Methods = ["QueryContextMenu", "InvokeCommand", "GetCommandString"]
IShellExtInit_Methods = ["Initialize"]
class ShellExtension:
_reg_progid_ = "ClamWin.ShellExtension.ContextMenu"
_reg_desc_ = "ClamWin Context Menu"
_reg_clsid_ = "{94FDC9F6-8C9B-4a70-8DBB-7662FFE48EB4}"
_com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu]
_public_methods_ = IContextMenu_Methods + IShellExtInit_Methods
def Initialize(self, folder, dataobj, hkey):
print 'Initialize'
self.dataobj = dataobj
def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
print 'QueryContextMenu'
# Query the items clicked on
format_etc = win32con.CF_HDROP, None, 1, -1, pythoncom.TYMED_HGLOBAL
try:
sm = self.dataobj.GetData(format_etc)
except pythoncom.com_error:
return 0
num_files = shell.DragQueryFile(sm.data_handle, -1)
msg = "Scan For Viruses With ClamWin"
if num_files>1:
# we aren't handling multiple files
return 0
else:
self._fname = shell.DragQueryFile(sm.data_handle, 0)
idCmd = idCmdFirst
items = []
if (uFlags & 0x000F) == shellcon.CMF_NORMAL or uFlags & shellcon.CMF_EXPLORE:
items.append(msg)
win32gui.InsertMenu(hMenu, indexMenu,
win32con.MF_SEPARATOR|win32con.MF_BYPOSITION,
0, None)
indexMenu += 1
for item in items:
win32gui.InsertMenu(hMenu, indexMenu,
win32con.MF_STRING|win32con.MF_BYPOSITION,
idCmd, item)
indexMenu += 1
idCmd += 1
win32gui.InsertMenu(hMenu, indexMenu,
win32con.MF_SEPARATOR|win32con.MF_BYPOSITION,
0, None)
indexMenu += 1
return idCmd-idCmdFirst # Must return number of menu items we added.
def InvokeCommand(self, ci):
print 'InvokeCommand'
mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
# get the directory of our dll
try:
if hasattr(sys, "frozen"):
                # attempt to read the folder from the registry first
key = None
try:
key = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, 'Software\\ClamWin')
currentDir = win32api.RegQueryValueEx(key, 'Path')[0]
win32api.CloseHandle(key)
except win32api.error:
if key is not None:
win32api.CloseHandle(key)
                # couldn't find it in the registry
# get it from command line
if sys.frozen == "dll":
this_filename = win32api.GetModuleFileName(sys.frozendllhandle)
else:
this_filename = sys.executable
currentDir = os.path.split(this_filename)[0]
else:
currentDir = os.path.split(os.path.abspath(__file__))[0]
except NameError: # No __file__ attribute (in boa debugger)
currentDir = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.chdir(currentDir)
        # we need to resort to calling an external executable here
# because wxPython has some threading issues when called from
# multiple Windows Explorer instances
# read this value from registry
exe = os.path.join(currentDir, 'ClamWin.exe')
if not os.path.exists(exe):
win32gui.MessageBox(hwnd, 'Could not locate file: %s'% exe, 'ClamWin', win32con.MB_OK | win32con.MB_ICONEXCLAMATION)
else:
cmd = '"%s" --mode=scanner --path="%s"' % (exe, self._fname)
try:
proc = Process.ProcessOpen(cmd)
proc.close()
except Process.ProcessError:
win32gui.MessageBox(hwnd, 'Could not execute %s.' % cmd, 'ClamWin', win32con.MB_OK | win32con.MB_ICONEXCLAMATION)
def GetCommandString(self, cmd, typ):
return "ClamWin Free Antivirus"
def DllRegisterServer():
import _winreg
keyNames = ("Folder\\shellex", "*\\shellex")
for name in keyNames:
key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT, name)
subkey = _winreg.CreateKey(key, "ContextMenuHandlers")
subkey2 = _winreg.CreateKey(subkey, "ClamWin")
_winreg.SetValueEx(subkey2, None, 0, _winreg.REG_SZ, ShellExtension._reg_clsid_)
print ShellExtension._reg_desc_, "registration complete."
def DllUnregisterServer():
import _winreg
try:
keyNames = ("Folder\\shellex", "*\\shellex")
for name in keyNames:
key = _winreg.DeleteKey(_winreg.HKEY_CLASSES_ROOT,
name + "\\ContextMenuHandlers\\ClamWin")
except WindowsError, details:
import errno
if details.errno != errno.ENOENT:
raise
print ShellExtension._reg_desc_, "unregistration complete."
if __name__=='__main__':
from win32com.server import register
register.UseCommandLine(ShellExtension,
finalize_register = DllRegisterServer,
finalize_unregister = DllUnregisterServer)
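# Registration is driven by win32com's command-line helper above; a typical
# invocation (with a Python whose bitness matches Explorer) would be:
#   python ExplorerShell.py              (registers the extension)
#   python ExplorerShell.py --unregister (removes it)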
|
ghostshellgnome/clamwin
|
py/ExplorerShell.py
|
Python
|
gpl-2.0
| 6,829
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_object
short_description: create/read/update/delete object in PAN-OS or Panorama
description: >
    - Policy objects form the match criteria for policy rules and many other functions in PAN-OS. These may include
      address objects, address groups, service objects, service groups, and tags.
author: "Bob Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPi U(https://pypi.python.org/pypi/pandevice)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device or Panorama management console being configured.
required: true
username:
description:
- Username credentials to use for authentication.
required: false
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The operation to be performed. Supported values are I(add)/I(delete)/I(find).
required: true
addressobject:
description:
- The name of the address object.
address:
description:
- The IP address of the host or network in CIDR notation.
address_type:
description:
- The type of address object definition. Valid types are I(ip-netmask) and I(ip-range).
addressgroup:
description:
- A static group of address objects or dynamic address group.
static_value:
description:
- A group of address objects to be used in an addressgroup definition.
dynamic_value:
description:
- The filter match criteria to be used in a dynamic addressgroup definition.
serviceobject:
description:
- The name of the service object.
source_port:
description:
- The source port to be used in a service object definition.
destination_port:
description:
- The destination port to be used in a service object definition.
protocol:
description:
- The IP protocol to be used in a service object definition. Valid values are I(tcp) or I(udp).
servicegroup:
description:
- A group of service objects.
services:
description:
- The group of service objects used in a servicegroup definition.
description:
description:
- The description of the object.
tag_name:
description:
- The name of an object or rule tag.
color:
description: >
- The color of the tag object. Valid values are I(red, green, blue, yellow, copper, orange, purple, gray,
light green, cyan, light gray, blue gray, lime, black, gold, and brown).
devicegroup:
description: >
- The name of the Panorama device group. The group must exist on Panorama. If device group is not defined it
is assumed that we are contacting a firewall.
required: false
default: None
'''
EXAMPLES = '''
- name: search for shared address object
panos_object:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'find'
address: 'DevNet'
- name: create an address group in devicegroup using API key
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'add'
addressgroup: 'Prod_DB_Svrs'
static_value: ['prod-db1', 'prod-db2', 'prod-db3']
description: 'Production DMZ database servers'
tag_name: 'DMZ'
devicegroup: 'DMZ Firewalls'
- name: create a global service for TCP 3306
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'add'
serviceobject: 'mysql-3306'
destination_port: '3306'
protocol: 'tcp'
description: 'MySQL on tcp/3306'
- name: create a global tag
panos_object:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
tag_name: 'ProjectX'
color: 'yellow'
description: 'Associated with Project X'
- name: delete an address object from a devicegroup using API key
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
addressobject: 'Win2K test'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def find_object(device, dev_group, obj_name, obj_type):
# Get the firewall objects
obj_type.refreshall(device)
if isinstance(device, pandevice.firewall.Firewall):
addr = device.find(obj_name, obj_type)
return addr
elif isinstance(device, pandevice.panorama.Panorama):
addr = device.find(obj_name, obj_type)
if addr is None:
if dev_group:
device.add(dev_group)
obj_type.refreshall(dev_group)
addr = dev_group.find(obj_name, obj_type)
return addr
else:
return False
def create_object(**kwargs):
if kwargs['addressobject']:
newobject = objects.AddressObject(
name=kwargs['addressobject'],
value=kwargs['address'],
type=kwargs['address_type'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if newobject.type and newobject.value:
return newobject
else:
return False
elif kwargs['addressgroup']:
newobject = objects.AddressGroup(
name=kwargs['addressgroup'],
static_value=kwargs['static_value'],
dynamic_value=kwargs['dynamic_value'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if newobject.static_value or newobject.dynamic_value:
return newobject
else:
return False
elif kwargs['serviceobject']:
newobject = objects.ServiceObject(
name=kwargs['serviceobject'],
protocol=kwargs['protocol'],
source_port=kwargs['source_port'],
destination_port=kwargs['destination_port'],
tag=kwargs['tag_name']
)
if newobject.protocol and newobject.destination_port:
return newobject
else:
return False
elif kwargs['servicegroup']:
newobject = objects.ServiceGroup(
name=kwargs['servicegroup'],
value=kwargs['services'],
tag=kwargs['tag_name']
)
if newobject.value:
return newobject
else:
return False
elif kwargs['tag_name']:
newobject = objects.Tag(
name=kwargs['tag_name'],
color=kwargs['color'],
comments=kwargs['description']
)
if newobject.name:
return newobject
else:
return False
else:
return False
def add_object(device, dev_group, new_object):
if dev_group:
dev_group.add(new_object)
else:
device.add(new_object)
new_object.create()
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(required=True, choices=['add', 'update', 'delete', 'find']),
addressobject=dict(default=None),
addressgroup=dict(default=None),
serviceobject=dict(default=None),
servicegroup=dict(default=None),
address=dict(default=None),
address_type=dict(default='ip-netmask', choices=['ip-netmask', 'ip-range', 'fqdn']),
static_value=dict(type='list', default=None),
dynamic_value=dict(default=None),
protocol=dict(default=None, choices=['tcp', 'udp']),
source_port=dict(default=None),
destination_port=dict(default=None),
services=dict(type='list', default=None),
description=dict(default=None),
tag_name=dict(default=None),
color=dict(default=None, choices=['red', 'green', 'blue', 'yellow', 'copper', 'orange', 'purple',
'gray', 'light green', 'cyan', 'light gray', 'blue gray',
'lime', 'black', 'gold', 'brown']),
devicegroup=dict(default=None)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']],
mutually_exclusive=[['addressobject', 'addressgroup',
'serviceobject', 'servicegroup',
'tag_name']]
)
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
addressobject = module.params['addressobject']
addressgroup = module.params['addressgroup']
serviceobject = module.params['serviceobject']
servicegroup = module.params['servicegroup']
address = module.params['address']
address_type = module.params['address_type']
static_value = module.params['static_value']
dynamic_value = module.params['dynamic_value']
protocol = module.params['protocol']
source_port = module.params['source_port']
destination_port = module.params['destination_port']
services = module.params['services']
description = module.params['description']
tag_name = module.params['tag_name']
color = module.params['color']
devicegroup = module.params['devicegroup']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
# What type of object are we talking about?
if addressobject:
obj_name = addressobject
obj_type = objects.AddressObject
elif addressgroup:
obj_name = addressgroup
obj_type = objects.AddressGroup
elif serviceobject:
obj_name = serviceobject
obj_type = objects.ServiceObject
elif servicegroup:
obj_name = servicegroup
obj_type = objects.ServiceGroup
elif tag_name:
obj_name = tag_name
obj_type = objects.Tag
else:
module.fail_json(msg='No object type defined!')
# Which operation shall we perform on the object?
if operation == "find":
# Search for the object
match = find_object(device, dev_group, obj_name, obj_type)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Object matched'
)
else:
module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name)
elif operation == "delete":
# Search for the object
match = find_object(device, dev_group, obj_name, obj_type)
# If found, delete it
if match:
try:
match.delete()
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=True, msg='Object \'%s\' successfully deleted' % obj_name)
else:
module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name)
elif operation == "add":
# Search for the object. Fail if found.
match = find_object(device, dev_group, obj_name, obj_type)
if match:
module.fail_json(msg='Object \'%s\' already exists. Use operation: \'update\' to change it.' % obj_name)
else:
try:
new_object = create_object(
addressobject=addressobject,
addressgroup=addressgroup,
serviceobject=serviceobject,
servicegroup=servicegroup,
address=address,
address_type=address_type,
static_value=static_value,
dynamic_value=dynamic_value,
protocol=protocol,
source_port=source_port,
destination_port=destination_port,
services=services,
description=description,
tag_name=tag_name,
color=color
)
changed = add_object(device, dev_group, new_object)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg='Object \'%s\' successfully added' % obj_name)
elif operation == "update":
# Search for the object. Update if found.
match = find_object(device, dev_group, obj_name, obj_type)
if match:
try:
new_object = create_object(
addressobject=addressobject,
addressgroup=addressgroup,
serviceobject=serviceobject,
servicegroup=servicegroup,
address=address,
address_type=address_type,
static_value=static_value,
dynamic_value=dynamic_value,
protocol=protocol,
source_port=source_port,
destination_port=destination_port,
services=services,
description=description,
tag_name=tag_name,
color=color
)
changed = add_object(device, dev_group, new_object)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg='Object \'%s\' successfully updated.' % obj_name)
else:
module.fail_json(msg='Object \'%s\' does not exist. Use operation: \'add\' to add it.' % obj_name)
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/panos/panos_object.py
|
Python
|
bsd-3-clause
| 16,678
|
"""Module containing class `ImportArchiveDataCommand`."""
from vesper.command.command import Command, CommandSyntaxError
from vesper.singleton.extension_manager import extension_manager
import vesper.command.command_utils as command_utils
class ImportCommand(Command):
extension_name = 'import'
def __init__(self, args):
super().__init__(args)
importer_spec = command_utils.get_required_arg('importer', args)
self._importer = _create_importer(importer_spec)
def execute(self, context):
return self._importer.execute(context)
def _create_importer(importer_spec):
try:
name = importer_spec['name']
except KeyError:
raise CommandSyntaxError('Missing required importer name.')
cls = _get_importer_class(name)
arguments = importer_spec.get('arguments', {})
return cls(arguments)
def _get_importer_class(name):
classes = extension_manager.get_extensions('Importer')
try:
return classes[name]
except KeyError:
raise CommandSyntaxError(
'Unrecognized importer name "{}".'.format(name))
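# Illustrative argument dict consumed above (importer name is hypothetical):
#   ImportCommand({'importer': {'name': 'SomeImporter', 'arguments': {...}}})
# 'name' selects a registered 'Importer' extension; 'arguments' defaults to {}.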
|
HaroldMills/Vesper
|
vesper/command/import_command.py
|
Python
|
mit
| 1,160
|
from PdLANParty import PdLANParty
|
ruohoruotsi/pure-data-workbench
|
RjDj Scene Composer Pack/RJC-1000.app/Contents/Resources/PdLANParty/__init__.py
|
Python
|
mit
| 34
|
import json
from .problem import Problem
class BinJSONEncoder(json.JSONEncoder):
def default(self, o):
return o.to_json()
class Bin:
def __init__(self, node, capacity):
self.node = node
self.capacity = capacity.copy()
self.remaining_capacity = capacity.copy()
self.dimensions = len(capacity)
def __getitem__(self, index):
return self.capacity[index]
def __setitem__(self, index, value):
self.capacity[index] = value
def __str__(self):
return "<{} - {} - {}>".format(str(self.node), self.capacity, self.remaining_capacity)
def to_json(self):
return { "capacity": self.capacity, "remaining_capacity": self.remaining_capacity }
def get_remaining_capacity(self, type="online"):
if type == "online":
self._update_remaining_capacity()
return self.remaining_capacity
def has_capacity_for(self, item, type="online"):
print(self)
print(item)
if type == "online":
self._update_remaining_capacity()
for i in range(self.dimensions):
if item[i] > self.remaining_capacity[i] - self.capacity[i] * Problem.RESERVE:
return False
return True
# For offline bin packing, we remove the used capacity here
def add_item(self, item):
for i in range(self.dimensions):
self.remaining_capacity[i] -= item[i]
def _update_remaining_capacity(self):
status = self.node.status()
cpu = 0
for core, usage in status["cpus"].items():
cpu += usage
net = 0
for rxtx, value in status["net"]["docker0"].items():
net += value
self.remaining_capacity[0] = self.capacity[0] - (cpu / 100)
self.remaining_capacity[1] = status["free_memory"]
self.remaining_capacity[2] = self.capacity[2] - net
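# Minimal offline sketch (illustrative values; type="offline" never touches the
# node, and Problem.RESERVE is assumed to be a small fraction such as 0.1):
#   b = Bin(node=None, capacity=[4.0, 8192.0, 1000.0])
#   b.has_capacity_for([1.0, 1024.0, 100.0], type="offline")  # reserve headroom check
#   b.add_item([1.0, 1024.0, 100.0])                          # book the capacity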
|
Soulou/msc-thesis-container-balancer-controller
|
balance/bin.py
|
Python
|
mit
| 1,894
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import aeidon
class TestSetAgent(aeidon.TestCase):
def setup_method(self, method):
self.project = self.new_project()
@aeidon.deco.reversion_test
def test_set_duration(self):
subtitles = self.project.subtitles
self.project.set_duration(0, "00:01:11.111")
assert subtitles[0].duration_time == "00:01:11.111"
@aeidon.deco.reversion_test
def test_set_end(self):
subtitles = self.project.subtitles
self.project.set_end(0, 600000)
assert subtitles[0].end_frame == 600000
@aeidon.deco.reversion_test
def test_set_main_text(self):
subtitles = self.project.subtitles
self.project.set_main_text(0, "m")
assert subtitles[0].main_text == "m"
@aeidon.deco.reversion_test
def test_set_start(self):
subtitles = self.project.subtitles
self.project.set_start(0, -100.0)
assert subtitles[0].start_seconds == -100.0
@aeidon.deco.reversion_test
def test_set_start__reorder(self):
subtitles = self.project.subtitles
text_0 = subtitles[0].main_text
text_3 = subtitles[3].main_text
self.project.set_start(3, -1000)
assert subtitles[0].start_frame == -1000
assert subtitles[0].main_text == text_3
assert subtitles[1].main_text == text_0
@aeidon.deco.reversion_test
def test_set_text__main(self):
subtitles = self.project.subtitles
self.project.set_text(0, aeidon.documents.MAIN, "m")
assert subtitles[0].main_text == "m"
@aeidon.deco.reversion_test
def test_set_text__translation(self):
subtitles = self.project.subtitles
self.project.set_text(0, aeidon.documents.TRAN, "t")
assert subtitles[0].tran_text == "t"
@aeidon.deco.reversion_test
def test_set_translation_text(self):
subtitles = self.project.subtitles
self.project.set_translation_text(0, "t")
assert subtitles[0].tran_text == "t"
|
otsaloma/gaupol
|
aeidon/agents/test/test_set.py
|
Python
|
gpl-3.0
| 2,672
|
from __future__ import absolute_import
from markdown import Markdown
from skylines.lib.markdown.urlize import UrlizeExtension
__all__ = ['markdown']
urlize = UrlizeExtension()
markdown = Markdown(extensions=['nl2br', urlize], safe_mode='escape')
|
dkm/skylines
|
skylines/lib/markdown/__init__.py
|
Python
|
agpl-3.0
| 248
|
# -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
import urllib
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
def normalize_keys_upper(data):
"""Set all keys of a dictionnary to uppercase
Buckaroo parameters names are case insensitive
convert everything to upper case to be able to easily detected the presence
of a parameter by checking the uppercase key only
"""
return dict((key.upper(), val) for key, val in data.items())
class AcquirerBuckaroo(osv.Model):
_inherit = 'payment.acquirer'
def _get_buckaroo_urls(self, cr, uid, environment, context=None):
""" Buckaroo URLs
"""
if environment == 'prod':
return {
'buckaroo_form_url': 'https://checkout.buckaroo.nl/html/',
}
else:
return {
'buckaroo_form_url': 'https://testcheckout.buckaroo.nl/html/',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerBuckaroo, self)._get_providers(cr, uid, context=context)
providers.append(['buckaroo', 'Buckaroo'])
return providers
_columns = {
'brq_websitekey': fields.char('WebsiteKey', required_if_provider='buckaroo'),
'brq_secretkey': fields.char('SecretKey', required_if_provider='buckaroo'),
}
def _buckaroo_generate_digital_sign(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
        :param browse acquirer: the payment.acquirer browse record. It should
                                have a SHA secret key configured (brq_secretkey)
:param string inout: 'in' (openerp contacting buckaroo) or 'out' (buckaroo
contacting openerp).
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'buckaroo'
keys = "add_returndata Brq_amount Brq_culture Brq_currency Brq_invoicenumber Brq_return Brq_returncancel Brq_returnerror Brq_returnreject brq_test Brq_websitekey".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
values = dict(values or {})
if inout == 'out':
for key in values.keys():
# case insensitive keys
if key.upper() == 'BRQ_SIGNATURE':
del values[key]
break
items = sorted(values.items(), key=lambda (x, y): x.lower())
sign = ''.join('%s=%s' % (k, urllib.unquote_plus(v)) for k, v in items)
else:
sign = ''.join('%s=%s' % (k,get_value(k)) for k in keys)
#Add the pre-shared secret key at the end of the signature
sign = sign + acquirer.brq_secretkey
if isinstance(sign, str):
# TODO: remove me? should not be used
sign = urlparse.parse_qsl(sign)
shasign = sha1(sign.encode('utf-8')).hexdigest()
return shasign
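    # Illustrative 'in' signing input (values made up): the fixed-order keys are
    # concatenated as "add_returndata=Brq_amount=10.00Brq_culture=en-US...",
    # missing keys contribute empty values, brq_secretkey is appended, and the
    # resulting string is SHA1-hexdigested.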
def buckaroo_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
buckaroo_tx_values = dict(tx_values)
buckaroo_tx_values.update({
'Brq_websitekey': acquirer.brq_websitekey,
'Brq_amount': tx_values['amount'],
'Brq_currency': tx_values['currency'] and tx_values['currency'].name or '',
'Brq_invoicenumber': tx_values['reference'],
'brq_test': False if acquirer.environment == 'prod' else True,
'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
'Brq_culture': (partner_values.get('lang') or 'en_US').replace('_', '-'),
})
if buckaroo_tx_values.get('return_url'):
buckaroo_tx_values['add_returndata'] = buckaroo_tx_values.pop('return_url')
else:
buckaroo_tx_values['add_returndata'] = ''
buckaroo_tx_values['Brq_signature'] = self._buckaroo_generate_digital_sign(acquirer, 'in', buckaroo_tx_values)
return partner_values, buckaroo_tx_values
def buckaroo_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_buckaroo_urls(cr, uid, acquirer.environment, context=context)['buckaroo_form_url']
class TxBuckaroo(osv.Model):
_inherit = 'payment.transaction'
# buckaroo status
_buckaroo_valid_tx_status = [190]
_buckaroo_pending_tx_status = [790, 791, 792, 793]
_buckaroo_cancel_tx_status = [890, 891]
_buckaroo_error_tx_status = [490, 491, 492]
_buckaroo_reject_tx_status = [690]
_columns = {
'buckaroo_txnid': fields.char('Transaction ID'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _buckaroo_form_get_tx_from_data(self, cr, uid, data, context=None):
""" Given a data dict coming from buckaroo, verify it and find the related
transaction record. """
origin_data = dict(data)
data = normalize_keys_upper(data)
reference, pay_id, shasign = data.get('BRQ_INVOICENUMBER'), data.get('BRQ_PAYMENT'), data.get('BRQ_SIGNATURE')
if not reference or not pay_id or not shasign:
            error_msg = 'Buckaroo: received data with missing reference (%s) or pay_id (%s) or shasign (%s)' % (reference, pay_id, shasign)
_logger.error(error_msg)
raise ValidationError(error_msg)
tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Buckaroo: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
                error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
#verify shasign
shasign_check = self.pool['payment.acquirer']._buckaroo_generate_digital_sign(tx.acquirer_id, 'out', origin_data)
if shasign_check.upper() != shasign.upper():
error_msg = 'Buckaroo: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
_logger.error(error_msg)
raise ValidationError(error_msg)
return tx
def _buckaroo_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
data = normalize_keys_upper(data)
if tx.acquirer_reference and data.get('BRQ_TRANSACTIONS') != tx.acquirer_reference:
invalid_parameters.append(('Transaction Id', data.get('BRQ_TRANSACTIONS'), tx.acquirer_reference))
        # check the amount that was actually bought
if float_compare(float(data.get('BRQ_AMOUNT', '0.0')), tx.amount, 2) != 0:
invalid_parameters.append(('Amount', data.get('BRQ_AMOUNT'), '%.2f' % tx.amount))
if data.get('BRQ_CURRENCY') != tx.currency_id.name:
invalid_parameters.append(('Currency', data.get('BRQ_CURRENCY'), tx.currency_id.name))
return invalid_parameters
def _buckaroo_form_validate(self, cr, uid, tx, data, context=None):
data = normalize_keys_upper(data)
status_code = int(data.get('BRQ_STATUSCODE','0'))
if status_code in self._buckaroo_valid_tx_status:
tx.write({
'state': 'done',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
elif status_code in self._buckaroo_pending_tx_status:
tx.write({
'state': 'pending',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
elif status_code in self._buckaroo_cancel_tx_status:
tx.write({
'state': 'cancel',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
else:
error = 'Buckaroo: feedback error'
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error,
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return False
|
ncliam/serverpos
|
openerp/addons/payment_buckaroo/models/buckaroo.py
|
Python
|
agpl-3.0
| 9,117
|
#!/usr/bin/env python
'''
OWASP ZSC
https://www.owasp.org/index.php/OWASP_ZSC_Tool_Project
https://github.com/zscproject/OWASP-ZSC
http://api.z3r0d4y.com/
https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]
'''
import binascii
import random
import string
from core.compatible import version
_version = version()
def encode(f):
hex_arr = []
val_names = []
data = ''
eval = ''
n = 0
m = 0
for line in f:
        if _version == 2:
            hex_arr.append(str(binascii.b2a_hex(line)))
        if _version == 3:
hex_arr.append(str((binascii.b2a_hex(str(line).encode('latin-1'))
).decode('latin-1')))
length = len(hex_arr)
while (length != 0):
val_names.append(random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50)))
length -= 1
for hex in hex_arr:
data += val_names[n] + ' = "' + str(hex) + '"\n'
n += 1
while (m <= n - 1):
eval += val_names[m] + '.to_s + '
m += 1
var_data = random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
func_name = ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
func_argv = random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
var_str = random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
f = '''
%s
def %s(%s)
%s = Array(%s).pack('H*')
return %s
end
%s = %s
eval(%s(%s))
''' % (data, func_name, func_argv, var_str, func_argv, var_str, var_data,
eval[:-2], func_name, var_data)
return f
def start(content,cli):
return str(str('=begin\n') + str(content.replace(
'=begin', '#=begin').replace('=end', '#=end')) + str('\n=end') + str(
encode(content)) + str('\n'))
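# Illustrative shape of the generated Ruby wrapper (identifiers are random):
#   xQwErTy = "23212f7573722f62696e2f656e762072756279"
#   def SomeFn(arg)
#       tmp = Array(arg).pack('H*')   # hex digits back to raw bytes
#       return tmp
#   end
#   v = xQwErTy.to_s + ...
#   eval(SomeFn(v))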
|
zscproject/OWASP-ZSC
|
lib/encoder/ruby/simple_hex.py
|
Python
|
gpl-3.0
| 2,161
|
"""
@author: thom
"""
import logging
import time
from rdkit.Chem import AllChem as Chem
import networkx as nx
class Molecule(Chem.Mol):
"""A base representation of a Molecule with potential and kinetic energy.
* Potential energy is the energy required to form all bonds in the Molecule (therefore always negative as bond formation releases energy).
* Kinetic energy is, as usual, equal to 1/2 * mass * velocity ^ 2."""
def __init__(self, source, internal_energy=0, kinetic_energy=None, canonize=True, components=None, **kwargs):
"""
:param internal_energy: initial internal energy for this Molecule
:type internal_energy: float
:param kinetic_energy: initial kinetic energy for this Molecule
:type kinetic_energy: float
        :param canonize: make implicit Hs explicit? Default True, but when copying Molecules we don't want to risk re-introducing these changes
:type canonize: bool
:param components: for molecules that consist of multiple disjoint components, a mapping of atoms to component
:type components: list of set of indexes of atoms within the molecule, where each set represents a different component
"""
# just has to be unique over lifetime of simulation - id() only guarantees unique over lifetime of object, and time.clock() includes integer component which can overlap
self.global_id = "{}.{}".format(id(self), time.clock())
# Make all H explicit for later processing
if not isinstance(source, Chem.Mol):
source = Chem.MolFromSmiles(source)
if canonize:
source = Chem.AddHs(source)
Chem.Mol.__init__(self, source.ToBinary())
self._smiles = Chem.MolToSmiles(self)
if components is None and self._smiles.find(".") == -1: # if simple molecule with only one component, set components manually
components = [set(range(source.GetNumAtoms()))]
self._components = components
self._mass = sum([atom.GetMass() for atom in self.GetAtoms()])
self.set_internal_energy(internal_energy)
if kinetic_energy is not None:
self.set_kinetic_energy(kinetic_energy)
def get_state(self):
state = {'ke': self.get_kinetic_energy(), 'ie': self.get_internal_energy(), 'smiles': self._smiles}
return state
def get_mass(self):
return self._mass
def get_potential_energy(self, chemistry):
"""Return the energy required to form all of the molecule's bonds (therefore a negative quantity as bond formation releases energy)
WARNING: assumes formation energy = energy of breaking (symmetrical)
:rtype: float"""
return sum([chemistry.get_bond_energy(bond.GetBeginAtom(), bond.GetEndAtom(), end_bond_type=int(bond.GetBondType())) for bond in self.GetBonds()])
def get_internal_energy(self):
return self._internal_energy
def set_internal_energy(self, value):
if value < 0:
raise ValueError
self._internal_energy = value
def set_kinetic_energy(self, value):
if value < 0:
raise ValueError
self._kinetic_energy = value
def get_kinetic_energy(self):
return self._kinetic_energy
def split_molecule(self):
"""Allocate the initial energy proportional to the square of the mass of each resulting molecule.
A rather simplified calculation as we can't easily work out the transfer of energy as the reaction is changing
the interacting molecules
:rtype: list of Molecule or subclass"""
split_mols = [Molecule(smiles) for smiles in Chem.MolToSmiles(self).split(".")]
total_mass_squared = sum([mol.get_mass() ** 2 for mol in split_mols])
for mol in split_mols:
mol.set_internal_energy(self._internal_energy * (mol.get_mass() ** 2 / total_mass_squared))
mol.set_kinetic_energy(self.get_kinetic_energy() * (mol.get_mass() ** 2 / total_mass_squared))
return split_mols
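    # Worked example (illustrative): splitting a parent into two fragments of
    # equal mass m gives each a weight of m**2 / (2 * m**2) = 0.5, i.e. half of
    # the parent's internal and kinetic energy.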
def combine_molecules(self, mols):
"""Combine a number of molecules into one. Bookkeeping rather than Chemistry - does not connect molecules with bonds,
just groups them in RDKit. The kinetic energy of the combined molecule is assumed to be preserved - that is, we assume
a head-on collision.
:rtype: Molecule or subclass"""
combined_mol = mols[0]
combined_IE = mols[0].get_internal_energy()
combined_KE = mols[0].get_kinetic_energy()
num_atoms = mols[0].GetNumAtoms()
components = mols[0]._components
for mol in mols[1:]:
combined_mol = Chem.CombineMols(combined_mol, mol)
combined_IE += mol.get_internal_energy()
combined_KE += mol.get_kinetic_energy()
components.append(set(range(num_atoms, num_atoms + mol.GetNumAtoms())))
num_atoms = num_atoms + mol.GetNumAtoms()
return Molecule(combined_mol, internal_energy=combined_IE, kinetic_energy=combined_KE, components=components)
def same_component(self, idx1, idx2):
# Components is a list of sets of atom indexes in molecule - same set = same component
if self._components is None: # Catch-all - components should have been set in init() and multiple components initialized in previous call to combine_molecules
self._components = self._get_strongly_connected_components()
for component in self._components:
if idx1 in component and idx2 in component:
return True
return False
def get_total_formal_charge(self):
return sum([i.GetFormalCharge() for i in self.GetAtoms()])
def _assign_formal_charge(self):
"""OpenEye Charge Model - http://www.eyesopen.com/docs/toolkits/current/html/OEChem_TK-python/valence.html#subsection-valence-openeye-charge
The OpenEye formal charge model assigns formal charges to elements based upon their total valence.
In OEChem, this functionality is invoked by the OEAssignFormalCharges function.
If the formal charge on an atom is non-zero, it is left unchanged.
Hydrogen
If the valence isn't one, the formal charge is +1.
Carbon
If the valence is three, the formal charge is +1 if the atom has a polar neighbor, i.e. N, O or S, and formal charge -1 otherwise.
Nitrogen
If the valence is two, the formal charge is -1, and if the valence is four the formal charge is +1.
Oxygen
If the valence is one, the formal charge is -1, and if the valence is three the formal charge is +1."""
for i in self.GetAtoms():
valence = i.GetDegree()
formal_charge = 0
if i.GetAtomicNum() == 1: # H
if valence != 1:
formal_charge = 1
if i.GetAtomicNum() == 6: # C
if valence == 3:
formal_charge = -1
                    for neighbor in i.GetNeighbors():
                        # polar neighbor: N, O or S (per the OpenEye model quoted above)
                        if neighbor.GetAtomicNum() in (7, 8, 16):
                            formal_charge = 1
if i.GetAtomicNum() == 7: # N
if valence == 2:
formal_charge = -1
                if valence == 4:  # per the OpenEye model quoted above
                    formal_charge = 1
if i.GetAtomicNum() == 8: # O
if valence == 1:
formal_charge = -1
elif valence == 3:
formal_charge = 1
i.SetFormalCharge(formal_charge)
def _get_strongly_connected_components(self):
logging.info("Call to _get_strongly_connected_components")
g = nx.Graph()
for bond in self.GetBonds():
g.add_edge(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())
connected_components = list(nx.connected_components(g))
# Add single atoms as independent components
for idx in range(self.GetNumAtoms()):
if len(self.GetAtomWithIdx(idx).GetBonds()) == 0:
connected_components.append([idx])
return connected_components
def __deepcopy__(self, memo):
# don't mess with current structure - just leave it exactly as it is
return Molecule(self, internal_energy=self.get_internal_energy(), kinetic_energy=self.get_kinetic_energy(), canonize=False)
def __str__(self):
return Chem.MolToSmiles(self)
|
th0mmeke/toyworld
|
molecule.py
|
Python
|
gpl-3.0
| 8,478
|
import socket
class UDP:
def __init__(self, bind=("127.0.0.1", 4444)):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(bind)
def send_test_message(self, msg=b"test message", destination=("127.0.0.1", 3333)):
self.sock.sendto(msg, destination)
if __name__ == "__main__":
import sys
"""input data:
* IP address as string
* destination port
* msg
"""
user_entry = True
try:
user_data = ((sys.argv[1], int(sys.argv[2])), sys.argv[3])
except IndexError:
user_entry = False
udp = UDP()
if user_entry:
destination = user_data[0]
msg = user_data[1].encode("ascii")
msg = msg.replace(b"\\r\\n", b"\r\n")
udp.send_test_message(msg, destination)
else:
udp.send_test_message()
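# Example invocation (illustrative payload; defaults target 127.0.0.1:3333):
#   python TestSendTestMessage.py 127.0.0.1 3333 "ping\r\n"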
|
rCorvidae/OrionPI
|
src/tests/IO/TestSendTestMessage.py
|
Python
|
mit
| 836
|
# -*- coding: utf-8 -*-
from autograd import numpy as np
from lifelines.fitters import ParametricRegressionFitter
from lifelines.fitters.mixins import SplineFitterMixin
from lifelines.utils.safe_exp import safe_exp
class CRCSplineFitter(SplineFitterMixin, ParametricRegressionFitter):
"""
Below is an implementation of Crowther, Royston, Clements AFT cubic spline models. Internally, lifelines
uses this for survival model probability calibration, but it can also be useful for a highly flexible AFT model.
Parameters
-----------
n_baseline_knots: int
the number of knots in the cubic spline. If equal to 2, then the model is equal to the WeibullAFT model.
References
------------
Crowther MJ, Royston P, Clements M. A flexible parametric accelerated failure time model.
Examples
---------
.. code:: python
from lifelines import datasets, CRCSplineFitter
rossi = datasets.load_rossi()
regressors = {"beta_": "age + C(fin)", "gamma0_": "1", "gamma1_": "1", "gamma2_": "1"}
crc = CRCSplineFitter(n_baseline_knots=3).fit(rossi, "week", "arrest", regressors=regressors)
crc.print_summary()
"""
_KNOWN_MODEL = True
_FAST_MEDIAN_PREDICT = False
fit_intercept = True
_scipy_fit_method = "SLSQP"
def __init__(self, n_baseline_knots: int, *args, **kwargs):
assert n_baseline_knots > 1, "must be greater than 1"
self.n_baseline_knots = n_baseline_knots
self._fitted_parameter_names = ["beta_"] + ["gamma%d_" % i for i in range(0, self.n_baseline_knots)]
super(CRCSplineFitter, self).__init__(*args, **kwargs)
def _create_initial_point(self, Ts, E, entries, weights, Xs):
return [
{
**{"beta_": np.zeros(len(Xs["beta_"].columns)), "gamma0_": np.array([0.0]), "gamma1_": np.array([0.1])},
**{"gamma%d_" % i: np.array([0.0]) for i in range(2, self.n_baseline_knots)},
}
]
def set_knots(self, T, E):
self.knots = np.percentile(np.log(T[E.astype(bool).values]), np.linspace(5, 95, self.n_baseline_knots))
def _pre_fit_model(self, Ts, E, df):
self.set_knots(Ts[0], E)
def _cumulative_hazard(self, params, T, Xs):
# a negative sign makes the interpretation the same as other AFT models
Xbeta = -np.dot(Xs["beta_"], params["beta_"])
logT = np.log(T)
H = safe_exp(params["gamma0_"] + params["gamma1_"] * (logT + Xbeta))
for i in range(2, self.n_baseline_knots):
H *= safe_exp(
params["gamma%d_" % i]
* self.basis(logT + Xbeta, self.knots[i - 1], min_knot=self.knots[0], max_knot=self.knots[-1])
)
return H
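# In symbols: with Xbeta folded into the log-time scale,
#   H(t|x) = exp(gamma0 + gamma1*(log t - x.beta)) * prod_{i>=2} exp(gamma_i * basis_i(log t - x.beta)),
# which reduces to a Weibull AFT model when n_baseline_knots == 2.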
|
CamDavidsonPilon/lifelines
|
lifelines/fitters/crc_spline_fitter.py
|
Python
|
mit
| 2,772
|
# Copyright (c) 2012-2014 Stephanie T. Douglas
# under the MIT License (see LICENSE.txt for full details)
import numpy as np
import cPickle
import triangle
import matplotlib.pyplot as plt
from fit_rossby import *
import get_data
# Set up 3 arrays:
# rossby number, L_{X}/L_{bol}, and the associated uncertainty
#data_rossby =
#data_ll =
#data_ull =
pdat,pobs,pobsnr,pobsr = get_data.get_data('P')
hdat,hobs,hobsnr,hobsr = get_data.get_data('H')
pros = pdat.field('ROSSBY')
hros = hdat.field('ROSSBY')
peqw,pueqw = pdat.field('AVG_EQW'),pdat.field('AVG_EQW_ERR')
pll,pull = pdat.field('AVG_LHA'),pdat.field('AVG_LHA_ERR')
heqw,hueqw = hdat.field('AVG_EQW'),hdat.field('AVG_EQW_ERR')
hll,hull = hdat.field('AVG_LHA'),hdat.field('AVG_LHA_ERR')
pbin = (pdat.field('BINARY')>0)
hbin = (hdat.field('BINARY')>0)
pmass = pdat.field('KH_MASS')
hmass = hdat.field('KH_MASS')
pperiods = pdat.field('PERIOD')
hperiods = hdat.field('PERIOD')
ppmem = pdat.field('ADAMPMEM')
hpmem = hdat.field('ROESER_PMEM')
pmem_threshold=70.0
pgood = np.where((pmass<=1.3) & (pmass>0.1) & (pbin==False) & (peqw-pueqw>0)
& ((ppmem>=pmem_threshold) | (ppmem<0)) & (pperiods>0))[0]
hgood = np.where((hmass<=1.3) & (hmass>0.1) & (hbin==False) & (heqw-hueqw>0)
& ((hpmem>=pmem_threshold) | (hpmem<0)) & (hperiods>0))[0]
data_rossby = 10**np.append(pros[pgood],hros[hgood])
sort_order = np.argsort(data_rossby)
data_rossby = data_rossby[sort_order]
#print data_rossby
data_ll = np.append(pll[pgood],hll[hgood])
data_ll = data_ll[sort_order]
#print data_ll
data_ull = np.append(pull[pgood],hull[hgood])
data_ull = data_ull[sort_order]
#print data_ull
# Decide on your starting parameters
start_p = np.asarray([1e-4,0.1,-1.0])
# run the emcee wrapper function
samples = run_rossby_fit(start_p,data_rossby,data_ll,data_ull)
# Plot the output
plot_rossby(samples,data_rossby,data_ll,data_ull)
# Make a triangle plot
# (this won't be any good for publication - .ps/.eps won't do transparency)
triangle.corner(samples,labels=['sat_level (x10^-4)','turnover','beta'],quantiles=[0.16,0.50,0.84])
# adjust the ticklabels for easier reading
ax = plt.subplot(337)
xticks = ax.get_xticks()
new_labels = []
for xt in xticks:
new_labels =np.append(new_labels,str(xt*10000))
ax.set_xticklabels(new_labels)
# Uncomment to save the plot
plt.savefig('fit_rossby_corner_temp.png')
# Save the sample positions to a pkl file to be accessed later
outfile = open('fit_rossby_samples_temp.pkl','wb')
cPickle.dump(samples,outfile)
outfile.close()
# Write to a text file for posting online :)
print_pdf(samples,"fit_rossby_samples_temp.csv")
|
stephtdouglas/fit-rossby
|
example_script.py
|
Python
|
mit
| 2,621
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script installs and configures a Hadoop-YARN cluster using Ansible.
@author: e-science Dev-team
"""
import os
import json
from os.path import dirname, abspath, isfile
import logging
import subprocess
from backend.models import ClusterInfo, UserInfo, OrkaImage, OrkaImageCategory, Setting
from django_db_after_login import db_hadoop_update
from celery import current_task
from cluster_errors_constants import HADOOP_STATUS_ACTIONS, REVERSE_HADOOP_STATUS, REPORT, SUMMARY, \
error_ansible_playbook, const_cluster_status_pending, const_hadoop_status_format, const_hadoop_status_started, const_hadoop_status_stopped
from authenticate_user import unmask_token, encrypt_key
from ansible import errors
# Definitions of return value errors
# Ansible constants
playbook = 'site.yml'
ansible_playbook = dirname(abspath(__file__)) + '/ansible/' + playbook
ansible_hosts_prefix = 'ansible_hosts_'
ansible_verbosity = ' -v'
def install_yarn(*args):
"""
Calls ansible playbook for the installation of yarn and all
required dependencies. Also formats and starts yarn or cloudera hadoop distribution.
Takes positional arguments as args tuple.
args: token, hosts_list, master_ip, cluster_name, orka_image_uuid, ssh_file, replication_factor, dfs_blocksize
"""
from okeanos_utils import set_cluster_state
list_of_hosts = args[1]
    master_hostname = list_of_hosts[0]['fqdn'].split('.', 1)[0]  # list_of_hosts[0]['fqdn'] is like this: snf-654916.vm.okeanos.grnet.gr and we only need the snf-654916 part
cluster_size = len(list_of_hosts)
cluster_id = args[3].rsplit('-', 1)[1] # get the cluster's id
# Create ansible_hosts file
try:
hosts_filename = create_ansible_hosts(args[3], list_of_hosts, args[2])
# Run Ansible playbook
ansible_create_cluster(hosts_filename, cluster_size, args[4], args[5], args[0], args[6], args[7], args[8])
# Format and start Hadoop cluster
set_cluster_state(args[0], cluster_id,
'YARN Cluster is active', status='Active',
master_IP=args[2])
ansible_manage_cluster(cluster_id, 'format')
ansible_manage_cluster(cluster_id, 'start')
except Exception, e:
msg = 'Error while running Ansible %s' % e
raise RuntimeError(msg, error_ansible_playbook)
finally:
subprocess.call('rm /tmp/master_' + master_hostname + '_pub_key_* ', shell=True)
logging.log(SUMMARY, 'YARN Cluster is active. You can access it through '
+ args[2] + ':8088/cluster')
def create_ansible_hosts(cluster_name, list_of_hosts, hostname_master):
"""
Function that creates the ansible_hosts file and
returns the name of the file.
"""
# Turns spaces to underscores from cluster name postfixed with cluster id
# and appends it to ansible_hosts. The ansible_hosts file will now have a
# unique name
hosts_filename = os.getcwd() + '/' + ansible_hosts_prefix + cluster_name.replace(" ", "_")
# Create ansible_hosts file and write all information that is
# required from Ansible playbook.
master_host = '[master]'
slaves_host = '[slaves]'
cluster_id = cluster_name.rsplit('-',1)[1]
cluster = ClusterInfo.objects.get(id=cluster_id)
if 'cdh' in cluster.os_image.lower():
master_host = '[master_cloud]'
slaves_host = '[slaves_cloud]'
with open(hosts_filename, 'w+') as target:
target.write(master_host + '\n')
target.write(list_of_hosts[0]['fqdn'])
target.write(' private_ip='+list_of_hosts[0]['private_ip'])
target.write(' ansible_ssh_host=' + hostname_master + '\n' + '\n')
target.write(slaves_host +'\n')
for host in list_of_hosts[1:]:
target.write(host['fqdn'])
target.write(' private_ip='+host['private_ip'])
target.write(' ansible_ssh_port='+str(host['port']))
target.write(' ansible_ssh_host='+ hostname_master +'\n')
return hosts_filename
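# Illustrative output of create_ansible_hosts (hostnames and IPs made up):
#   [master]
#   snf-1.vm.okeanos.grnet.gr private_ip=192.168.0.2 ansible_ssh_host=83.212.0.1
#
#   [slaves]
#   snf-2.vm.okeanos.grnet.gr private_ip=192.168.0.3 ansible_ssh_port=10000 ansible_ssh_host=83.212.0.1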
def modify_ansible_hosts_file(cluster_name, list_of_hosts='', master_ip='', action='', slave_hostname=''):
"""
Function that modifies the ansible_hosts file with
the scaled cluster slave hostnames, adding the new slaves,
deleting the removed slaves or joining in one entry all the slaves.
"""
hosts_filename = os.getcwd() + '/' + ansible_hosts_prefix + cluster_name.replace(" ", "_")
# Create ansible_hosts file and write all information that is
# required from Ansible playbook.
if action == 'add_slaves':
new_slaves_host = '[new_slaves]'
with open(hosts_filename, 'a+') as target:
target.write(new_slaves_host + '\n')
for host in list_of_hosts:
target.write('{0} private_ip={1} ansible_ssh_port={2} ansible_ssh_host={3}\n'.format(host['fqdn'],
host['private_ip'],
str(host['port']), master_ip))
elif action == 'remove_slaves':
remove_slaves_command = "sed -i.bak '/{0}/d' {1}".format(slave_hostname, hosts_filename)
subprocess.call(remove_slaves_command, shell=True)
elif action == 'join_slaves':
join_slaves_command = "sed -i.bak '/\[new\_slaves\]/d' {0}".format(hosts_filename)
subprocess.call(join_slaves_command, shell=True)
return hosts_filename
def map_command_to_ansible_actions(action, image, pre_action_status):
"""
Function to map the start,stop or format commands to the correct ansible actions in
correct sequence, depending also on the image used. Returns a list of the Ansible
tags that will run.
"""
ansible_tags = decode_json(image['ansible_cluster_action_tags'])
# format request for started cluster > stop [> clean ]> format > start
# if stopped cluster, then only format
if action == "format" and pre_action_status == const_hadoop_status_started:
return ['stop', 'CLOUDstop', action, 'start', 'CLOUDstart'] if 'cloudera' in image['category_name'].lower() else \
[ansible_tags['stop'], action, ansible_tags['start']]
elif action == "format" and pre_action_status != const_hadoop_status_started:
return ['format']
else:
return [action, 'CLOUD{0}'.format(action)] if 'cloudera' in image['category_name'].lower() else [ansible_tags[action]]
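# Worked example (illustrative): a 'format' request against a started
# non-Cloudera cluster expands to [stop_tag, 'format', start_tag], while the
# same request against a stopped cluster expands to ['format'] alone.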
def get_image_category(image_name='', image_uuid=''):
"""
Return Orka Image Category properties of the requested image as a json.
"""
if image_name:
orka_image_category_id = OrkaImage.objects.get(image_name=image_name).image_category
elif image_uuid:
orka_image_category_id = OrkaImage.objects.get(image_pithos_uuid=image_uuid).image_category
image_category = OrkaImageCategory.objects.filter(category_name=orka_image_category_id.category_name).values()
return image_category[0]
def decode_json(object):
"""
Decode Json to python dictionary object
"""
return json.loads(object)
def ansible_manage_cluster(cluster_id, action):
"""
Perform an action on a Hadoop cluster depending on the action arg.
Updates database only when starting or stopping a cluster.
"""
cluster = ClusterInfo.objects.get(id=cluster_id)
pre_action_status = cluster.hadoop_status
cluster_status = cluster.cluster_status
if action == 'format':
current_hadoop_status = REVERSE_HADOOP_STATUS[cluster.hadoop_status]
else:
current_hadoop_status = action
image_tags = get_image_category(image_name=cluster.os_image)
decoded_image_tags = decode_json(image_tags['ansible_cluster_config_tags'])
role = decoded_image_tags['role']
ANSIBLE_SEQUENCE = map_command_to_ansible_actions(action, image_tags, pre_action_status)
cluster_name_postfix_id = '%s%s%s' % (cluster.cluster_name, '-', cluster_id)
hosts_filename = os.getcwd() + '/' + ansible_hosts_prefix + cluster_name_postfix_id.replace(" ", "_")
if isfile(hosts_filename):
state = '%s %s' %(HADOOP_STATUS_ACTIONS[action][1], cluster.cluster_name)
current_task.update_state(state=state)
db_hadoop_update(cluster_id, 'Pending', state)
debug_file_name = "create_cluster_debug_" + hosts_filename.split(ansible_hosts_prefix, 1)[1] + ".log"
ansible_log = " >> " + os.path.join(os.getcwd(), debug_file_name)
ansible_code_generic = 'ansible-playbook -i {0} {1} {2} -e "choose_role={3} manage_cluster={3}" -t'.format(hosts_filename, ansible_playbook, ansible_verbosity, role)
for hadoop_action in ANSIBLE_SEQUENCE:
ansible_code = '{0} {1} {2}'.format(ansible_code_generic, hadoop_action, ansible_log)
try:
execute_ansible_playbook(ansible_code)
except Exception, e:
msg = str(e.args[0])
db_hadoop_update(cluster_id, 'undefined', msg)
raise RuntimeError(msg)
msg = 'Cluster %s %s' %(cluster.cluster_name, HADOOP_STATUS_ACTIONS[action][2])
db_hadoop_update(cluster_id, current_hadoop_status, msg)
return msg
else:
msg = 'Ansible hosts file [%s] does not exist' % hosts_filename
raise RuntimeError(msg)
def ansible_create_cluster(hosts_filename, cluster_size, orka_image_uuid, ssh_file, token, replication_factor,
dfs_blocksize, admin_password):
"""
Calls the ansible playbook that installs and configures
hadoop and everything needed for hadoop to be functional.
hosts_filename is the name of ansible_hosts file.
If a specific hadoop image was used in the VMs creation, ansible
playbook will not install Hadoop-YARN and will only perform
the appropriate configuration.
"""
logging.log(REPORT, ' Ansible starts YARN installation on master and '
'slave nodes')
level = logging.getLogger().getEffectiveLevel()
# chosen image includes role and tags properties
image_tags = get_image_category(image_uuid=orka_image_uuid)
decoded_image_tags = decode_json(image_tags['ansible_cluster_config_tags'])
# Create debug file for ansible
debug_file_name = "create_cluster_debug_" + hosts_filename.split(ansible_hosts_prefix, 1)[1] + ".log"
ansible_log = " >> " + os.path.join(os.getcwd(), debug_file_name)
# find ansible playbook (site.yml)
uuid = UserInfo.objects.get(okeanos_token=token).uuid
# Create command that executes ansible playbook
ansible_code = 'ansible-playbook -i {0} {1} {2} '.format(hosts_filename, ansible_playbook, ansible_verbosity) + \
'-f {0} -e "choose_role={1} ssh_file_name={2} token={3} '.format(str(cluster_size), decoded_image_tags['role'], ssh_file, unmask_token(encrypt_key, token)) + \
'dfs_blocksize={0}m dfs_replication={1} uuid={2} admin_password={3}" {4}'.format(dfs_blocksize, replication_factor, uuid, admin_password, decoded_image_tags['tags'])
# Execute ansible
ansible_code += ansible_log
execute_ansible_playbook(ansible_code)
def ansible_scale_cluster(hosts_filename, new_slaves_size=1, orka_image_uuid='', user_id='',action='add_slaves', slave_hostname=''):
"""
Calls the ansible playbook that configures the added nodes
in a scaled hadoop cluster or decommissions the node to be removed.
"""
if action == 'add_slaves':
image_tags = get_image_category(image_uuid=orka_image_uuid)
decoded_image_tags = decode_json(image_tags['ansible_cluster_config_tags'])
list_of_ansible_tags = decoded_image_tags['tags'].split(',')
scale_cluster_tags = ['{0}scale'.format(t) for t in list_of_ansible_tags]
tags = ",".join(scale_cluster_tags)
elif action == 'remove_slaves':
tags = '-t remove_yarn_nodes'
elif action == 'rollback_scale_cluster':
tags = '-t rollback_cluster'
# Create debug file for ansible
debug_file_name = "create_cluster_debug_" + hosts_filename.split(ansible_hosts_prefix, 1)[1] + ".log"
ansible_log = " >> " + os.path.join(os.getcwd(), debug_file_name)
# -t postconfigscale
ansible_code = 'ansible-playbook -i {0} {1} {2} '.format(hosts_filename, ansible_playbook, ansible_verbosity) + \
'-f {0} -e "manage_cluster={1} hostname={2} uuid={3}" {4}'.format(str(new_slaves_size), action, slave_hostname, user_id, tags)
# Execute ansible
ansible_code += ansible_log
try:
execute_ansible_playbook(ansible_code)
except Exception, e:
msg = str(e.args[0])
raise RuntimeError(msg)
def execute_ansible_playbook(ansible_command):
"""
Executes ansible command given as argument
"""
# get any verbose codes we might have saved in our backend
# .first() will helpfully return None if no match found instead of throwing an exception
ansible_known_codes = Setting.objects.filter(section='Ansible',property_name='Errors').first()
if ansible_known_codes is not None:
ansible_known_codes = decode_json(ansible_known_codes.serializable_value('property_value'))
if type(ansible_known_codes) is not dict:
ansible_known_codes = {}
else:
ansible_known_codes = {}
try:
exit_status = subprocess.call(ansible_command, shell=True)
if exit_status > 0:
            exit_status_verbose = ansible_known_codes.get(str(exit_status), 'error description is unknown')
            msg = 'Ansible failed with exit status %d: %s' % (exit_status, exit_status_verbose)
raise RuntimeError(msg, exit_status)
except OSError as e:
msg = 'Ansible command execution failed %s' % e
raise RuntimeError(msg, e)
return 0
|
KPetsas/e-science
|
webapp/backend/run_ansible_playbooks.py
|
Python
|
agpl-3.0
| 13,851
|
"""
Tests for the plugin API
"""
from __future__ import absolute_import
from django.test import TestCase
from openedx.core.lib.plugins import PluginError
from openedx.core.lib.course_tabs import CourseTabPluginManager
class TestCourseTabApi(TestCase):
"""
Unit tests for the course tab plugin API
"""
def test_get_plugin(self):
"""
Verify that get_plugin works as expected.
"""
tab_type = CourseTabPluginManager.get_plugin("instructor")
self.assertEqual(tab_type.title, "Instructor")
with self.assertRaises(PluginError):
CourseTabPluginManager.get_plugin("no_such_type")
|
ESOedX/edx-platform
|
openedx/core/lib/tests/test_course_tab_api.py
|
Python
|
agpl-3.0
| 653
|
import array
import unittest
from test.support import import_module, get_attribute
import os, struct
fcntl = import_module('fcntl')
termios = import_module('termios')
get_attribute(termios, 'TIOCGPGRP') #Can't run tests without this feature
try:
tty = open("/dev/tty", "rb")
except OSError:
raise unittest.SkipTest("Unable to open /dev/tty")
else:
# Skip if another process is in foreground
    r = fcntl.ioctl(tty, termios.TIOCGPGRP, "    ")  # 4-byte buffer for struct "i"
tty.close()
rpgrp = struct.unpack("i", r)[0]
if rpgrp not in (os.getpgrp(), os.getsid(0)):
raise unittest.SkipTest("Neither the process group nor the session "
"are attached to /dev/tty")
del tty, r, rpgrp
try:
import pty
except ImportError:
pty = None
class IoctlTests(unittest.TestCase):
def test_ioctl(self):
# If this process has been put into the background, TIOCGPGRP returns
# the session ID instead of the process group id.
ids = (os.getpgrp(), os.getsid(0))
with open("/dev/tty", "rb") as tty:
            r = fcntl.ioctl(tty, termios.TIOCGPGRP, "    ")  # 4-byte buffer for struct "i"
rpgrp = struct.unpack("i", r)[0]
self.assertIn(rpgrp, ids)
def _check_ioctl_mutate_len(self, nbytes=None):
buf = array.array('i')
intsize = buf.itemsize
ids = (os.getpgrp(), os.getsid(0))
# A fill value unlikely to be in `ids`
fill = -12345
if nbytes is not None:
# Extend the buffer so that it is exactly `nbytes` bytes long
buf.extend([fill] * (nbytes // intsize))
self.assertEqual(len(buf) * intsize, nbytes) # sanity check
else:
buf.append(fill)
with open("/dev/tty", "rb") as tty:
r = fcntl.ioctl(tty, termios.TIOCGPGRP, buf, 1)
rpgrp = buf[0]
self.assertEqual(r, 0)
self.assertIn(rpgrp, ids)
def test_ioctl_mutate(self):
self._check_ioctl_mutate_len()
def test_ioctl_mutate_1024(self):
# Issue #9758: a mutable buffer of exactly 1024 bytes wouldn't be
# copied back after the system call.
self._check_ioctl_mutate_len(1024)
def test_ioctl_mutate_2048(self):
# Test with a larger buffer, just for the record.
self._check_ioctl_mutate_len(2048)
def test_ioctl_signed_unsigned_code_param(self):
if not pty:
raise unittest.SkipTest('pty module required')
mfd, sfd = pty.openpty()
try:
if termios.TIOCSWINSZ < 0:
set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ
set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffff
else:
set_winsz_opcode_pos = termios.TIOCSWINSZ
set_winsz_opcode_maybe_neg, = struct.unpack("i",
struct.pack("I", termios.TIOCSWINSZ))
our_winsz = struct.pack("HHHH",80,25,0,0)
# test both with a positive and potentially negative ioctl code
new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz)
new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz)
finally:
os.close(mfd)
os.close(sfd)
if __name__ == "__main__":
unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_ioctl.py
|
Python
|
gpl-3.0
| 3,271
|
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The **splunklib.binding** module provides a low-level binding interface to the
`Splunk REST API <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTcontents>`_.
This module handles the wire details of calling the REST API, such as
authentication tokens, prefix paths, URL encoding, and so on. Actual path
segments, ``GET`` and ``POST`` arguments, and the parsing of responses is left
to the user.
If you want a friendlier interface to the Splunk REST API, use the
:mod:`splunklib.client` module.
"""
import httplib
import logging
import socket
import ssl
import urllib
import io
import sys
import Cookie
from datetime import datetime
from functools import wraps
from StringIO import StringIO
from contextlib import contextmanager
from xml.etree.ElementTree import XML
try:
from xml.etree.ElementTree import ParseError
except ImportError, e:
from xml.parsers.expat import ExpatError as ParseError
from data import record
__all__ = [
"AuthenticationError",
"connect",
"Context",
"handler",
"HTTPError"
]
# If you change these, update the docstring
# on _authority as well.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = "8089"
DEFAULT_SCHEME = "https"
def _log_duration(f):
@wraps(f)
def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
logging.debug("Operation took %s", end_time-start_time)
return val
return new_f
def _parse_cookies(cookie_str, dictionary):
"""Tries to parse any key-value pairs of cookies in a string,
    then updates the dictionary with any key-value pairs found.
**Example**::
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
dictionary['my'] == 'value'
:param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header.
:type cookie_str: ``str``
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
"""
parsed_cookie = Cookie.SimpleCookie(cookie_str)
for cookie in parsed_cookie.values():
dictionary[cookie.key] = cookie.coded_value
def _make_cookie_header(cookies):
"""
Takes a list of 2-tuples of key-value pairs of
cookies, and returns a valid HTTP ``Cookie``
header.
**Example**::
header = _make_cookie_header([("key", "value"), ("key_2", "value_2")])
# Now the following is True
header == "key=value; key_2=value_2"
:param cookies: A list of 2-tuples of cookie key-value pairs.
:type cookies: ``list`` of 2-tuples
    :return: An HTTP header cookie string.
:rtype: ``str``
"""
return "; ".join("%s=%s" % (key, value) for key, value in cookies)
# Singleton values to eschew None
class _NoAuthenticationToken(object):
"""The value stored in a :class:`Context` or :class:`splunklib.client.Service`
class that is not logged in.
If a ``Context`` or ``Service`` object is created without an authentication
token, and there has not yet been a call to the ``login`` method, the token
field of the ``Context`` or ``Service`` object is set to
``_NoAuthenticationToken``.
Likewise, after a ``Context`` or ``Service`` object has been logged out, the
token is set to this value again.
"""
pass
class UrlEncoded(str):
"""This class marks URL-encoded strings.
It should be considered an SDK-private implementation detail.
Manually tracking whether strings are URL encoded can be difficult. Avoid
calling ``urllib.quote`` to replace special characters with escapes. When
you receive a URL-encoded string, *do* use ``urllib.unquote`` to replace
escapes with single characters. Then, wrap any string you want to use as a
URL in ``UrlEncoded``. Note that because the ``UrlEncoded`` class is
idempotent, making multiple calls to it is OK.
``UrlEncoded`` objects are identical to ``str`` objects (including being
equal if their contents are equal) except when passed to ``UrlEncoded``
again.
``UrlEncoded`` removes the ``str`` type support for interpolating values
with ``%`` (doing that raises a ``TypeError``). There is no reliable way to
encode values this way, so instead, interpolate into a string, quoting by
    hand, and call ``UrlEncoded`` with ``skip_encode=True``.
**Example**::
import urllib
UrlEncoded('%s://%s' % (scheme, urllib.quote(host)), skip_encode=True)
If you append ``str`` strings and ``UrlEncoded`` strings, the result is also
URL encoded.
**Example**::
UrlEncoded('ab c') + 'de f' == UrlEncoded('ab cde f')
'ab c' + UrlEncoded('de f') == UrlEncoded('ab cde f')
"""
def __new__(self, val='', skip_encode=False, encode_slash=False):
if isinstance(val, UrlEncoded):
# Don't urllib.quote something already URL encoded.
return val
elif skip_encode:
return str.__new__(self, val)
elif encode_slash:
return str.__new__(self, urllib.quote_plus(val))
else:
# When subclassing str, just call str's __new__ method
# with your class and the value you want to have in the
# new string.
return str.__new__(self, urllib.quote(val))
def __add__(self, other):
"""self + other
If *other* is not a ``UrlEncoded``, URL encode it before
adding it.
"""
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__add__(self, other), skip_encode=True)
else:
return UrlEncoded(str.__add__(self, urllib.quote(other)), skip_encode=True)
def __radd__(self, other):
"""other + self
If *other* is not a ``UrlEncoded``, URL encode it before
adding it.
"""
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__radd__(self, other), skip_encode=True)
else:
return UrlEncoded(str.__add__(urllib.quote(other), self), skip_encode=True)
def __mod__(self, fields):
"""Interpolation into ``UrlEncoded``s is disabled.
If you try to write ``UrlEncoded("%s") % "abc"``, you will get a
``TypeError``.
"""
raise TypeError("Cannot interpolate into a UrlEncoded object.")
def __repr__(self):
return "UrlEncoded(%s)" % repr(urllib.unquote(str(self)))
@contextmanager
def _handle_auth_error(msg):
"""Handle reraising HTTP authentication errors as something clearer.
If an ``HTTPError`` is raised with status 401 (access denied) in
the body of this context manager, reraise it as an
``AuthenticationError`` instead, with *msg* as its message.
This function adds no round trips to the server.
:param msg: The message to be raised in ``AuthenticationError``.
:type msg: ``str``
**Example**::
with _handle_auth_error("Your login failed."):
... # make an HTTP request
"""
try:
yield
except HTTPError as he:
if he.status == 401:
raise AuthenticationError(msg, he)
else:
raise
def _authentication(request_fun):
"""Decorator to handle autologin and authentication errors.
*request_fun* is a function taking no arguments that needs to
be run with this ``Context`` logged into Splunk.
``_authentication``'s behavior depends on whether the
``autologin`` field of ``Context`` is set to ``True`` or
``False``. If it's ``False``, then ``_authentication``
aborts if the ``Context`` is not logged in, and raises an
``AuthenticationError`` if an ``HTTPError`` of status 401 is
raised in *request_fun*. If it's ``True``, then
``_authentication`` will try to log in at all sensible
points before issuing the request.
If ``autologin`` is ``False``, ``_authentication`` makes
one roundtrip to the server if the ``Context`` is logged in,
or zero if it is not. If ``autologin`` is ``True``, it's less
deterministic, and may make at most three roundtrips (though
that would be a truly pathological case).
:param request_fun: A function of no arguments encapsulating
the request to make to the server.
**Example**::
import splunklib.binding as binding
c = binding.connect(..., autologin=True)
c.logout()
# Because get() is decorated with @_authentication and autologin
# is set, this transparently logs in again and retries:
c.get("/services")
"""
@wraps(request_fun)
def wrapper(self, *args, **kwargs):
if self.token is _NoAuthenticationToken and \
not self.has_cookies():
# Not yet logged in.
if self.autologin and self.username and self.password:
# This will throw an uncaught
# AuthenticationError if it fails.
self.login()
else:
# Try the request anyway without authentication.
# Most requests will fail. Some will succeed, such as
# 'GET server/info'.
with _handle_auth_error("Request aborted: not logged in."):
return request_fun(self, *args, **kwargs)
try:
# Issue the request
return request_fun(self, *args, **kwargs)
except HTTPError as he:
if he.status == 401 and self.autologin:
# Authentication failed. Try logging in, and then
# rerunning the request. If either step fails, throw
# an AuthenticationError and give up.
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
"Autologin succeeded, but there was an auth error on "
"next request. Something is very wrong."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
"Request failed: Session is not logged in.", he)
else:
raise
return wrapper
def _authority(scheme=DEFAULT_SCHEME, host=DEFAULT_HOST, port=DEFAULT_PORT):
"""Construct a URL authority from the given *scheme*, *host*, and *port*.
Named in accordance with RFC2396_, which defines URLs as::
<scheme>://<authority><path>?<query>
.. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt
So ``https://localhost:8000/a/b/c?boris=hilda`` would be parsed as::
scheme := https
authority := localhost:8000
path := /a/b/c
query := boris=hilda
:param scheme: URL scheme (the default is "https")
:type scheme: "http" or "https"
:param host: The host name (the default is "localhost")
:type host: string
:param port: The port number (the default is 8089)
:type port: integer
:return: The URL authority.
:rtype: UrlEncoded (subclass of ``str``)
**Example**::
_authority() == "https://localhost:8089"
_authority(host="splunk.utopia.net") == "https://splunk.utopia.net:8089"
_authority(host="2001:0db8:85a3:0000:0000:8a2e:0370:7334") == \
"https://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8089"
_authority(scheme="http", host="splunk.utopia.net", port="471") == \
"http://splunk.utopia.net:471"
"""
if ':' in host:
# IPv6 addresses must be enclosed in [ ] in order to be well
# formed.
host = '[' + host + ']'
return UrlEncoded("%s://%s:%s" % (scheme, host, port), skip_encode=True)
# kwargs: sharing, owner, app
def namespace(sharing=None, owner=None, app=None, **kwargs):
"""This function constructs a Splunk namespace.
Every Splunk resource belongs to a namespace. The namespace is specified by
the pair of values ``owner`` and ``app`` and is governed by a ``sharing`` mode.
The possible values for ``sharing`` are: "user", "app", "global" and "system",
which map to the following combinations of ``owner`` and ``app`` values:
"user" => {owner}, {app}
"app" => nobody, {app}
"global" => nobody, {app}
"system" => nobody, system
"nobody" is a special user name that basically means no user, and "system"
is the name reserved for system resources.
"-" is a wildcard that can be used for both ``owner`` and ``app`` values and
refers to all users and all apps, respectively.
In general, when you specify a namespace you can specify any combination of
these three values and the library will reconcile the triple, overriding the
provided values as appropriate.
Finally, if no namespacing is specified the library will make use of the
``/services`` branch of the REST API, which provides a namespaced view of
Splunk resources equivalent to using ``owner={currentUser}`` and
``app={defaultApp}``.
The ``namespace`` function returns a representation of the namespace from
reconciling the values you provide. It ignores any keyword arguments other
than ``owner``, ``app``, and ``sharing``, so you can provide ``dicts`` of
configuration information without first having to extract individual keys.
:param sharing: The sharing mode (the default is "user").
:type sharing: "system", "global", "app", or "user"
:param owner: The owner context (the default is "None").
:type owner: ``string``
:param app: The app context (the default is "None").
:type app: ``string``
:returns: A :class:`splunklib.data.Record` containing the reconciled
namespace.
**Example**::
import splunklib.binding as binding
n = binding.namespace(sharing="user", owner="boris", app="search")
n = binding.namespace(sharing="global", app="search")
"""
if sharing in ["system"]:
return record({'sharing': sharing, 'owner': "nobody", 'app': "system" })
if sharing in ["global", "app"]:
return record({'sharing': sharing, 'owner': "nobody", 'app': app})
if sharing in ["user", None]:
return record({'sharing': sharing, 'owner': owner, 'app': app})
raise ValueError("Invalid value for argument: 'sharing'")
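# Illustrative sketch (not part of the SDK): because ``namespace`` ignores
# keys other than ``sharing``, ``owner``, and ``app``, a whole configuration
# dict can be splatted into it. Defined for documentation only; never called.
def _namespace_examples():
    config = {'sharing': 'app', 'app': 'search', 'port': 8089}  # hypothetical
    ns = namespace(**config)  # the extra 'port' key is silently ignored
    assert ns.owner == 'nobody' and ns.app == 'search'
    assert namespace(sharing='system').app == 'system'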
class Context(object):
"""This class represents a context that encapsulates a splunkd connection.
The ``Context`` class encapsulates the details of HTTP requests,
authentication, a default namespace, and URL prefixes to simplify access to
the REST API.
After creating a ``Context`` object, you must call its :meth:`login`
method before you can issue requests to splunkd. Or, use the :func:`connect`
function to create an already-authenticated ``Context`` object. You can
provide a session token explicitly (the same token can be shared by multiple
``Context`` objects) to provide authentication.
:param host: The host name (the default is "localhost").
:type host: ``string``
:param port: The port number (the default is 8089).
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param owner: The owner context of the namespace (optional, the default is "None").
:type owner: ``string``
:param app: The app context of the namespace (optional, the default is "None").
:type app: ``string``
:param token: A session token. When provided, you don't need to call :meth:`login`.
:type token: ``string``
:param cookie: A session cookie. When provided, you don't need to call :meth:`login`.
This parameter is only supported for Splunk 6.2+.
:type cookie: ``string``
:param username: The Splunk account username, which is used to
authenticate the Splunk instance.
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
**Example**::
import splunklib.binding as binding
c = binding.Context(username="boris", password="natasha", ...)
c.login()
# Or equivalently
c = binding.connect(username="boris", password="natasha")
# Or if you already have a session token
c = binding.Context(token="atg232342aa34324a")
# Or if you already have a valid cookie
c = binding.Context(cookie="splunkd_8089=...")
"""
def __init__(self, handler=None, **kwargs):
self.http = HttpLib(handler)
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
self.scheme = kwargs.get("scheme", DEFAULT_SCHEME)
self.host = kwargs.get("host", DEFAULT_HOST)
self.port = int(kwargs.get("port", DEFAULT_PORT))
self.authority = _authority(self.scheme, self.host, self.port)
self.namespace = namespace(**kwargs)
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.autologin = kwargs.get("autologin", False)
# Store any cookies in the self.http._cookies dict
if "cookie" in kwargs and kwargs['cookie'] not in [None, _NoAuthenticationToken]:
_parse_cookies(kwargs["cookie"], self.http._cookies)
def get_cookies(self):
"""Gets the dictionary of cookies from the ``HttpLib`` member of this instance.
:return: Dictionary of cookies stored on the ``self.http``.
:rtype: ``dict``
"""
return self.http._cookies
def has_cookies(self):
"""Returns true if the ``HttpLib`` member of this instance has at least
one cookie stored.
:return: ``True`` if there is at least one cookie, else ``False``
:rtype: ``bool``
"""
return len(self.get_cookies()) > 0
# Shared per-context request headers
@property
def _auth_headers(self):
"""Headers required to authenticate a request.
Assumes your ``Context`` already has an authentication token or
cookie, either provided explicitly or obtained by logging
into the Splunk instance.
:returns: A list of 2-tuples containing key and value
"""
if self.has_cookies():
return [("Cookie", _make_cookie_header(self.get_cookies().items()))]
elif self.token is _NoAuthenticationToken:
return []
else:
# Ensure the token is properly formatted
if self.token.startswith('Splunk '):
token = self.token
else:
token = 'Splunk %s' % self.token
return [("Authorization", token)]
def connect(self):
"""Returns an open connection (socket) to the Splunk instance.
This method is used for writing bulk events to an index or similar tasks
where the overhead of opening a connection multiple times would be
prohibitive.
:returns: A socket.
**Example**::
import splunklib.binding as binding
c = binding.connect(...)
socket = c.connect()
socket.write("POST %s HTTP/1.1\\r\\n" % "some/path/to/post/to")
socket.write("Host: %s:%s\\r\\n" % (c.host, c.port))
socket.write("Accept-Encoding: identity\\r\\n")
socket.write("Authorization: %s\\r\\n" % c.token)
socket.write("X-Splunk-Input-Mode: Streaming\\r\\n")
socket.write("\\r\\n")
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.scheme == "https":
sock = ssl.wrap_socket(sock)
sock.connect((socket.gethostbyname(self.host), self.port))
return sock
@_authentication
@_log_duration
def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a DELETE operation at the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``delete`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a DELETE operation on
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.delete('saved/searches/boris') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '1786'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:53:06 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.delete('nonexistent/path') # raises HTTPError
c.logout()
c.delete('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@_authentication
@_log_duration
def get(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.get('nonexistent/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("GET request to %s (body: %s)", path, repr(query))
response = self.http.get(path, self._auth_headers, **query)
return response
@_authentication
@_log_duration
def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, **query):
"""Performs a POST operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``post`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
Some of Splunk's endpoints, such as ``receivers/simple`` and
``receivers/stream``, require unstructured data in the POST body
and all metadata passed as GET-style arguments. If you provide
a ``body`` argument to ``post``, it will be used as the POST
body, and all other keyword arguments will be passed as
GET-style arguments in the URL.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a POST operation on
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.post('saved/searches', name='boris',
search='search * earliest=-1m | head 1') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '10455'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:46:06 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'Created',
'status': 201}
c.post('nonexistent/path') # raises HTTPError
c.logout()
# raises AuthenticationError:
c.post('saved/searches', name='boris',
search='search * earliest=-1m | head 1')
"""
if headers is None:
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
logging.debug("POST request to %s (body: %s)", path, repr(query))
all_headers = headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
def request(self, path_segment, method="GET", headers=None, body="",
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
This method is named to match ``httplib.request``. This function
makes a single round trip to the server.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred while making the request to
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param method: The HTTP method to use (optional).
:type method: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
:param body: Content of the HTTP request (optional).
:type body: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.request('saved/searches', method='GET') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '46722'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 17:24:19 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.request('nonexistent/path', method='GET') # raises HTTPError
c.logout()
c.request('apps/local', method='GET') # raises AuthenticationError
"""
if headers is None:
headers = []
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
all_headers = headers + self._auth_headers
logging.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
response = self.http.request(path,
{'method': method,
'headers': all_headers,
'body': body})
return response
def login(self):
"""Logs into the Splunk instance referred to by the :class:`Context`
object.
Unless a ``Context`` is created with an explicit authentication token
(probably obtained by logging in from a different ``Context`` object)
you must call :meth:`login` before you can issue requests.
The authentication token obtained from the server is stored in the
``token`` field of the ``Context`` object.
:raises AuthenticationError: Raised when login fails.
:returns: The ``Context`` object, so you can chain calls.
**Example**::
import splunklib.binding as binding
c = binding.Context(...).login()
# Then issue requests...
"""
if self.has_cookies() and \
(not self.username and not self.password):
# If we were passed session cookie(s), but no username or
# password, then login is a nop, since we're automatically
# logged in.
return
if self.token is not _NoAuthenticationToken and \
(not self.username and not self.password):
# If we were passed a session token, but no username or
# password, then login is a nop, since we're automatically
# logged in.
return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
self.authority + self._abspath("/services/auth/login"),
username=self.username,
password=self.password,
cookie="1") # In Splunk 6.2+, passing "cookie=1" will return the "set-cookie" header
body = response.body.read()
session = XML(body).findtext("./sessionKey")
self.token = "Splunk %s" % session
return self
except HTTPError as he:
if he.status == 401:
raise AuthenticationError("Login failed.", he)
else:
raise
def logout(self):
"""Forgets the current session token, and cookies."""
self.token = _NoAuthenticationToken
self.http._cookies = {}
return self
def _abspath(self, path_segment,
owner=None, app=None, sharing=None):
"""Qualifies *path_segment* into an absolute path for a URL.
If *path_segment* is already absolute, returns it unchanged.
If *path_segment* is relative, then qualifies it with either
the provided namespace arguments or the ``Context``'s default
namespace. Any forbidden characters in *path_segment* are URL
encoded. This function has no network activity.
Named to be consistent with RFC2396_.
.. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt
:param path_segment: A relative or absolute URL path segment.
:type path_segment: ``string``
:param owner, app, sharing: Components of a namespace (defaults
to the ``Context``'s namespace if all
three are omitted)
:type owner, app, sharing: ``string``
:return: A ``UrlEncoded`` (a subclass of ``str``).
:rtype: ``string``
**Example**::
import splunklib.binding as binding
c = binding.connect(owner='boris', app='search', sharing='user')
c._abspath('/a/b/c') == '/a/b/c'
c._abspath('/a/b c/d') == '/a/b%20c/d'
c._abspath('apps/local/search') == \
'/servicesNS/boris/search/apps/local/search'
c._abspath('apps/local/search', sharing='system') == \
'/servicesNS/nobody/system/apps/local/search'
url = c.authority + c._abspath('apps/local/sharing')
"""
skip_encode = isinstance(path_segment, UrlEncoded)
# If path_segment is absolute, escape all forbidden characters
# in it and return it.
if path_segment.startswith('/'):
return UrlEncoded(path_segment, skip_encode=skip_encode)
# path_segment is relative, so we need a namespace to build an
# absolute path.
if owner or app or sharing:
ns = namespace(owner=owner, app=app, sharing=sharing)
else:
ns = self.namespace
# If no app or owner are specified, then use the /services
# endpoint. Otherwise, use /servicesNS with the specified
# namespace. If only one of app and owner is specified, use
# '-' for the other.
if ns.app is None and ns.owner is None:
return UrlEncoded("/services/%s" % path_segment, skip_encode=skip_encode)
oname = "nobody" if ns.owner is None else ns.owner
aname = "system" if ns.app is None else ns.app
path = UrlEncoded("/servicesNS/%s/%s/%s" % (oname, aname, path_segment),
skip_encode=skip_encode)
return path
def connect(**kwargs):
"""This function returns an authenticated :class:`Context` object.
This function is a shorthand for creating a :class:`Context` object and
calling its :meth:`login` method.
This function makes one round trip to the server.
:param host: The host name (the default is "localhost").
:type host: ``string``
:param port: The port number (the default is 8089).
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
:param owner: The owner context of the namespace (the default is "None").
:type owner: ``string``
:param app: The app context of the namespace (the default is "None").
:type app: ``string``
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param token: The current session token (optional). Session tokens can be
shared across multiple service instances.
:type token: ``string``
:param cookie: A session cookie. When provided, you don't need to call :meth:`login`.
This parameter is only supported for Splunk 6.2+.
:type cookie: ``string``
:param username: The Splunk account username, which is used to
authenticate the Splunk instance.
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
:param autologin: When ``True``, automatically tries to log in again if the
session terminates.
:type autologin: ``Boolean``
:return: An initialized :class:`Context` instance.
**Example**::
import splunklib.binding as binding
c = binding.connect(...)
response = c.get("apps/local")
"""
c = Context(**kwargs)
c.login()
return c
# Note: the error response schema supports multiple messages but we only
# return the first, although we do return the body so that an exception
# handler that wants to read multiple messages can do so.
class HTTPError(Exception):
"""This exception is raised for HTTP responses that return an error."""
def __init__(self, response, _message=None):
status = response.status
reason = response.reason
body = response.body.read()
try:
detail = XML(body).findtext("./messages/msg")
except ParseError as err:
detail = body
message = "HTTP %d %s%s" % (
status, reason, "" if detail is None else " -- %s" % detail)
Exception.__init__(self, _message or message)
self.status = status
self.reason = reason
self.headers = response.headers
self.body = body
self._response = response
class AuthenticationError(HTTPError):
"""Raised when a login request to Splunk fails.
If your username was unknown or you provided an incorrect password
in a call to :meth:`Context.login` or :meth:`splunklib.client.Service.login`,
this exception is raised.
"""
def __init__(self, message, cause):
# Put the body back in the response so that HTTPError's constructor can
# read it again.
cause._response.body = StringIO(cause.body)
HTTPError.__init__(self, cause._response, message)
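# Illustrative usage (hypothetical path): the response body has already been
# read by the time an ``HTTPError`` reaches your handler, so read it from the
# exception. ``AuthenticationError`` subclasses ``HTTPError``, so catch it
# first::
#
#     try:
#         context.get('apps/nonexistent')
#     except AuthenticationError:
#         context.login()
#     except HTTPError as he:
#         print he.status, he.reason
#         print he.body  # the full XML error document, possibly many <msg>s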
#
# The HTTP interface used by the Splunk binding layer abstracts the underlying
# HTTP library using request & response 'messages' which are implemented as
# dictionaries with the following structure:
#
# # HTTP request message (only method required)
# request {
# method : str,
# headers? : [(str, str)*],
# body? : str,
# }
#
# # HTTP response message (all keys present)
# response {
# status : int,
# reason : str,
# headers : [(str, str)*],
# body : file,
# }
#
# Encode the given kwargs as a query string. This wrapper will also encode
# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
items = []
for key, value in kwargs.iteritems():
if isinstance(value, list):
items.extend([(key, item) for item in value])
else:
items.append((key, value))
return urllib.urlencode(items)
# Crack the given url into (scheme, host, port, path)
def _spliturl(url):
scheme, opaque = urllib.splittype(url)
netloc, path = urllib.splithost(opaque)
host, port = urllib.splitport(netloc)
# Strip brackets if it's an IPv6 address
if host.startswith('[') and host.endswith(']'): host = host[1:-1]
if port is None: port = DEFAULT_PORT
return scheme, host, port, path
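# Illustrative values (note that the port comes back as a *string* when it
# appears in the URL, and as DEFAULT_PORT when it does not)::
#
#     _spliturl('https://localhost:8089/services/auth/login')
#         == ('https', 'localhost', '8089', '/services/auth/login')
#     _spliturl('http://[::1]:8000/a') == ('http', '::1', '8000', '/a')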
# Given an HTTP request handler, this wrapper object provides a related
# family of convenience methods built using that handler.
class HttpLib(object):
"""A set of convenient methods for making HTTP calls.
``HttpLib`` provides a general :meth:`request` method, and :meth:`delete`,
:meth:`post`, and :meth:`get` methods for the three HTTP methods that Splunk
uses.
By default, ``HttpLib`` uses Python's built-in ``httplib`` library,
but you can replace it by passing your own handling function to the
constructor for ``HttpLib``.
The handling function should have the type:
``handler(url, request_dict) -> response_dict``
where ``url`` is the URL to make the request to (including any query and
fragment sections) as a string, and ``request_dict`` is a dictionary with the
following keys:
- method: The method for the request, typically ``GET``, ``POST``, or ``DELETE``.
- headers: A list of pairs specifying the HTTP headers (for example: ``[('key', value), ...]``).
- body: A string containing the body to send with the request (this string
should default to '').
and ``response_dict`` is a dictionary with the following keys:
- status: An integer containing the HTTP status code (such as 200 or 404).
- reason: The reason phrase, if any, returned by the server.
- headers: A list of pairs containing the response headers (for example, ``[('key', value), ...]``).
- body: A stream-like object supporting ``read(size=None)`` and ``close()``
methods to get the body of the response.
The response dictionary is returned directly by ``HttpLib``'s methods with
no further processing. By default, ``HttpLib`` calls the :func:`handler` function
to get a handler function.
"""
def __init__(self, custom_handler=None):
self.handler = handler() if custom_handler is None else custom_handler
self._cookies = {}
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
request (for example, ``[('Content-Type', 'text/cthulhu'), ('Token', 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
if kwargs:
# url is already a UrlEncoded. We have to manually declare
# the query to be encoded or it will get automatically URL
# encoded by being appended to url.
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
message = {
'method': "DELETE",
'headers': headers,
}
return self.request(url, message)
def get(self, url, headers=None, **kwargs):
"""Sends a GET request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
request (for example, ``[('Content-Type', 'text/cthulhu'), ('Token', 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
if kwargs:
# url is already a UrlEncoded. We have to manually declare
# the query to be encoded or it will get automatically URL
# encoded by being appended to url.
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
return self.request(url, { 'method': "GET", 'headers': headers })
def post(self, url, headers=None, **kwargs):
"""Sends a POST request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
request (for example, ``[('Content-Type', 'text/cthulhu'), ('Token', 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). If a ``body``
argument is present, its value is used as the request body and the
remaining keyword arguments are URL encoded into the query string of
the URL. If there is no ``body`` keyword argument, all the keyword
arguments are encoded into the body of the request in the format
``x-www-form-urlencoded``.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
headers.append(("Content-Type", "application/x-www-form-urlencoded"))
# We handle GET-style arguments and an unstructured body. This is here
# to support the receivers/stream endpoint.
if 'body' in kwargs:
body = kwargs.pop('body')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
body = _encode(**kwargs)
message = {
'method': "POST",
'headers': headers,
'body': body
}
return self.request(url, message)
def request(self, url, message, **kwargs):
"""Issues an HTTP request to a URL.
:param url: The URL.
:type url: ``string``
:param message: A dictionary with the format as described in
:class:`HttpLib`.
:type message: ``dict``
:param kwargs: Additional keyword arguments (optional). These arguments
are passed unchanged to the handler.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
response = self.handler(url, message, **kwargs)
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
# Update the cookie with any HTTP request
# Initially, assume list of 2-tuples
key_value_tuples = response.headers
# If response.headers is a dict, get the key-value pairs as 2-tuples
# this is the case when using urllib2
if isinstance(response.headers, dict):
key_value_tuples = response.headers.items()
for key, value in key_value_tuples:
if key.lower() == "set-cookie":
_parse_cookies(value, self._cookies)
return response
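# Illustrative sketch (an assumption, not SDK-provided code): the smallest
# custom handler satisfying the request/response message contract described
# in the ``HttpLib`` docstring. A real handler would do network I/O; this one
# just echoes a canned 200 response, using the StringIO imported earlier in
# this module. Defined for documentation only; never called.
def _example_handler(url, message, **kwargs):
    return {
        'status': 200,
        'reason': 'OK',
        'headers': [('content-type', 'text/plain')],
        'body': StringIO('canned reply to a %s' % message.get('method', 'GET')),
    }
# It would plug in via the constructor: HttpLib(_example_handler).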
# Converts an httplib response into a file-like object.
class ResponseReader(io.RawIOBase):
"""This class provides a file-like interface for :class:`httplib` responses.
The ``ResponseReader`` class is intended to be a layer to unify the different
types of HTTP libraries used with this SDK. This class also provides a
preview of the stream and a few useful predicates.
"""
# For testing, you can use a StringIO as the argument to
# ``ResponseReader`` instead of an ``httplib.HTTPResponse``. It
# will work equally well.
def __init__(self, response):
self._response = response
self._buffer = ''
def __str__(self):
return self.read()
@property
def empty(self):
"""Indicates whether there is any more data in the response."""
return self.peek(1) == ""
def peek(self, size):
"""Nondestructively retrieves a given number of characters.
The next :meth:`read` operation behaves as though this method was never
called.
:param size: The number of characters to retrieve.
:type size: ``integer``
"""
c = self.read(size)
self._buffer = self._buffer + c
return c
def close(self):
"""Closes this response."""
self._response.close()
def read(self, size = None):
"""Reads a given number of characters from the response.
:param size: The number of characters to read, or "None" to read the
entire response.
:type size: ``integer`` or "None"
"""
r = self._buffer
self._buffer = ''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
return r
def readable(self):
""" Indicates that the response reader is readable."""
return True
def readinto(self, byte_array):
""" Read data into a byte array, upto the size of the byte array.
:param byte_array: A byte array/memory view to pour bytes into.
:type byte_array: ``bytearray`` or ``memoryview``
"""
max_size = len(byte_array)
data = self.read(max_size)
bytes_read = len(data)
byte_array[:bytes_read] = data
return bytes_read
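# Illustrative sketch (documentation only, never called): per the note above,
# a StringIO can stand in for an ``httplib.HTTPResponse`` when exercising
# ``ResponseReader``.
def _responsereader_examples():
    reader = ResponseReader(StringIO('splunkd says hi'))
    assert reader.peek(7) == 'splunkd'  # non-destructive look-ahead
    assert reader.read(7) == 'splunkd'  # the same characters again
    assert not reader.empty
    assert reader.read() == ' says hi'
    assert reader.empty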
def handler(key_file=None, cert_file=None, timeout=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
:param `key_file`: A path to a PEM (Privacy Enhanced Mail) formatted file containing your private key (optional).
:type key_file: ``string``
:param `cert_file`: A path to a PEM (Privacy Enhanced Mail) formatted file containing a certificate chain file (optional).
:type cert_file: ``string``
:param `timeout`: The request time-out period, in seconds (optional).
:type timeout: ``integer`` or "None"
"""
def connect(scheme, host, port):
kwargs = {}
if timeout is not None: kwargs['timeout'] = timeout
if scheme == "http":
return httplib.HTTPConnection(host, port, **kwargs)
if scheme == "https":
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
# If running Python 2.7.9+, disable SSL certificate validation
if sys.version_info >= (2,7,9) and key_file is None and cert_file is None:
kwargs['context'] = ssl._create_unverified_context()
return httplib.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
def request(url, message, **kwargs):
scheme, host, port, path = _spliturl(url)
body = message.get("body", "")
head = {
"Content-Length": str(len(body)),
"Host": host,
"User-Agent": "splunk-sdk-python/1.5.0",
"Accept": "*/*",
} # defaults
for key, value in message["headers"]:
head[key] = value
method = message.get("method", "GET")
connection = connect(scheme, host, port)
try:
connection.request(method, path, body, head)
if timeout is not None:
connection.sock.settimeout(timeout)
response = connection.getresponse()
finally:
connection.close()
return {
"status": response.status,
"reason": response.reason,
"headers": response.getheaders(),
"body": ResponseReader(response),
}
return request
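# Illustrative usage (hypothetical values): the factory result plugs straight
# into ``HttpLib`` or ``Context``::
#
#     h = handler(timeout=30)  # default handler with a 30 second timeout
#     http = HttpLib(h)
#     c = Context(handler=h, host='localhost', port=8089)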
|
jruaux/mongodb-monitoring
|
src/splunklib/binding.py
|
Python
|
apache-2.0
| 54,534
|
#!/usr/bin/env python
"""Import topic distributions and minute-by-minute topic coverage to SQL.
Usage: ./import_topics.py
"""
import ConfigParser
import csv
import os
from sqlalchemy import *
import sys
config = ConfigParser.RawConfigParser()
filedir = os.path.dirname(__file__)
config.read(os.path.join(filedir, '../moca.cfg'))
sys.path.insert(0, os.path.join(filedir, '..'))
from courses import courses
import heatmaps
db_prefix = 'mysql://%s:%s@%s/' % (
config.get('Database', 'username'),
config.get('Database', 'password'),
config.get('Database', 'host'))
engines = {course_id: create_engine(db_prefix + course_id, echo=True, pool_recycle=3600) for course_id in courses}
metadata = {course_id: MetaData(engine) for course_id, engine in engines.iteritems()}
# Tables by course id
moca_topics = {}
moca_topic_words = {}
moca_topic_coverage = {}
# Human-assigned topic names
rectified_topics = {
'textretrieval': {
0: 'document relevance',
1: 'search engine',
2: 'feedback relevance',
3: 'user',
6: 'natural language',
9: 'vector space model',
10: 'recommender systems',
11: 'indexing',
12: 'crawling framework',
13: 'precision/recall metrics',
14: 'human-in-the-loop support',
15: 'PageRank',
16: 'MapReduce',
},
'textanalytics': {
0: 'topic model',
1: 'information entropy',
2: 'retrieval model',
3: 'generative',
4: 'probabilistic model',
7: 'classification',
14: 'paradigmatic/syntagmatic relations',
},
}
# Reset `moca_topics` and `moca_topic_words` tables
for course_id in courses:
moca_topics[course_id] = Table('moca_topics', metadata[course_id],
Column('id', Integer, primary_key=True, autoincrement=False),
Column('name', String(100)),
Column('difficulty', Float))
moca_topic_words[course_id] = Table('moca_topic_words', metadata[course_id],
Column('topic_id', Integer, ForeignKey('moca_topics.id')),
Column('word', String(100)),
Column('phi', Float))
lecture_metadata = Table('lecture_metadata', metadata[course_id], autoload=True)
moca_topic_coverage[course_id] = Table('moca_topic_coverage', metadata[course_id],
Column('lecture_id', Integer, ForeignKey('lecture_metadata.id'), primary_key=True),
Column('minute', Integer, primary_key=True, autoincrement=False),
Column('topic_id', Integer, ForeignKey('moca_topics.id')))
moca_topic_coverage[course_id].drop(engines[course_id], checkfirst=True)
moca_topic_words[course_id].drop(engines[course_id], checkfirst=True)
moca_topics[course_id].drop(engines[course_id], checkfirst=True)
moca_topics[course_id].create(engines[course_id])
moca_topic_words[course_id].create(engines[course_id])
moca_topic_coverage[course_id].create(engines[course_id])
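# The <course_id>_topics.txt files appear to hold a Python-literal topic list
# in the gensim show_topics style; a hypothetical two-topic example of the
# format parsed below:
#
#   [(0, '0.12*search + 0.08*engine + 0.05*query'),
#    (1, '0.20*entropy + 0.07*model + 0.04*probability')]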
for course_id in courses:
connection = engines[course_id].connect()
with open(os.path.join(filedir, course_id + '_topics.txt')) as f:
topics = eval(f.read())
# Import topics and word distributions
for topic in topics:
topic_id = int(topic[0])
words = topic[1].split(' + ')
words = [word.split('*') for word in words]
words = [(float(word[0]), word[1]) for word in words]
if topic_id in rectified_topics[course_id]:
name = rectified_topics[course_id][topic_id]
else:
name = ' '.join([words[0][1], words[1][1], words[2][1]])
# Insert topic to `moca_topics`
topic_ins = moca_topics[course_id].insert().values(
id=topic_id,
name=name)
connection.execute(topic_ins)
# Insert all words to `moca_topic_words`
for word in words:
word_ins = moca_topic_words[course_id].insert().values(
topic_id=topic_id,
word=word[1],
phi=word[0])
connection.execute(word_ins)
# Import topic coverage by lecture x minute while simultaneously
# calculating topic difficulties
topic_num_events = [0] * len(topics)
topic_num_minutes = [0] * len(topics)
with open(os.path.join(filedir, course_id + '_lect_topics.csv')) as f:
reader = csv.reader(f)
for row in reader:
lecture_id = int(row[0])
lecture_topics = map(int, filter(None, row[1:]))
heatmap = heatmaps.get_heatmap(course_id, lecture_id)
for minute, topic_id in enumerate(lecture_topics):
cov_ins = moca_topic_coverage[course_id].insert().values(
lecture_id=lecture_id,
minute=minute,
topic_id=topic_id)
connection.execute(cov_ins)
try:
topic_num_events[topic_id] += heatmap[minute]
topic_num_minutes[topic_id] += 1
except IndexError:
continue
topic_difficulties = [float(e) / m for e, m in zip(topic_num_events, topic_num_minutes)]
for topic_id, difficulty in enumerate(topic_difficulties):
stmt = (moca_topics[course_id].update()
.where(moca_topics[course_id].c.id == topic_id)
.values(difficulty=difficulty))
connection.execute(stmt)
connection.close()
|
kevinwang/moca
|
topic_data/import_topics.py
|
Python
|
mit
| 5,387
|
from core.himesis import Himesis
import uuid
class HcopersonsSolveRefCountryFamilyChildCommunityMan(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule copersonsSolveRefCountryFamilyChildCommunityMan.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HcopersonsSolveRefCountryFamilyChildCommunityMan, self).__init__(name='HcopersonsSolveRefCountryFamilyChildCommunityMan', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """copersonsSolveRefCountryFamilyChildCommunityMan"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'copersonsSolveRefCountryFamilyChildCommunityMan')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Country() node
self.add_node()
self.vs[3]["mm__"] = """Country"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Country()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Family() node
self.add_node()
self.vs[5]["mm__"] = """Family"""
self.vs[5]["attr1"] = """+"""
# match_contains node for class Family()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class Child() node
self.add_node()
self.vs[7]["mm__"] = """Child"""
self.vs[7]["attr1"] = """+"""
# match_contains node for class Child()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# apply class Community() node
self.add_node()
self.vs[9]["mm__"] = """Community"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class Community()
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# apply class Man() node
self.add_node()
self.vs[11]["mm__"] = """Man"""
self.vs[11]["attr1"] = """1"""
# apply_contains node for class Man()
self.add_node()
self.vs[12]["mm__"] = """apply_contains"""
# match association Country--families-->Family node
self.add_node()
self.vs[13]["attr1"] = """families"""
self.vs[13]["mm__"] = """directLink_S"""
# match association Family--sons-->Child node
self.add_node()
self.vs[14]["attr1"] = """sons"""
self.vs[14]["mm__"] = """directLink_S"""
# apply association Community--persons-->Man node
self.add_node()
self.vs[15]["attr1"] = """persons"""
self.vs[15]["mm__"] = """directLink_T"""
# backward association Country---->Community node
self.add_node()
self.vs[16]["mm__"] = """backward_link"""
# backward association Child---->Man node
self.add_node()
self.vs[17]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Country()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Family()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class Child()
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class Community()
(1,12), # applymodel -> apply_contains
(12,11), # apply_contains -> apply_class Man()
(3,13), # match_class Country() -> association families
(13,5), # association families -> match_class Family()
(5,14), # match_class Family() -> association sons
(14,7), # association sons -> match_class Child()
(9,15), # apply_class Community() -> association persons
(15,11), # association persons -> apply_class Man()
(9,16), # apply_class Community() -> backward_association
(16,3), # backward_association -> match_class Country()
(11,17), # apply_class Man() -> backward_association
(17,7), # backward_association -> match_class Child()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = []
|
levilucio/SyVOLT
|
ExFamToPerson/transformation/HcopersonsSolveRefCountryFamilyChildCommunityMan.py
|
Python
|
mit
| 4,965
|
import os
from parameterized import param, parameterized
from rsmtool.test_utils import check_run_cross_validation
# allow test directory to be set via an environment variable
# which is needed for package testing
TEST_DIR = os.environ.get('TESTDIR', None)
if TEST_DIR:
rsmtool_test_dir = TEST_DIR
else:
from rsmtool.test_utils import rsmtool_test_dir
@parameterized([
param('lr-xval', 'lr_xval'), # uses 5 folds if not specified
param('lr-xval-tsv', 'lr_xval_tsv', folds=3, file_format="tsv"),
param('lr-xval-xlsx', 'lr_xval_xlsx', folds=3, file_format="xlsx"),
param('lr-xval-folds-file', 'lr_xval_folds_file', folds=2), # folds file contains 2 folds
param('lr-xval-subgroups', 'lr_xval_subgroups', folds=3, subgroups=["QUESTION", "L1"]),
param('lr-xval-consistency', 'lr_xval_consistency', folds=3, consistency=True, subgroups=["L1"]),
param('lr-xval-skll-model', 'lr_xval_skll_model', folds=2, skll=True), # uses folds file
param('lr-xval-thumbnails', 'lr_xval_thumbnails', folds=3),
param('lr-xval-feature-list', 'lr_xval_feature_list', folds=3),
param('lr-xval-feature-subset-file', 'lr_xval_feature_subset_file', folds=3)
])
def test_run_cross_validation_parameterized(*args, **kwargs):
if TEST_DIR:
kwargs['given_test_dir'] = TEST_DIR
check_run_cross_validation(*args, **kwargs)
|
EducationalTestingService/rsmtool
|
tests/test_experiment_rsmxval.py
|
Python
|
apache-2.0
| 1,357
|
# coding: utf-8
"""
DragonPy - Dragon 32 emulator in Python
=======================================
:created: 2013 by Jens Diemer - www.jensdiemer.de
:copyleft: 2013-2015 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import os
from dragonpy.core.configs import BaseConfig, SBC09
from dragonpy.sbc09.mem_info import get_sbc09_meminfo
from dragonpy.sbc09.periphery import SBC09Periphery
from dragonlib.api import CoCoAPI
from dragonpy.sbc09.sbc09_rom import SBC09Rom
class SBC09Cfg(BaseConfig):
"""
DragonPy config for Lennart's 6809 single board computer
Buggy machine language monitor and rudimentary O.S. version 1.0
For more info, read ./sbc09/README.creole
"""
CONFIG_NAME = SBC09
MACHINE_NAME = "Lennart's 6809 single board computer"
RAM_START = 0x0000
RAM_END = 0x7FFF
# RAM size: 0x8000 == 32768 Bytes
ROM_START = 0x8000
ROM_END = 0xFFFF
# ROM size: 0x4000 == 16384 Bytes
BUS_ADDR_AREAS = (
(0xe000, 0xe001, "RS232 interface"), # emulated serial port (ACIA)
(0xFFF2, 0xFFFE, "Interrupt vectors"),
)
DEFAULT_ROMS = (
SBC09Rom(address=0x8000, max_size=None),
)
# Used in unittest for init the machine:
STARTUP_END_ADDR = 0xe45a # == O.S. routine to read a character into B register.
def __init__(self, cmd_args):
super(SBC09Cfg, self).__init__(cmd_args)
self.machine_api = CoCoAPI() # FIXME!
# if self.verbosity <= logging.INFO:
self.mem_info = get_sbc09_meminfo()
self.periphery_class = SBC09Periphery
config = SBC09Cfg
|
JuhaniImberg/DragonPy
|
dragonpy/sbc09/config.py
|
Python
|
gpl-3.0
| 1,744
|
import numpy as np
import os
from osgeo import gdal
from osgeo.gdalconst import *
import sys
from itertools import product
from datetime import datetime as dt
from scipy import optimize
def generate_thresholds(start, step, numberofsteps, lengthofelement):
end = numberofsteps * step + start
thresholdvals = range(start, end, step)
thresholdlists = []
for i in range(0, lengthofelement):
thresholdlists.append(thresholdvals)
for i in product(*thresholdlists):
yield i
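# Illustrative values: the generator yields the full cartesian product of one
# threshold range per element, e.g.
#   list(generate_thresholds(500, 300, 2, 2))
#       == [(500, 500), (500, 800), (800, 500), (800, 800)]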
def open_images(thresh, croparray, filelist, searchdir, searchstringsvals):
arrays = []
#print filelist
u = 0
for f, cropval in filelist:
img = gdal.Open(f, GA_ReadOnly)
if img is None:
raise Exception("Could not open: {0}".format(os.path.join(searchdir, f)))
else:
rows = img.RasterYSize
cols = img.RasterXSize
band = img.GetRasterBand(1)
array = band.ReadAsArray(0, 0, cols, rows)
#array[array > thresh[u]] = 10000
#print thresh[u]
arrays.append((np.copy(array), cropval))
band = ""
img = ""
u += 1
count = 0
finals = []
#with open(os.path.join(searchdir, "arrays.txt"), 'w') as text:
# for i, j in arrays:
# text.write(str(i)+"\n\n")
for array, cropval in arrays:
ltarrays = []
nodataarrays = []
for i in range(0, len(arrays)):
if not i == count:
lt = array.__lt__(arrays[i][0])
ltarrays.append(np.copy(lt))
ndarray = np.copy(array)
ndarray[ndarray != -3000] = 0
ndarray = np.rint(ndarray)
ndarray = ndarray.astype(int)
nodataarrays.append(ndarray)
count += 1
#with open(os.path.join(searchdir, "ltarrays.txt"), 'w') as text:
# for i in ltarrays:
# text.write(str(i)+"\n\n")
for i in range(0, len(ltarrays)):
if not i:
allpxbestfit = np.copy(ltarrays[i])
else:
allpxbestfit = allpxbestfit.__and__(ltarrays[i])
finals.append(cropval * allpxbestfit)
#with open(os.path.join(searchdir, "finals.txt"), 'w') as text:
# for i in finals:
# text.write(str(i))
nodataarray = ""
for ndarray in nodataarrays:
if nodataarray == "":
nodataarray = np.copy(ndarray)
else:
nodataarray = nodataarray.__and__(ndarray)
#print nodataarray
classification = ""
for final in finals:
if classification == "":
classification = np.copy(final)
else:
classification = classification.__or__(final)
classification = classification.__or__(nodataarray)
#print classification
#with open(os.path.join(searchdir, "classified.txt"), 'w') as text:
# text.write(str(classification))
#Accuracy Assessment
results = {}
for string, val in searchstringsvals:
dict = {}
temparray = np.copy(classification)
temparray[temparray != val] = 0
correct = temparray.__eq__(croparray)
incorrect = temparray.__ne__(croparray)
temparray[temparray == val] = 1
incorrectvals = incorrect.__mul__(croparray).__mul__(temparray)
for string2, val2 in searchstringsvals:
temparray2 = np.copy(incorrectvals)
temparray2[temparray2 != val2] = 0
dict[string2] = temparray2.sum() / val2
dict[string] = correct.sum()
dict["other"] = len([x for y in incorrectvals for x in y if not x in zip(*searchstringsvals)[1] and not x == 0])
results[string] = dict.copy()
dict = {}
temparray = np.copy(classification)
temparray[classification == 0] = 1
temparray[classification != 0] = 0
incorrectvals = temparray.__mul__(croparray)
for string2, val2 in searchstringsvals:
temparray2 = np.copy(incorrectvals)
temparray2[temparray2 != val2] = 0
dict[string2] = temparray2.sum() / val2
dict["other"] = len([x for y in incorrectvals for x in y if not x in zip(*searchstringsvals)[1] and not x == 0])
results["other"] = dict.copy()
numpx = 0
for key, val in results.items():
for k, v in val.items():
numpx += v
#print "\n"
#print results
#print numpx
printstring = ""
correct = 0
croporder = []
h = 0
for crop, values in results.items():
total = 0
vals = []
for crop2, pxcount in values.items():
total = total + pxcount
if not crop2 in croporder:
croporder.append(crop2)
if crop2 == crop:
correct += pxcount
vals.append(pxcount)
printstring = printstring + "{0}\t\t{1}\t{2}\n".format(crop, vals, total)
h = h + total
inverseaccuracy = 1 - (correct / (h * 1.0))
#outstring = ("{0}\n\t\t{1}\trow total\n{2}\n{3}\n\n\n".format(thresh, croporder, printstring, accuracy))
return inverseaccuracy
def main():
searchdir = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/MODIS_KANSAS_2007-2012/reprojected/Classified/test1/fullpxonly/clip1refs/KansasNDVI_2012_clip1_SLSQP/"
outFile = os.path.join(searchdir, "classified.tif")
cropimgpath = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/polygonclip_20130929223024_325071991/resampled/newclips/2012clip1.tif"
searchstringsvals = [("soy.", 5), ("wwheat.", 24), ("corn", 1), ("sorghum", 4), ("wwheatsoydbl", 26)]
#nodata = -3000
accuracyreport = os.path.join(searchdir, "accuracy.txt")
gdal.AllRegister()
#np.set_printoptions(threshold=np.nan)
#Crop image is constant for all iterations
cropimg = gdal.Open(cropimgpath, GA_ReadOnly)
if cropimg is None:
raise Exception("Could not open: {0}".format(cropimgpath))
else:
rows = cropimg.RasterYSize
cols = cropimg.RasterXSize
projection = cropimg.GetProjection()
transformation = cropimg.GetGeoTransform()
band = cropimg.GetRasterBand(1)
datatype = band.DataType
croparray = band.ReadAsArray(0, 0, cols, rows)
band =""
cropimg = ""
print "Opened crop img"
#
filelist = []
files = os.listdir(searchdir)
for f in files:
for string, val in searchstringsvals:
if f.endswith(".tif"):
if string in f:
filelist.append((os.path.join(searchdir, f), val))
#thresholds = generate_thresholds(500, 300, 4, len(filelist))
#writestring = ""
#bestacc = 0
#bestthresh = ""
rranges = (slice(500, 2000, 50), slice(500, 2000, 50), slice(500, 2000, 50), slice(500, 2000, 50), slice(500, 2000, 50))
x0 = np.array([800, 800, 800, 800, 800])
# open_images is the objective: it returns 1 - accuracy for a threshold vector
res = optimize.anneal(open_images, x0, args=(croparray, filelist, searchdir, searchstringsvals))
print res
# for thresh in thresholds:
# start = dt.now()
# accuracy, classification, cols, rows, outstring, nodata = classify_with_threshold(croparray,
# filelist,
# searchdir, searchstringsvals,
# thresh)
# writestring = writestring + outstring
#
# if accuracy > bestacc:
# bestacc = accuracy
# bestthresh = thresh
#
# elapsed = dt.now() - start
# print thresh, elapsed, accuracy, bestacc
#
#except Exception as e:
# print e
#
#finally:
#
# with open(accuracyreport, 'w') as text:
# text.write("{0}\nBest:\n{1} {2}".format(writestring, bestthresh, bestacc))
#
# print "\n", bestthresh, bestacc
#
# accuracy, classification, cols, rows, outstring, nodata = classify_with_threshold(croparray,
# filelist,
# searchdir, searchstringsvals,
# bestthresh)
# driver = gdal.GetDriverByName("ENVI")
# driver.Register()
#
# outds = driver.Create(outFile, cols, rows, 1, GDT_Int16)
# outds.SetGeoTransform(transformation)
# outds.SetProjection(projection)
# outband = outds.GetRasterBand(1)
# outband.WriteArray(classification, 0, 0)
# outband.SetNoDataValue(-3000)
# outband.FlushCache()
#
# outband = ""
# outds = ""
#
# print "outputted"
if __name__ == '__main__':
sys.exit(main())
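# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the accuracy
# assessment above is, in effect, building a confusion matrix. Assuming a
# classification array and a reference array that share the same integer
# class codes, the same per-class tallies can be written directly with
# numpy; the function name and arguments here are hypothetical.
def build_confusion_matrix(classification, reference, class_values):
    """Return {predicted class: {actual class: pixel count}}."""
    matrix = {}
    for predicted in class_values:
        predicted_mask = (classification == predicted)
        row = {}
        for actual in class_values:
            # Count pixels predicted as one class that the reference labels as another.
            row[actual] = int((predicted_mask & (reference == actual)).sum())
        matrix[predicted] = row
    return matrix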
|
jkeifer/pyHytemporal
|
old_TO_MIGRATE/accuracy_assessment_minimized.py
|
Python
|
mit
| 9,021
|
import traceback
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import get_current_thread_id, IS_IRONPYTHON, NO_FTRACE, IS_WINDOWS
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from pydevd_tracing import SetTrace
# IFDEF CYTHON
# # In Cython, set_additional_thread_info is bundled in the file.
# from cpython.object cimport PyObject
# from cpython.ref cimport Py_INCREF, Py_XDECREF
# ELSE
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_frame import PyDBFrame
# ENDIF
from os.path import basename, splitext
from _pydevd_bundle.pydevd_breakpoints import stop_on_unhandled_exception
from _pydevd_bundle.pydevd_collect_try_except_info import collect_try_except_info
threadingCurrentThread = threading.currentThread
get_file_type = DONT_TRACE.get
# Note: this is different from pydevd_constants.thread_get_ident because we want Jython
# to be None here because it also doesn't have threading._active.
try:
threading_get_ident = threading.get_ident # Python 3
except:
try:
threading_get_ident = threading._get_ident # Python 2
except:
threading_get_ident = None # Jython
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
# cdef dict global_cache_skips
# cdef dict global_cache_frame_skips
# ELSE
# ENDIF
# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}
# IFDEF CYTHON
# cdef class SafeCallWrapper:
# cdef method_object
# def __init__(self, method_object):
# self.method_object = method_object
# def __call__(self, *args):
# #Cannot use 'self' once inside the delegate call since we are borrowing the self reference f_trace field
# #in the frame, and that reference might get destroyed by set trace on frame and parents
# cdef PyObject* method_obj = <PyObject*> self.method_object
# Py_INCREF(<object>method_obj)
# ret = (<object>method_obj)(*args)
# Py_XDECREF (method_obj)
# return SafeCallWrapper(ret) if ret is not None else None
# ELSE
# ENDIF
def fix_top_level_trace_and_get_trace_func(py_db, frame):
# IFDEF CYTHON
# cdef str filename;
# cdef str name;
# cdef tuple args;
# ENDIF
# Note: this is always the first entry-point in the tracing for any thread.
# After entering here we'll set a new tracing function for this thread
# where more information is cached (and will also setup the tracing for
# frames where we should deal with unhandled exceptions).
thread = None
# Cache the frame which should be traced to deal with unhandled exceptions.
# (i.e.: thread entry-points).
f_unhandled = frame
# print('called at', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
force_only_unhandled_tracer = False
while f_unhandled is not None:
filename = f_unhandled.f_code.co_filename
name = splitext(basename(filename))[0]
if name == 'threading':
if f_unhandled.f_code.co_name in ('__bootstrap', '_bootstrap'):
# We need __bootstrap_inner, not __bootstrap.
return None, False
elif f_unhandled.f_code.co_name in ('__bootstrap_inner', '_bootstrap_inner'):
# Note: be careful not to use threading.currentThread to avoid creating a dummy thread.
t = f_unhandled.f_locals.get('self')
force_only_unhandled_tracer = True
if t is not None and isinstance(t, threading.Thread):
thread = t
break
elif name == 'pydev_monkey':
if f_unhandled.f_code.co_name == '__call__':
force_only_unhandled_tracer = True
break
elif name == 'pydevd':
if f_unhandled.f_code.co_name in ('run', 'main'):
# We need to get to _exec
return None, False
if f_unhandled.f_code.co_name == '_exec':
force_only_unhandled_tracer = True
break
elif f_unhandled.f_back is None:
break
f_unhandled = f_unhandled.f_back
if thread is None:
# Important: don't call threadingCurrentThread if we're in the threading module
# to avoid creating dummy threads.
if threading_get_ident is not None:
thread = threading._active.get(threading_get_ident())
if thread is None:
if IS_WINDOWS and f_unhandled and not f_unhandled.f_code.co_filename.startswith('threading'):
# When attaching to a process on Windows, its main thread ID may not be in `threading._active`
                    # unless the module imports `threading` on its own.
thread = threadingCurrentThread()
else:
return None, False
else:
# Jython does not have threading.get_ident().
thread = threadingCurrentThread()
if getattr(thread, 'pydev_do_not_trace', None):
SetTrace(None)
return None, False
try:
additional_info = thread.additional_info
if additional_info is None:
raise AttributeError()
except:
additional_info = set_additional_thread_info(thread)
# print('enter thread tracer', thread, get_current_thread_id(thread))
args = (py_db, thread, additional_info, global_cache_skips, global_cache_frame_skips)
if f_unhandled is not None:
if f_unhandled.f_back is None and not force_only_unhandled_tracer:
# Happens when we attach to a running program.
top_level_thread_tracer = TopLevelThreadTracerNoBackFrame(ThreadTracer(args), args)
else:
# Stop in some internal place to report about unhandled exceptions
top_level_thread_tracer = TopLevelThreadTracerOnlyUnhandledExceptions(args)
# IFDEF CYTHON
# thread._top_level_thread_tracer = top_level_thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
# ELSE
# ENDIF
# print(' --> found to trace unhandled', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
f_trace = top_level_thread_tracer.get_trace_dispatch_func()
# IFDEF CYTHON
# f_unhandled.f_trace = SafeCallWrapper(f_trace)
# ELSE
f_unhandled.f_trace = f_trace
# ENDIF
if frame is f_unhandled:
return f_unhandled.f_trace, False
thread_tracer = ThreadTracer(args)
# IFDEF CYTHON
# thread._tracer = thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
# ELSE
# ENDIF
return thread_tracer, True
# IFDEF CYTHON
# def trace_dispatch(py_db, frame, str event, arg):
# ELSE
def trace_dispatch(py_db, frame, event, arg):
# ENDIF
if fix_top_level_trace_and_get_trace_func is None or threadingCurrentThread is None or splitext is None:
# When the application is being exited with live daemon threads, it's possible that some
# of the names we require are already None, so, check that tokens we need are there.
# Code to diagnose where this happens below.
# msg = ''
# msg += 'fix_top_level_trace_and_get_trace_func: %s\n' % (fix_top_level_trace_and_get_trace_func,)
# msg += 'threadingCurrentThread: %s\n' % (threadingCurrentThread,)
# msg += 'splitext: %s\n' % (splitext,)
# while frame is not None:
        # msg += 'location 1: %s %s %s\n' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename)
# if 't' in frame.f_locals:
# t = frame.f_locals['t']
# if hasattr(t, 'run'):
# msg += 'Error 1 in thread with function: %s %s %s\n' % (t._Thread__target, t.run, t.__class__)
# t = None
#
# frame = frame.f_back
# print(msg)
return None
thread_trace_func, apply_to_settrace = fix_top_level_trace_and_get_trace_func(py_db, frame)
if thread_trace_func is None:
if event != 'call': frame.f_trace = NO_FTRACE
return None
if apply_to_settrace:
py_db.enable_tracing(thread_trace_func)
return thread_trace_func(frame, event, arg)
# IFDEF CYTHON
# cdef class TopLevelThreadTracerOnlyUnhandledExceptions:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class TopLevelThreadTracerOnlyUnhandledExceptions:
def __init__(self, args):
self._args = args
# ENDIF
def trace_unhandled_exceptions(self, frame, event, arg):
# Note that we ignore the frame as this tracing method should only be put in topmost frames already.
# print('trace_unhandled_exceptions', event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)
if event == 'exception' and arg is not None:
py_db, t, additional_info = self._args[0:3]
if arg is not None:
if not additional_info.suspended_at_unhandled:
additional_info.suspended_at_unhandled = True
stop_on_unhandled_exception(py_db, t, additional_info, arg)
# No need to reset frame.f_trace to keep the same trace function.
# IFDEF CYTHON
# return SafeCallWrapper(self.trace_unhandled_exceptions)
# ELSE
return self.trace_unhandled_exceptions
# ENDIF
def get_trace_dispatch_func(self):
return self.trace_unhandled_exceptions
# IFDEF CYTHON
# cdef class TopLevelThreadTracerNoBackFrame:
#
# cdef public object _frame_trace_dispatch;
# cdef public tuple _args;
# cdef public object _try_except_info;
# cdef public object _last_exc_arg;
# cdef public set _raise_lines;
# cdef public int _last_raise_line;
#
# def __init__(self, frame_trace_dispatch, tuple args):
# self._frame_trace_dispatch = frame_trace_dispatch
# self._args = args
# self._try_except_info = None
# self._last_exc_arg = None
# self._raise_lines = set()
# self._last_raise_line = -1
# ELSE
class TopLevelThreadTracerNoBackFrame:
'''
This tracer is pretty special in that it's dealing with a frame without f_back (i.e.: top frame
on remote attach or QThread).
This means that we have to carefully inspect exceptions to discover whether the exception will
be unhandled or not (if we're dealing with an unhandled exception we need to stop as unhandled,
otherwise we need to use the regular tracer -- unfortunately the debugger has little info to
work with in the tracing -- see: https://bugs.python.org/issue34099, so, we inspect bytecode to
determine if some exception will be traced or not... note that if this is not available -- such
    as on Jython -- we consider any top-level exception to be unhandled).
'''
def __init__(self, frame_trace_dispatch, args):
self._frame_trace_dispatch = frame_trace_dispatch
self._args = args
self._try_except_info = None
self._last_exc_arg = None
self._raise_lines = set()
self._last_raise_line = -1
# ENDIF
def trace_dispatch_and_unhandled_exceptions(self, frame, event, arg):
# print('trace_dispatch_and_unhandled_exceptions', event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)
if self._frame_trace_dispatch is not None:
self._frame_trace_dispatch = self._frame_trace_dispatch(frame, event, arg)
if event == 'exception':
self._last_exc_arg = arg
self._raise_lines.add(frame.f_lineno)
self._last_raise_line = frame.f_lineno
elif event == 'return' and self._last_exc_arg is not None:
# For unhandled exceptions we actually track the return when at the topmost level.
try:
py_db, t, additional_info = self._args[0:3]
if not additional_info.suspended_at_unhandled: # Note: only check it here, don't set.
if frame.f_lineno in self._raise_lines:
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
else:
if self._try_except_info is None:
self._try_except_info = collect_try_except_info(frame.f_code)
if not self._try_except_info:
# Consider the last exception as unhandled because there's no try..except in it.
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
else:
# Now, consider only the try..except for the raise
valid_try_except_infos = []
for try_except_info in self._try_except_info:
if try_except_info.is_line_in_try_block(self._last_raise_line):
valid_try_except_infos.append(try_except_info)
if not valid_try_except_infos:
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
else:
# Note: check all, not only the "valid" ones to cover the case
# in "pydev_tests_python.test_tracing_on_top_level.raise_unhandled10"
# where one try..except is inside the other with only a raise
# and it's gotten in the except line.
for try_except_info in self._try_except_info:
if try_except_info.is_line_in_except_block(frame.f_lineno):
if (
frame.f_lineno == try_except_info.except_line or
frame.f_lineno in try_except_info.raise_lines_in_except
):
# In a raise inside a try..except block or some except which doesn't
# match the raised exception.
stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
break
else:
break # exited during the except block (no exception raised)
finally:
# Remove reference to exception after handling it.
self._last_exc_arg = None
# IFDEF CYTHON
# ret = SafeCallWrapper(self.trace_dispatch_and_unhandled_exceptions)
# ELSE
ret = self.trace_dispatch_and_unhandled_exceptions
# ENDIF
# Need to reset (the call to _frame_trace_dispatch may have changed it).
frame.f_trace = ret
return ret
def get_trace_dispatch_func(self):
return self.trace_dispatch_and_unhandled_exceptions
# IFDEF CYTHON
# cdef class ThreadTracer:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class ThreadTracer:
def __init__(self, args):
self._args = args
# ENDIF
def __call__(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
:param PyDB py_db:
This is the global debugger (this method should actually be added as a method to it).
'''
# IFDEF CYTHON
# cdef str filename;
# cdef str base;
# cdef int pydev_step_cmd;
# cdef tuple frame_cache_key;
# cdef dict cache_skips;
# cdef bint is_stepping;
# cdef tuple abs_path_real_path_and_base;
# cdef PyDBAdditionalThreadInfo additional_info;
# ENDIF
# print('ENTER: trace_dispatch', frame.f_code.co_filename, frame.f_lineno, event, frame.f_code.co_name)
py_db, t, additional_info, cache_skips, frame_skips_cache = self._args
pydev_step_cmd = additional_info.pydev_step_cmd
is_stepping = pydev_step_cmd != -1
try:
if py_db._finish_debugging_session:
if not py_db._termination_event_set:
# that was not working very well because jython gave some socket errors
try:
if py_db.output_checker_thread is None:
kill_all_pydev_threads()
except:
traceback.print_exc()
py_db._termination_event_set = True
if event != 'call': frame.f_trace = NO_FTRACE
return None
# if thread is not alive, cancel trace_dispatch processing
if not is_thread_alive(t):
py_db.notify_thread_not_alive(get_current_thread_id(t))
if event != 'call': frame.f_trace = NO_FTRACE
return None # suspend tracing
if py_db.thread_analyser is not None:
py_db.thread_analyser.log_event(frame)
if py_db.asyncio_analyser is not None:
py_db.asyncio_analyser.log_event(frame)
# Note: it's important that the context name is also given because we may hit something once
# in the global context and another in the local context.
frame_cache_key = (frame.f_code.co_firstlineno, frame.f_code.co_name, frame.f_code.co_filename)
if not is_stepping and frame_cache_key in cache_skips:
# print('skipped: trace_dispatch (cache hit)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
if event != 'call': frame.f_trace = NO_FTRACE
return None
try:
# Make fast path faster!
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
filename = abs_path_real_path_and_base[1]
file_type = get_file_type(abs_path_real_path_and_base[-1]) # we don't want to debug threading or anything related to pydevd
if file_type is not None:
if file_type == 1: # inlining LIB_FILE = 1
if not py_db.in_project_scope(filename):
# print('skipped: trace_dispatch (not in scope)', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[frame_cache_key] = 1
if event != 'call': frame.f_trace = NO_FTRACE
return None
else:
# print('skipped: trace_dispatch', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[frame_cache_key] = 1
if event != 'call': frame.f_trace = NO_FTRACE
return None
if is_stepping:
if py_db.is_filter_enabled and py_db.is_ignored_by_filters(filename):
# ignore files matching stepping filters
if event != 'call': frame.f_trace = NO_FTRACE
return None
if py_db.is_filter_libraries and not py_db.in_project_scope(filename):
# ignore library files while stepping
if event != 'call': frame.f_trace = NO_FTRACE
return None
# print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name, file_type)
if additional_info.is_tracing:
if event != 'call': frame.f_trace = NO_FTRACE
                return None  # we don't want to trace code invoked from pydevd_frame.trace_dispatch
# Just create PyDBFrame directly (removed support for Python versions < 2.5, which required keeping a weak
# reference to the frame).
ret = PyDBFrame(
(
py_db, filename, additional_info, t, frame_skips_cache, frame_cache_key,
)
).trace_dispatch(frame, event, arg)
if ret is None:
cache_skips[frame_cache_key] = 1
if event != 'call': frame.f_trace = NO_FTRACE
return None
# IFDEF CYTHON
# ret = SafeCallWrapper(ret)
# ENDIF
frame.f_trace = ret # Make sure we keep the returned tracer.
return ret
except SystemExit:
if event != 'call': frame.f_trace = NO_FTRACE
return None
except Exception:
if py_db._finish_debugging_session:
if event != 'call': frame.f_trace = NO_FTRACE
return None # Don't log errors when we're shutting down.
# Log it
try:
if traceback is not None:
# This can actually happen during the interpreter shutdown in Python 2.7
traceback.print_exc()
except:
# Error logging? We're really in the interpreter shutdown...
# (https://github.com/fabioz/PyDev.Debugger/issues/8)
pass
if event != 'call': frame.f_trace = NO_FTRACE
return None
if IS_IRONPYTHON:
# This is far from ideal, as we'll leak frames (we'll always have the last created frame, not really
# the last topmost frame saved -- this should be Ok for our usage, but it may leak frames and things
# may live longer... as IronPython is garbage-collected, things should live longer anyways, so, it
# shouldn't be an issue as big as it's in CPython -- it may still be annoying, but this should
# be a reasonable workaround until IronPython itself is able to provide that functionality).
#
# See: https://github.com/IronLanguages/main/issues/1630
from _pydevd_bundle.pydevd_additional_thread_info_regular import _tid_to_last_frame
_original_call = ThreadTracer.__call__
def __call__(self, frame, event, arg):
_tid_to_last_frame[self._args[1].ident] = frame
return _original_call(self, frame, event, arg)
ThreadTracer.__call__ = __call__
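# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pydevd): the dispatch machinery above
# follows CPython's sys.settrace protocol -- the global trace function is
# invoked with a 'call' event for each new frame, and whatever it returns is
# installed as that frame's local trace function. A minimal standalone
# version of the same contract, with hypothetical names, would be:
#
# import sys
#
# def _local_trace(frame, event, arg):
#     if event == 'exception':
#         print('exception at line %s' % frame.f_lineno)
#     return _local_trace  # keep receiving events for this frame
#
# def _global_trace(frame, event, arg):
#     if event == 'call':
#         return _local_trace  # trace lines/returns/exceptions in the frame
#     return None
#
# sys.settrace(_global_trace)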
|
jwren/intellij-community
|
python/helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py
|
Python
|
apache-2.0
| 23,323
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import warnings
# Import third party libs
import yaml
from yaml.nodes import MappingNode
from yaml.constructor import ConstructorError
try:
yaml.Loader = yaml.CLoader
yaml.Dumper = yaml.CDumper
except Exception:
pass
# This function is safe and needs to stay as yaml.load. The load function
# accepts a custom loader, and every time this function is used in Salt
# the custom loader defined below is used. This should be altered though to
# not require the custom loader to be explicitly added.
load = yaml.load # pylint: disable=C0103
class DuplicateKeyWarning(RuntimeWarning):
'''
Warned when duplicate keys exist
'''
warnings.simplefilter('always', category=DuplicateKeyWarning)
# with code integrated from https://gist.github.com/844388
class SaltYamlSafeLoader(yaml.SafeLoader, object):
'''
Create a custom YAML loader that uses the custom constructor. This allows
for the YAML loading defaults to be manipulated based on needs within salt
to make things like sls file more intuitive.
'''
def __init__(self, stream, dictclass=dict):
yaml.SafeLoader.__init__(self, stream)
if dictclass is not dict:
# then assume ordered dict and use it for both !map and !omap
self.add_constructor(
u'tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor(
u'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
self.dictclass = dictclass
def construct_yaml_map(self, node):
data = self.dictclass()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
'''
Build the mapping for YAML
'''
if not isinstance(node, MappingNode):
raise ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark)
self.flatten_mapping(node)
mapping = self.dictclass()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError:
err = ('While constructing a mapping {0} found unacceptable '
'key {1}').format(node.start_mark, key_node.start_mark)
raise ConstructorError(err)
value = self.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError('Conflicting ID {0!r}'.format(key))
mapping[key] = value
return mapping
def construct_scalar(self, node):
'''
        Verify integers and pass them in correctly if they are declared
as octal
'''
if node.tag == 'tag:yaml.org,2002:int':
if node.value == '0':
pass
elif node.value.startswith('0') and not node.value.startswith(('0b', '0x')):
node.value = node.value.lstrip('0')
# If value was all zeros, node.value would have been reduced to
# an empty string. Change it to '0'.
if node.value == '':
node.value = '0'
return super(SaltYamlSafeLoader, self).construct_scalar(node)
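# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), assuming PyYAML is
# available: drive the loader directly, optionally with an OrderedDict so
# that both !!map and !!omap preserve key order. get_single_data() and
# dispose() are the standard PyYAML loader entry points that yaml.load
# itself uses under the hood.
#
# import io
# from collections import OrderedDict
#
# def _example_load(text, dictclass=dict):
#     loader = SaltYamlSafeLoader(io.StringIO(text), dictclass=dictclass)
#     try:
#         return loader.get_single_data()
#     finally:
#         loader.dispose()
#
# _example_load(u'a: 1\nb: 2\n', dictclass=OrderedDict)
# # A duplicate key, e.g. u'a: 1\na: 2\n', raises ConstructorError
# # ('Conflicting ID') from construct_mapping above.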
|
smallyear/linuxLearn
|
salt/salt/utils/yamlloader.py
|
Python
|
apache-2.0
| 3,478
|
from __future__ import absolute_import, print_function
import os
import sys
import re
from . import catalog
from . import build_tools
from . import converters
from . import base_spec
class ext_function_from_specs(object):
def __init__(self,name,code_block,arg_specs):
self.name = name
self.arg_specs = base_spec.arg_spec_list(arg_specs)
self.code_block = code_block
self.compiler = ''
self.customize = base_info.custom_info()
def header_code(self):
pass
def function_declaration_code(self):
code = 'static PyObject* %s(PyObject*self, PyObject* args,' \
' PyObject* kywds)\n{\n'
return code % self.name
def template_declaration_code(self):
code = 'template<class T>\n' \
'static PyObject* %s(PyObject*self, PyObject* args,' \
' PyObject* kywds)\n{\n'
return code % self.name
#def cpp_function_declaration_code(self):
# pass
#def cpp_function_call_code(self):
#s pass
def parse_tuple_code(self):
""" Create code block for PyArg_ParseTuple. Variable declarations
for all PyObjects are done also.
This code got a lot uglier when I added local_dict...
"""
declare_return = 'py::object return_val;\n' \
'int exception_occurred = 0;\n' \
'PyObject *py_local_dict = NULL;\n'
arg_string_list = self.arg_specs.variable_as_strings() + ['"local_dict"']
arg_strings = ','.join(arg_string_list)
if arg_strings:
arg_strings += ','
declare_kwlist = 'static const char *kwlist[] = {%s NULL};\n' % \
arg_strings
py_objects = ', '.join(self.arg_specs.py_pointers())
init_flags = ', '.join(self.arg_specs.init_flags())
init_flags_init = '= '.join(self.arg_specs.init_flags())
py_vars = ' = '.join(self.arg_specs.py_variables())
if py_objects:
declare_py_objects = 'PyObject ' + py_objects + ';\n'
declare_py_objects += 'int ' + init_flags + ';\n'
init_values = py_vars + ' = NULL;\n'
init_values += init_flags_init + ' = 0;\n\n'
else:
declare_py_objects = ''
init_values = ''
#Each variable is in charge of its own cleanup now.
#cnt = len(arg_list)
#declare_cleanup = "blitz::TinyVector<PyObject*,%d> clean_up(0);\n" % cnt
ref_string = ', '.join(self.arg_specs.py_references())
if ref_string:
ref_string += ', &py_local_dict'
else:
ref_string = '&py_local_dict'
format = "O" * len(self.arg_specs) + "|O" + ':' + self.name
parse_tuple = 'if(!PyArg_ParseTupleAndKeywords(args,' \
'kywds,"%s",const_cast<char**>(kwlist),%s))\n' % \
(format,ref_string)
parse_tuple += ' return NULL;\n'
return declare_return + declare_kwlist + declare_py_objects \
+ init_values + parse_tuple
def arg_declaration_code(self):
arg_strings = []
for arg in self.arg_specs:
arg_strings.append(arg.declaration_code())
arg_strings.append(arg.init_flag() + " = 1;\n")
code = "".join(arg_strings)
return code
def arg_cleanup_code(self):
arg_strings = []
have_cleanup = filter(lambda x:x.cleanup_code(),self.arg_specs)
for arg in have_cleanup:
code = "if(%s)\n" % arg.init_flag()
code += "{\n"
code += indent(arg.cleanup_code(),4)
code += "}\n"
arg_strings.append(code)
code = "".join(arg_strings)
return code
def arg_local_dict_code(self):
arg_strings = []
for arg in self.arg_specs:
arg_strings.append(arg.local_dict_code())
code = "".join(arg_strings)
return code
def function_code(self):
decl_code = indent(self.arg_declaration_code(),4)
cleanup_code = indent(self.arg_cleanup_code(),4)
function_code = indent(self.code_block,4)
local_dict_code = indent(self.arg_local_dict_code(),4)
dict_code = "if(py_local_dict) \n" \
"{ \n" \
" py::dict local_dict = py::dict(py_local_dict); \n" + \
local_dict_code + \
"} \n"
try_code = "try \n" \
"{ \n" + \
decl_code + \
" /*<function call here>*/ \n" + \
function_code + \
indent(dict_code,4) + \
"\n} \n"
catch_code = "catch(...) \n" \
"{ \n" + \
" return_val = py::object(); \n" \
" exception_occurred = 1; \n" \
"} \n"
return_code = " /*cleanup code*/ \n" + \
cleanup_code + \
' if(!(PyObject*)return_val && !exception_occurred)\n' \
' {\n \n' \
' return_val = Py_None; \n' \
' }\n \n' \
' return return_val.disown(); \n' \
'} \n'
all_code = self.function_declaration_code() + \
indent(self.parse_tuple_code(),4) + \
indent(try_code,4) + \
indent(catch_code,4) + \
return_code
return all_code
def python_function_definition_code(self):
args = (self.name, self.name)
function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS|' \
'METH_KEYWORDS},\n' % args
return function_decls
def set_compiler(self,compiler):
self.compiler = compiler
for arg in self.arg_specs:
arg.set_compiler(compiler)
class ext_function(ext_function_from_specs):
def __init__(self,name,code_block, args, local_dict=None, global_dict=None,
auto_downcast=1, type_converters=None):
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
if type_converters is None:
type_converters = converters.default
arg_specs = assign_variable_types(args,local_dict, global_dict,
auto_downcast, type_converters)
ext_function_from_specs.__init__(self,name,code_block,arg_specs)
# Imported here, after the classes above, as in the original layout; this is
# safe because base_info is only referenced when the classes are instantiated.
from . import base_info
class ext_module(object):
def __init__(self,name,compiler=''):
standard_info = converters.standard_info
self.name = name
self.functions = []
self.compiler = compiler
self.customize = base_info.custom_info()
self._build_information = base_info.info_list(standard_info)
def add_function(self,func):
self.functions.append(func)
def module_code(self):
code = '\n'.join([
"""\
#ifdef __CPLUSPLUS__
extern "C" {
#endif
""",
self.warning_code(),
self.header_code(),
self.support_code(),
self.function_code(),
self.python_function_definition_code(),
self.module_init_code(),
"""\
#ifdef __CPLUSPLUS__
}
#endif
"""
])
return code
def arg_specs(self):
all_arg_specs = base_spec.arg_spec_list()
for func in self.functions:
all_arg_specs += func.arg_specs
return all_arg_specs
def build_information(self):
info = self._build_information + [self.customize] + \
self.arg_specs().build_information()
for func in self.functions:
info.append(func.customize)
#redundant, but easiest place to make sure compiler is set
for i in info:
i.set_compiler(self.compiler)
return info
def get_headers(self):
all_headers = self.build_information().headers()
# blitz/array.h always needs to go before most other headers, so we
# hack that here, but we need to ensure that Python.h is the very
# first header included. As indicated in
# http://docs.python.org/api/includes.html
# "Warning: Since Python may define some pre-processor definitions which
# affect the standard headers on some systems, you must include Python.h
# before any standard headers are included. "
# Since blitz/array.h pulls in system headers, we must massage this
# list a bit so that the order is Python.h, blitz/array.h, ...
if '"blitz/array.h"' in all_headers:
all_headers.remove('"blitz/array.h"')
# Insert blitz AFTER Python.h, which must remain the first header
all_headers.insert(1,'"blitz/array.h"')
return all_headers
def warning_code(self):
all_warnings = self.build_information().warnings()
w = map(lambda x: "#pragma warning(%s)\n" % x,all_warnings)
return '#ifndef __GNUC__\n' + ''.join(w) + '\n#endif'
def header_code(self):
h = self.get_headers()
h = map(lambda x: '#include ' + x + '\n',h)
return ''.join(h) + '\n'
def support_code(self):
code = self.build_information().support_code()
return ''.join(code) + '\n'
def function_code(self):
all_function_code = ""
for func in self.functions:
all_function_code += func.function_code()
return ''.join(all_function_code) + '\n'
def python_function_definition_code(self):
all_definition_code = ""
for func in self.functions:
all_definition_code += func.python_function_definition_code()
all_definition_code = indent(''.join(all_definition_code),4)
code = 'static PyMethodDef compiled_methods[] = \n' \
'{\n' \
'%s' \
' {NULL, NULL} /* Sentinel */\n' \
'};\n'
return code % (all_definition_code)
def module_init_code(self):
init_code_list = self.build_information().module_init_code()
init_code = indent(''.join(init_code_list),4)
code = 'PyMODINIT_FUNC init%s(void)\n' \
'{\n' \
'%s' \
' (void) Py_InitModule("%s", compiled_methods);\n' \
'}\n' % (self.name,init_code,self.name)
return code
def generate_file(self,file_name="",location='.'):
code = self.module_code()
if not file_name:
file_name = self.name + '.cpp'
name = generate_file_name(file_name,location)
#return name
return generate_module(code,name)
def set_compiler(self,compiler):
# This is not used anymore -- I think we should ditch it.
#for i in self.arg_specs()
# i.set_compiler(compiler)
for i in self.build_information():
i.set_compiler(compiler)
for i in self.functions:
i.set_compiler(compiler)
self.compiler = compiler
def build_kw_and_file(self,location,kw):
arg_specs = self.arg_specs()
info = self.build_information()
_source_files = info.sources()
# remove duplicates
source_files = {}
for i in _source_files:
source_files[i] = None
source_files = source_files.keys()
# add internally specified macros, includes, etc. to the key words
# values of the same names so that distutils will use them.
kw['define_macros'] = kw.get('define_macros',[]) + \
info.define_macros()
kw['include_dirs'] = kw.get('include_dirs',[]) + info.include_dirs()
kw['libraries'] = kw.get('libraries',[]) + info.libraries()
kw['library_dirs'] = kw.get('library_dirs',[]) + info.library_dirs()
kw['extra_compile_args'] = kw.get('extra_compile_args',[]) + \
info.extra_compile_args()
kw['extra_link_args'] = kw.get('extra_link_args',[]) + \
info.extra_link_args()
kw['sources'] = kw.get('sources',[]) + source_files
file = self.generate_file(location=location)
return kw,file
def setup_extension(self,location='.',**kw):
kw,file = self.build_kw_and_file(location,kw)
return build_tools.create_extension(file, **kw)
def compile(self,location='.',compiler=None, verbose=0, **kw):
if compiler is not None:
self.compiler = compiler
# !! removed -- we don't have any compiler dependent code
# currently in spec or info classes
# hmm. Is there a cleaner way to do this? Seems like
        # choosing the compiler spaghettis around a little.
#compiler = build_tools.choose_compiler(self.compiler)
#self.set_compiler(compiler)
kw,file = self.build_kw_and_file(location,kw)
# This is needed so that files build correctly even when different
# versions of Python are running around.
# Imported at beginning of file now to help with test paths.
# import catalog
#temp = catalog.default_temp_dir()
# for speed, build in the machines temp directory
temp = catalog.intermediate_dir()
success = build_tools.build_extension(file, temp_dir=temp,
compiler_name=compiler,
verbose=verbose, **kw)
if not success:
raise SystemError('Compilation failed')
def generate_file_name(module_name,module_location):
module_file = os.path.join(module_location,module_name)
return os.path.abspath(module_file)
def generate_module(module_string, module_file):
""" generate the source code file. Only overwrite
the existing file if the actual source has changed.
"""
file_changed = 1
if os.path.exists(module_file):
f = open(module_file,'r')
old_string = f.read()
f.close()
if old_string == module_string:
file_changed = 0
if file_changed:
f = open(module_file,'w')
f.write(module_string)
f.close()
return module_file
def assign_variable_types(variables,local_dict={}, global_dict={},
auto_downcast=1,
type_converters=converters.default):
incoming_vars = {}
incoming_vars.update(global_dict)
incoming_vars.update(local_dict)
variable_specs = []
errors = {}
for var in variables:
try:
example_type = incoming_vars[var]
# look through possible type specs to find which one
            # should be used for example_type
spec = None
for factory in type_converters:
if factory.type_match(example_type):
spec = factory.type_spec(var,example_type)
break
if not spec:
# should really define our own type.
raise IndexError
else:
variable_specs.append(spec)
except KeyError:
errors[var] = ("The type and dimensionality specifications" +
"for variable '" + var + "' are missing.")
except IndexError:
errors[var] = ("Unable to convert variable '" + var +
"' to a C++ type.")
if errors:
raise TypeError(format_error_msg(errors))
if auto_downcast:
variable_specs = downcast(variable_specs)
return variable_specs
def downcast(var_specs):
""" Cast python scalars down to most common type of
arrays used.
Right now, focus on complex and float types. Ignore int types.
Require all arrays to have same type before forcing downcasts.
Note: var_specs are currently altered in place (horrors...!)
"""
numeric_types = []
    # grab all the numeric types associated with the variables.
for var in var_specs:
if hasattr(var,'numeric_type'):
numeric_types.append(var.numeric_type)
# if arrays are present, but none of them are double precision,
# make all numeric types float or complex(float)
if (('f' in numeric_types or 'F' in numeric_types) and
not ('d' in numeric_types or 'D' in numeric_types)):
for var in var_specs:
if hasattr(var,'numeric_type'):
if issubclass(var.numeric_type, complex):
var.numeric_type = 'F'
elif issubclass(var.numeric_type, float):
var.numeric_type = 'f'
return var_specs
def indent(st,spaces):
indention = ' '*spaces
indented = indention + st.replace('\n','\n'+indention)
# trim off any trailing spaces
indented = re.sub(r' +$',r'',indented)
return indented
def format_error_msg(errors):
#minimum effort right now...
import pprint
import cStringIO
msg = cStringIO.StringIO()
pprint.pprint(errors,msg)
return msg.getvalue()
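# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), assuming the
# long-removed scipy.weave package and a working C++ compiler. ext_function
# infers C++ types from example values found in the calling scope, as
# assign_variable_types above does:
#
# from scipy.weave.ext_tools import ext_module, ext_function
#
# a, b = 1, 2  # example values, used only for type inference
# mod = ext_module('example_ext')
# mod.add_function(ext_function('add_ints', 'return_val = a + b;', ['a', 'b']))
# mod.compile()  # generates example_ext.cpp and builds the extension
#
# import example_ext
# assert example_ext.add_ints(3, 4) == 7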
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/scipy/weave/ext_tools.py
|
Python
|
agpl-3.0
| 17,815
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#Copyright (C) DJ dj@namurlug.org http://blog.dedj.be
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import xmlrpclib
import SOAPpy
import os
from pytrainer.lib.date import Date
from pytrainer.lib.soapUtils import *
from optparse import OptionParser
class Main:
def __init__(self,options):
        # define the blog connection parameters
self.xmlrpcserver = options.xmlrpcserver
self.blogid = options.blogid
self.user = options.bloguser
self.password = options.blogpass
self.category_nbr = options.blogcategory
        # to accept comments, replace False with True
self.comment = False
        # likewise for pings
self.ping = False
self.category = [{'isPrimary': True, "categoryId" : self.category_nbr}]
self.error = ""
self.log =""
self.idrecord = options.idrecord
self.webserviceserver = SOAPpy.SOAPProxy("http://localhost:8081/")
        # try connecting to the XML-RPC server
try :
self.connect = xmlrpclib.Server(self.xmlrpcserver)
self.error = False
except :
            print "can't connect to the server"
def loadRecordInfo(self):
date = Date()
record = self.webserviceserver.getRecordInfo(self.idrecord)
self.sport = record["sport"]
self.date = record["date"]
self.distance = record["distance"]
self.time = date.second2time(float(record["time"]))
self.heure = self.time[0]
self.minute = self.time[1]
self.seconde = self.time[2]
self.beats = record["beats"]
self.comments = record["comments"]
self.average = record["average"]
self.calories = record["calories"]
self.title = record["title"]
self.upositive = record["upositive"]
self.unegative = record["unegative"]
def construction(self):
        # this method builds the post content; the section between the ''' markers is HTML -- adapt it as you like
#self.date, self.distance, self.time, self.beats, self.comments, self.average, self.calories, self.title, self.title, self.upositive, self.unegative, self.sport
description_table = '''
<p>%s %s</p>
        <p>With an average of %s km/h</p><br />
        <p>I burned %s kcalories and my heart beat %s times (on average)</p>
''' %(self.sport, self.title,self.average, self.calories, self.beats)
return description_table
def chapeau(self):
chapeau_table = '''
        <p>A %s session over a distance of %s km in %sh %sm %ss</p><br />
''' %(self.sport,self.distance,self.heure, self.minute, self.seconde)
return chapeau_table
def run(self):
#we load all info for the record
self.loadRecordInfo()
blog_desc = self.construction()
blog_chap = self.chapeau()
if self.error == False:
#post_description = "Du " + str(self.sport) + " sur une distance de " + str(self.distance) +"km " + " en " + str(self.time) + " ce qui nous fait une moyenne de " + str(self.average)
            server = xmlrpclib.Server(self.xmlrpcserver)
content = {'title' : self.date, 'description' : blog_desc, 'mt_allow_comments' : self.comment, 'mt_allow_pings' : self.ping,'mt_excerpt' : blog_chap}
post = server.metaWeblog.newPost(self.blogid, self.user, self.password, content , True)
            # set the post categories after the upload; the XML-RPC interface cannot select categories before the post exists on the blog
change_cat = server.mt.setPostCategories(post,self.user,self.password, self.category)
            self.webserviceserver.stop()  # stop the helper service (was unreachable after the return and missing its call parentheses)
            return "The post has been submitted"
else:
return self.log
parser = OptionParser()
parser.add_option("-d", "--device", dest="device")
parser.add_option("-k", "--xmlrpcserver", dest="xmlrpcserver")
parser.add_option("-u", "--bloguser", dest="bloguser")
parser.add_option("-p", "--blogpass", dest="blogpass")
parser.add_option("-l", "--blogid", dest="blogid")
parser.add_option("-c", "--blogcategory", dest="blogcategory")
parser.add_option("-g", "--gpxfile", dest="gpxfile")
parser.add_option("-i", "--idrecord", dest="idrecord")
(options,args) = parser.parse_args()
try :
x = Main(options)
x.run()
    print "the score for %s has been sent" % x.date
except xmlrpclib.Fault, f:
print "ERROR on the server\n Code %i\n Reason %s" % (f.faultCode,
f.faultString)
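# ---------------------------------------------------------------------------
# Minimal sketch of the MetaWeblog XML-RPC calls used above (not part of the
# original extension); the URL, credentials and IDs are placeholders:
#
# import xmlrpclib
#
# server = xmlrpclib.Server("http://example.org/xmlrpc.php")
# content = {'title': '2012-06-01', 'description': '<p>ride report</p>'}
# post_id = server.metaWeblog.newPost('1', 'user', 'secret', content, True)
# server.mt.setPostCategories(post_id, 'user', 'secret',
#                             [{'isPrimary': True, 'categoryId': '3'}])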
|
deuxpi/pytrainer
|
extensions/dotclear/main.py
|
Python
|
gpl-2.0
| 5,025
|
data = (
'Yi ', # 0x00
'Jun ', # 0x01
'Nong ', # 0x02
'Chan ', # 0x03
'Yi ', # 0x04
'Dang ', # 0x05
'Jing ', # 0x06
'Xuan ', # 0x07
'Kuai ', # 0x08
'Jian ', # 0x09
'Chu ', # 0x0a
'Dan ', # 0x0b
'Jiao ', # 0x0c
'Sha ', # 0x0d
'Zai ', # 0x0e
'[?] ', # 0x0f
'Bin ', # 0x10
'An ', # 0x11
'Ru ', # 0x12
'Tai ', # 0x13
'Chou ', # 0x14
'Chai ', # 0x15
'Lan ', # 0x16
'Ni ', # 0x17
'Jin ', # 0x18
'Qian ', # 0x19
'Meng ', # 0x1a
'Wu ', # 0x1b
'Ning ', # 0x1c
'Qiong ', # 0x1d
'Ni ', # 0x1e
'Chang ', # 0x1f
'Lie ', # 0x20
'Lei ', # 0x21
'Lu ', # 0x22
'Kuang ', # 0x23
'Bao ', # 0x24
'Du ', # 0x25
'Biao ', # 0x26
'Zan ', # 0x27
'Zhi ', # 0x28
'Si ', # 0x29
'You ', # 0x2a
'Hao ', # 0x2b
'Chen ', # 0x2c
'Chen ', # 0x2d
'Li ', # 0x2e
'Teng ', # 0x2f
'Wei ', # 0x30
'Long ', # 0x31
'Chu ', # 0x32
'Chan ', # 0x33
'Rang ', # 0x34
'Shu ', # 0x35
'Hui ', # 0x36
'Li ', # 0x37
'Luo ', # 0x38
'Zan ', # 0x39
'Nuo ', # 0x3a
'Tang ', # 0x3b
'Yan ', # 0x3c
'Lei ', # 0x3d
'Nang ', # 0x3e
'Er ', # 0x3f
'Wu ', # 0x40
'Yun ', # 0x41
'Zan ', # 0x42
'Yuan ', # 0x43
'Xiong ', # 0x44
'Chong ', # 0x45
'Zhao ', # 0x46
'Xiong ', # 0x47
'Xian ', # 0x48
'Guang ', # 0x49
'Dui ', # 0x4a
'Ke ', # 0x4b
'Dui ', # 0x4c
'Mian ', # 0x4d
'Tu ', # 0x4e
'Chang ', # 0x4f
'Er ', # 0x50
'Dui ', # 0x51
'Er ', # 0x52
'Xin ', # 0x53
'Tu ', # 0x54
'Si ', # 0x55
'Yan ', # 0x56
'Yan ', # 0x57
'Shi ', # 0x58
'Shi ', # 0x59
'Dang ', # 0x5a
'Qian ', # 0x5b
'Dou ', # 0x5c
'Fen ', # 0x5d
'Mao ', # 0x5e
'Shen ', # 0x5f
'Dou ', # 0x60
'Bai ', # 0x61
'Jing ', # 0x62
'Li ', # 0x63
'Huang ', # 0x64
'Ru ', # 0x65
'Wang ', # 0x66
'Nei ', # 0x67
'Quan ', # 0x68
'Liang ', # 0x69
'Yu ', # 0x6a
'Ba ', # 0x6b
'Gong ', # 0x6c
'Liu ', # 0x6d
'Xi ', # 0x6e
'[?] ', # 0x6f
'Lan ', # 0x70
'Gong ', # 0x71
'Tian ', # 0x72
'Guan ', # 0x73
'Xing ', # 0x74
'Bing ', # 0x75
'Qi ', # 0x76
'Ju ', # 0x77
'Dian ', # 0x78
'Zi ', # 0x79
'Ppwun ', # 0x7a
'Yang ', # 0x7b
'Jian ', # 0x7c
'Shou ', # 0x7d
'Ji ', # 0x7e
'Yi ', # 0x7f
'Ji ', # 0x80
'Chan ', # 0x81
'Jiong ', # 0x82
'Mao ', # 0x83
'Ran ', # 0x84
'Nei ', # 0x85
'Yuan ', # 0x86
'Mao ', # 0x87
'Gang ', # 0x88
'Ran ', # 0x89
'Ce ', # 0x8a
'Jiong ', # 0x8b
'Ce ', # 0x8c
'Zai ', # 0x8d
'Gua ', # 0x8e
'Jiong ', # 0x8f
'Mao ', # 0x90
'Zhou ', # 0x91
'Mou ', # 0x92
'Gou ', # 0x93
'Xu ', # 0x94
'Mian ', # 0x95
'Mi ', # 0x96
'Rong ', # 0x97
'Yin ', # 0x98
'Xie ', # 0x99
'Kan ', # 0x9a
'Jun ', # 0x9b
'Nong ', # 0x9c
'Yi ', # 0x9d
'Mi ', # 0x9e
'Shi ', # 0x9f
'Guan ', # 0xa0
'Meng ', # 0xa1
'Zhong ', # 0xa2
'Ju ', # 0xa3
'Yuan ', # 0xa4
'Ming ', # 0xa5
'Kou ', # 0xa6
'Lam ', # 0xa7
'Fu ', # 0xa8
'Xie ', # 0xa9
'Mi ', # 0xaa
'Bing ', # 0xab
'Dong ', # 0xac
'Tai ', # 0xad
'Gang ', # 0xae
'Feng ', # 0xaf
'Bing ', # 0xb0
'Hu ', # 0xb1
'Chong ', # 0xb2
'Jue ', # 0xb3
'Hu ', # 0xb4
'Kuang ', # 0xb5
'Ye ', # 0xb6
'Leng ', # 0xb7
'Pan ', # 0xb8
'Fu ', # 0xb9
'Min ', # 0xba
'Dong ', # 0xbb
'Xian ', # 0xbc
'Lie ', # 0xbd
'Xia ', # 0xbe
'Jian ', # 0xbf
'Jing ', # 0xc0
'Shu ', # 0xc1
'Mei ', # 0xc2
'Tu ', # 0xc3
'Qi ', # 0xc4
'Gu ', # 0xc5
'Zhun ', # 0xc6
'Song ', # 0xc7
'Jing ', # 0xc8
'Liang ', # 0xc9
'Qing ', # 0xca
'Diao ', # 0xcb
'Ling ', # 0xcc
'Dong ', # 0xcd
'Gan ', # 0xce
'Jian ', # 0xcf
'Yin ', # 0xd0
'Cou ', # 0xd1
'Yi ', # 0xd2
'Li ', # 0xd3
'Cang ', # 0xd4
'Ming ', # 0xd5
'Zhuen ', # 0xd6
'Cui ', # 0xd7
'Si ', # 0xd8
'Duo ', # 0xd9
'Jin ', # 0xda
'Lin ', # 0xdb
'Lin ', # 0xdc
'Ning ', # 0xdd
'Xi ', # 0xde
'Du ', # 0xdf
'Ji ', # 0xe0
'Fan ', # 0xe1
'Fan ', # 0xe2
'Fan ', # 0xe3
'Feng ', # 0xe4
'Ju ', # 0xe5
'Chu ', # 0xe6
'Tako ', # 0xe7
'Feng ', # 0xe8
'Mok ', # 0xe9
'Ci ', # 0xea
'Fu ', # 0xeb
'Feng ', # 0xec
'Ping ', # 0xed
'Feng ', # 0xee
'Kai ', # 0xef
'Huang ', # 0xf0
'Kai ', # 0xf1
'Gan ', # 0xf2
'Deng ', # 0xf3
'Ping ', # 0xf4
'Qu ', # 0xf5
'Xiong ', # 0xf6
'Kuai ', # 0xf7
'Tu ', # 0xf8
'Ao ', # 0xf9
'Chu ', # 0xfa
'Ji ', # 0xfb
'Dang ', # 0xfc
'Han ', # 0xfd
'Han ', # 0xfe
'Zao ', # 0xff
)
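# ---------------------------------------------------------------------------
# Illustrative note (not part of the generated table): unidecode stores one
# 256-entry tuple per Unicode block; this module (x051) covers U+5100..U+51FF,
# and a character is transliterated by indexing its low byte into ``data``:
#
# ch = u'\u514d'               # U+514D, inside this block
# print(data[ord(ch) & 0xFF])  # -> 'Mian '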
|
samuelmaudo/yepes
|
yepes/utils/unidecode/x051.py
|
Python
|
bsd-3-clause
| 4,685
|
"""Tests for Met.no config flow."""
from datetime import timedelta
from unittest.mock import patch
import pytest
from transmissionrpc.error import TransmissionError
from homeassistant import data_entry_flow
from homeassistant.components.transmission import config_flow
from homeassistant.components.transmission.const import (
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from tests.common import MockConfigEntry
NAME = "Transmission"
HOST = "192.168.1.100"
USERNAME = "username"
PASSWORD = "password"
PORT = 9091
SCAN_INTERVAL = 10
@pytest.fixture(name="api")
def mock_transmission_api():
"""Mock an api."""
with patch("transmissionrpc.Client"):
yield
@pytest.fixture(name="auth_error")
def mock_api_authentication_error():
"""Mock an api."""
with patch(
"transmissionrpc.Client", side_effect=TransmissionError("401: Unauthorized")
):
yield
@pytest.fixture(name="conn_error")
def mock_api_connection_error():
"""Mock an api."""
with patch(
"transmissionrpc.Client",
side_effect=TransmissionError("111: Connection refused"),
):
yield
@pytest.fixture(name="unknown_error")
def mock_api_unknown_error():
"""Mock an api."""
with patch("transmissionrpc.Client", side_effect=TransmissionError):
yield
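# Illustrative note (not part of the original tests): the fixtures above use
# unittest.mock.patch to replace ``transmissionrpc.Client`` for the duration
# of the ``with`` block; ``side_effect=TransmissionError(...)`` makes every
# construction attempt raise, which drives the error paths tested below.
# A standalone sketch of the same pattern:
#
# from unittest.mock import patch
#
# with patch("transmissionrpc.Client", side_effect=TransmissionError("boom")):
#     import transmissionrpc
#     try:
#         transmissionrpc.Client("localhost")
#     except TransmissionError:
#         pass  # the config flow turns this into a form error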
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.TransmissionFlowHandler()
flow.hass = hass
return flow
async def test_flow_works(hass, api):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with required fields only
result = await flow.async_step_user(
{CONF_NAME: NAME, CONF_HOST: HOST, CONF_PORT: PORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
assert result["data"]["options"][CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL
# test with all provided
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_PORT] == PORT
assert result["data"]["options"][CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL
async def test_options(hass):
"""Test updating options."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={
"name": DEFAULT_NAME,
"host": HOST,
"username": USERNAME,
"password": PASSWORD,
"port": DEFAULT_PORT,
"options": {CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
flow = init_config_flow(hass)
options_flow = flow.async_get_options_flow(entry)
result = await options_flow.async_step_init()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await options_flow.async_step_init({CONF_SCAN_INTERVAL: 10})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_SCAN_INTERVAL] == 10
async def test_import(hass, api):
"""Test import step."""
flow = init_config_flow(hass)
# import with minimum fields only
result = await flow.async_step_import(
{
CONF_NAME: DEFAULT_NAME,
CONF_HOST: HOST,
CONF_PORT: DEFAULT_PORT,
CONF_SCAN_INTERVAL: timedelta(seconds=DEFAULT_SCAN_INTERVAL),
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == DEFAULT_NAME
assert result["data"][CONF_NAME] == DEFAULT_NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT
assert result["data"]["options"][CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL
# import with all
result = await flow.async_step_import(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
CONF_SCAN_INTERVAL: timedelta(seconds=SCAN_INTERVAL),
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_PORT] == PORT
assert result["data"]["options"][CONF_SCAN_INTERVAL] == SCAN_INTERVAL
async def test_integration_already_exists(hass, api):
"""Test we only allow a single config flow."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "one_instance_allowed"
async def test_error_on_wrong_credentials(hass, auth_error):
"""Test with wrong credentials."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {
CONF_USERNAME: "wrong_credentials",
CONF_PASSWORD: "wrong_credentials",
}
async def test_error_on_connection_failure(hass, conn_error):
"""Test when connection to host fails."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_error_on_unknown_error(hass, unknown_error):
    """Test on an unspecified API error."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
|
Cinntax/home-assistant
|
tests/components/transmission/test_config_flow.py
|
Python
|
apache-2.0
| 7,297
|
def application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
#return '<h1>Hello, NCIC,ICT,CAS!</h1>'
return '<h1> %s!</h1>' % (environ['PATH_INFO'][1:] or 'NCIC,ICT,CAS')
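# Minimal sketch for running this WSGI app locally (not part of the original
# file), using only the standard library wsgiref server:
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('', 8000, application)
    httpd.serve_forever()  # then visit e.g. http://localhost:8000/yourname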
|
haohaibo/tutorial
|
python/web-dev/hello.py
|
Python
|
mit
| 222
|
# -*- coding: utf-8 -*-
"""nodoctest
Configure and Start a Notebook Server
The :class:`NotebookObject` is used to configure and launch a Sage
Notebook server.
"""
#############################################################################
# Copyright (C) 2007 William Stein <wstein@gmail.com>
# (C) 2015 J. Miguel Farto <jmfarto@gmail.com>
# Distributed under the terms of the GNU General Public License (GPL)
# The full text of the GPL is available at:
# http://www.gnu.org/licenses/
#############################################################################
from __future__ import absolute_import
from __future__ import print_function
import os
import shutil
import tempfile
import pexpect
from ..run import NotebookFrontend
from ..gui import notebook as _notebook
class NotebookObject:
r"""
Start the Sage Notebook server. More details about using these
options, as well as tips and tricks, may be available at `this
Sage wiki page`_. If a notebook server is already running in the
directory, this will open a browser to the running notebook.
INPUT:
- ``directory`` -- string; directory that contains the Sage
notebook files; the default is
``.sage/sage_notebook.sagenb``, in your home directory.
- ``port`` -- integer (default: ``8080``), port to serve the
notebook on.
- ``interface`` -- string (default: ``'localhost'``), address
of network interface to listen on; give ``''`` to listen on
all interfaces.
- ``port_tries`` -- integer (default: ``0``), number of
additional ports to try if the first one doesn't work (*not*
implemented).
- ``secure`` -- boolean (default: ``False``) if True use https
so all communication, e.g., logins and passwords, between
web browsers and the Sage notebook is encrypted via SSL. You
must have OpenSSL installed to use this feature, or if you compile
Sage yourself, have the OpenSSL development libraries installed.
*Highly recommended!*
      When ``notebook()`` is run for the first time with ``secure=True``, it
will generate new keys and store them to ``.sage/notebook/``.
Remove this when you want to generate new keys, for example if an
older version of Sage has generated keys that are too short for
current browsers.
- ``reset`` -- boolean (default: ``False``) if True allows you
to set the admin password. Use this if you forget your
admin password.
- ``accounts`` -- boolean (default: ``False``) if True, any
visitor to the website will be able to create a new account.
If False, only the admin can create accounts (currently,
this can only be done by running with ``accounts=True`` and
shutting down the server properly (``SIG_INT`` or
``SIG_TERM``), or on the command line with, e.g.,
::
from sagenb.notebook.notebook import load_notebook
nb = load_notebook("directory_to_run_sage_in")
user_manager = nb.user_manager
nb.conf['accounts'] = True
user_manager.add_user("username", "password", "email@place",
"user")
nb.save()
- ``automatic_login`` -- boolean (default: True) whether to pop up
a web browser and automatically log into the server as admin. You
can override the default browser by setting the ``SAGE_BROWSER``
environment variable, e.g., by putting
::
export SAGE_BROWSER="firefox"
in the file .bashrc in your home directory.
- ``upload`` -- string (default: None) Full path to a local file
(sws, txt, zip) to be uploaded and turned into a worksheet(s).
This is equivalent to manually uploading a file via
``http://localhost:8080/upload`` or to fetching
``http://localhost:8080/upload_worksheet?url=file:///...``
in a script except that (hopefully) you will already be
logged in.
.. warning::
If you are running a server for others to log into, set
``automatic_login=False``. Otherwise, all of the worksheets on
the entire server will be loaded when the server automatically
logs into the admin account.
- ``timeout`` -- integer (default: 0) seconds until idle
worksheet sessions automatically timeout, i.e., the
corresponding Sage session terminates. 0 means "never
timeout". If your server is running out of memory, setting a
timeout can be useful as this will free the memory used by
idle sessions.
- ``doc_timeout`` -- integer (default: 600) seconds until idle
live documentation worksheet sessions automatically timeout,
i.e., the corresponding Sage session terminates.
0 means "never timeout".
- ``server_pool`` -- list of strings (default: None) list;
this option specifies that worksheet processes run as a
separate user (chosen from the list in the ``server_pool``
-- see below).
.. note::
If you have problems with the server certificate hostname not
matching, do ``notebook.setup()``.
.. note::
The ``require_login`` option has been removed. Use ``automatic_login``
to control automatic logins instead---``automatic_login=False``
corresponds to ``require_login=True``.
EXAMPLES:
1. I just want to run the Sage notebook. Type::
notebook()
2. I want to run the Sage notebook server on a remote machine and
be the only person allowed to log in. Type::
notebook(interface='', secure=True)
the first time you do this you'll be prompted to set an
administrator password. Use this to login. NOTE: You may have
to run ``notebook.setup()`` again and change the hostname.
ANOTHER NOTE: You must have installed pyOpenSSL in order to use
secure mode; see the top-level Sage README file or the "Install
from Source Code" section in the Sage manual for more
information.
3. I want to create a Sage notebook server that is open to anybody
in the world to create new accounts. To run the Sage notebook
publicly (1) at a minimum run it from a chroot jail or inside a
virtual machine (see `this Sage wiki page`_) and (2) use a
command like::
notebook(interface='', server_pool=['sage1@localhost'],
ulimit='-v 500000', accounts=True, automatic_login=False)
The server_pool option specifies that worksheet processes run
as a separate user. The ulimit option restricts the memory
available to each worksheet process to 500 MB. See help on
the ``accounts`` option above.
Be sure that ``sage_notebook.sagenb/users.pickle`` and the
contents of ``sage_notebook.sagenb/backups`` are chmod
``og-rwx``, i.e., only readable by the notebook process, since
otherwise any user can read ``users.pickle``, which contains
user email addresses and account information (passwords are
stored hashed, so fewer worries there). You will need to use
the ``directory`` option to accomplish this.
INPUT: (more advanced)
- ``server_pool`` -- list of strings (initial default: None),
if given, should be a list like \['sage1@localhost',
'sage2@localhost'\], where you have setup ssh keys so that
typing::
ssh sage1@localhost
logs in without requiring a password, e.g., by typing
``ssh-keygen`` as the notebook server user, then putting
``~/.ssh/id_rsa.pub`` as the file ``.ssh/authorized_keys``.
Note: you have to get the permissions of files and
directories just right -- see `this Sage wiki page`_ for
more details.
Files between the main Sage process and the ``server_pool``
workers are transferred through ``/tmp`` by default. If the
environment variable :envvar:`SAGENB_TMPDIR` or
:envvar:`TMPDIR` exists, that directory is used instead. This
directory must be shared, so if the machines are separate the
server machine must NFS-export ``/tmp`` or
:envvar:`SAGENB_TMPDIR`.
- ``server`` -- string ("twistd" (default) or "flask"). The server
used to serve content.
- ``profile`` -- True, False, or a file prefix (default: False --
no profiling). If True, profiling data is saved to a
randomly-named file like `sagenb-*-profile*.stats` in the
$DOT_SAGE directory. If a string, that string is used as a
prefix for the pstats data file.
- ``ulimit`` -- string (initial default: None -- leave as is),
if given and ``server_pool`` is also given, the worksheet
processes are run with these constraints. See the ulimit
documentation. Common options include:
- ``-t`` The maximum amount of cpu time in seconds.
NOTE: For Sage, ``-t`` is the wall time, not cpu time.
- ``-u`` The maximum number of processes available to a
single user.
- ``-v`` The maximum amount of virtual memory available
to the process.
Values are in 1024-byte increments, except for ``-t``, which
is in seconds, and ``-u``, which is a positive
integer. Example: ulimit="-v 400000 -t 30"
.. note::
The values of ``server_pool`` and ``ulimit`` default to what
they were last time the notebook command was called.
OTHER NOTES:
- If you create a file ``\\$DOT_SAGE/notebook.css`` then it
will get applied when rendering the notebook HTML. This
allows notebook administrators to customize the look of the
notebook. Note that by default ``\\$DOT_SAGE`` is
``\\$HOME/.sage``.
.. _this Sage wiki page: http://wiki.sagemath.org/StartingTheNotebook
"""
def __init__(self, *args, **kwargs):
self.nb_frend = NotebookFrontend()
def __call__(self, *args, **kwds):
return self.notebook(*args, **kwds)
def notebook(self,
directory=None,
port=8080,
interface='localhost',
port_tries=50,
secure=False,
reset=False,
accounts=None,
openid=False,
server_pool=None,
ulimit='',
timeout=None,
doc_timeout=None,
upload=None,
automatic_login=True,
start_path='',
fork=False,
quiet=False,
server='twistd',
profile=False,
subnets=None,
require_login=None,
open_viewer=None,
address=None):
loc = locals().copy()
del loc['self']
loc['no_automatic_login'] = not loc['automatic_login']
del loc['automatic_login']
args = []
for arg in ['directory', 'accounts', 'server_pool',
'timeout', 'doc_timeout', 'upload', 'subnets',
'require_login', 'open_viewer', 'address']:
val = loc.get(arg, None)
if val is not None:
args.append('--{}'.format(arg))
args.append(str(val))
for arg in ['port', 'interface', 'port_tries', 'ulimit', 'start_path',
'server']:
val = loc.get(arg, None)
if val:
args.append('--{}'.format(arg))
args.append(str(val))
for arg in ['secure', 'reset', 'openid', 'no_automatic_login', 'fork',
'quiet', 'profile']:
if loc.get(arg):
args.append('--{}'.format(arg))
self.nb_frend(args)
def setup(self, *args, **kwargs):
self.nb_frend.parse(['--secure'])
self.nb_frend.update_conf()
self.nb_frend.notebook_setup(*args, **kwargs)
notebook = NotebookObject()
def inotebook(*args, **kwds):
"""
Exactly the same as ``notebook(...)`` but with ``secure=False``.
"""
kwds['secure'] = False
notebook(*args, **kwds)
def test_notebook(admin_passwd, secure=False, directory=None, port=8050,
interface='localhost', verbose=False):
"""
This function is used to test notebook server functions.
EXAMPLES::
sage: from sagenb.notebook.notebook_object import test_notebook
sage: passwd = str(randint(1,1<<128))
sage: nb = test_notebook(passwd, interface='localhost', port=8060)
sage: from six.moves.urllib.request import urlopen
sage: h = urlopen('http://localhost:8060')
sage: homepage = h.read()
sage: h.close()
sage: 'html' in homepage
True
sage: nb.dispose()
"""
if directory is None:
directory = tempfile.mkdtemp()
if not os.path.exists(directory):
os.makedirs(directory)
nb = _notebook.load_notebook(directory)
nb.set_accounts(True)
nb.add_user('admin', admin_passwd, '')
nb.set_accounts(False)
nb.save()
p = notebook(directory=directory, accounts=True, secure=secure, port=port,
interface=interface, automatic_login=False, fork=True,
quiet=True)
p.expect("Starting factory")
def dispose():
try:
p.send('\x03') # control-C
except pexpect.EOF:
pass
p.close(force=True)
shutil.rmtree(nb.dir)
p.dispose = dispose
if verbose:
print("Notebook started.")
return p
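# A standalone sketch of the keyword-to-flag translation performed by
# ``NotebookObject.notebook`` above (illustrative only; ``_flags`` is not
# part of sagenb's API, and the real method distinguishes several argument
# groups rather than inspecting types).
def _flags(**kwds):
    args = []
    for key, val in sorted(kwds.items()):
        if isinstance(val, bool):
            if val:
                args.append('--%s' % key)  # boolean options become bare flags
        elif val is not None:
            args.extend(['--%s' % key, str(val)])  # valued options get an argument
    return args
# _flags(port=8080, secure=True, upload=None) == ['--port', '8080', '--secure']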
|
migeruhito/sagenb
|
sagenb/notebook/notebook_object.py
|
Python
|
gpl-3.0
| 14,029
|
import ast
import json
import os
import re
import time
import urllib2
import environment
import framework
import utils
def cases_iterator(cases):
for case in cases:
if isinstance(case, MultiCase):
for c in case:
yield c
else:
yield case
class Log(object):
def __init__(self, line):
self.line = line
try:
(self.method,
self.remote_address,
self.username,
self.start_time,
self.end_time,
self.total_time,
self.plan_type,
self.original_sql,
self.bind_variables,
self.number_of_queries,
self.rewritten_sql,
self.query_sources,
self.mysql_response_time,
self.waiting_for_connection_time,
self.rowcount,
self.size_of_response,
self.cache_hits,
self.cache_misses,
self.cache_absent,
self.cache_invalidations,
self.error) = line.strip().split('\t')
except ValueError:
print 'Wrong looking line: %r' % line
raise
def check(self, case):
if isinstance(case, basestring):
return []
if isinstance(case, MultiCase):
return sum((self.check(subcase) for subcase in case.sqls_and_cases), [])
failures = []
for method in dir(self):
if method.startswith('check_'):
if not case.is_testing_cache and method.startswith('check_cache_'):
continue
fail = getattr(self, method)(case)
if fail:
failures.append(fail)
return failures
def fail(self, reason, should, is_):
return 'FAIL: %s: %r != %r' % (reason, should, is_)
def check_original_sql(self, case):
# The following is necessary because Python and Go use different
# notations for bindings: %(foo)s vs :foo.
sql = re.sub(r'%\((\w+)\)s', r':\1', case.sql)
# Eval is a cheap hack - Go always uses doublequotes, Python
# prefers single quotes.
if sql != eval(self.original_sql):
return self.fail('wrong sql', case.sql, self.original_sql)
def check_rowcount(self, case):
if case.rowcount is not None and int(self.rowcount) != case.rowcount:
return self.fail('Bad rowcount', case.rowcount, self.rowcount)
def check_cache_hits(self, case):
if case.cache_hits is not None and int(self.cache_hits) != case.cache_hits:
return self.fail('Bad Cache Hits', case.cache_hits, self.cache_hits)
def check_cache_absent(self, case):
if (case.cache_absent is not None and
int(self.cache_absent) != case.cache_absent):
return self.fail('Bad Cache Absent', case.cache_absent, self.cache_absent)
def check_cache_misses(self, case):
if (case.cache_misses is not None and
int(self.cache_misses) != case.cache_misses):
return self.fail(
'Bad Cache Misses', case.cache_misses, self.cache_misses)
def check_cache_invalidations(self, case):
if (case.cache_invalidations is not None and
int(self.cache_invalidations) != case.cache_invalidations):
return self.fail(
'Bad Cache Invalidations', case.cache_invalidations,
self.cache_invalidations)
def check_query_plan(self, case):
if case.query_plan is not None and case.query_plan != self.plan_type:
return self.fail('Bad query plan', case.query_plan, self.plan_type)
def check_rewritten_sql(self, case):
if case.rewritten is None:
return
queries = []
for q in ast.literal_eval(self.rewritten_sql).split(';'):
q = q.strip()
if q and q != '*/':
queries.append(q)
if case.rewritten != queries:
return self.fail('Bad rewritten SQL', case.rewritten, queries)
def check_number_of_queries(self, case):
if (case.rewritten is not None and
int(self.number_of_queries) != len(case.rewritten)):
return self.fail(
'wrong number of queries', len(case.rewritten),
int(self.number_of_queries))
class Case(object):
def __init__(
self, sql, bindings=None, result=None, rewritten=None, doc='',
rowcount=None, cache_table=None, query_plan=None, cache_hits=None,
cache_misses=None, cache_absent=None, cache_invalidations=None,
remote_address='[::1]'):
# For all cache_* parameters, a number n means "check this value
# is exactly n," while None means "I am not interested in this
# value, leave it alone."
self.sql = sql
self.bindings = bindings or {}
self.result = result
if isinstance(rewritten, basestring):
rewritten = [rewritten]
self.rewritten = rewritten
self.rowcount = rowcount
self.doc = doc
self.query_plan = query_plan
self.cache_table = cache_table
self.cache_hits = cache_hits
self.cache_misses = cache_misses
self.cache_absent = cache_absent
self.cache_invalidations = cache_invalidations
self.remote_address = remote_address
@property
def is_testing_cache(self):
return any(attr is not None for attr in [self.cache_hits,
self.cache_misses,
self.cache_absent,
self.cache_invalidations])
def run(self, cursor, env):
failures = []
env.querylog.reset()
if self.is_testing_cache:
tstart = self.table_stats(env)
if self.sql in ('begin', 'commit', 'rollback'):
getattr(cursor._conn, self.sql)()
else:
cursor.execute(self.sql, self.bindings)
if self.result is not None:
result = list(cursor)
if self.result != result:
failures.append('%r:\n%s !=\n%s' % (self.sql, self.result, result))
for i in range(30):
lines = env.querylog.tailer.readLines()
if not lines:
time.sleep(0.1)
continue
break
for line in lines:
case_failures = Log(line).check(self)
if case_failures:
failures.extend(case_failures)
if self.is_testing_cache:
tdelta = self.table_stats_delta(tstart, env)
if self.cache_hits is not None and tdelta['Hits'] != self.cache_hits:
failures.append(
'Bad Cache Hits: %s != %s' % (self.cache_hits, tdelta['Hits']))
if (self.cache_absent is not None and
tdelta['Absent'] != self.cache_absent):
failures.append(
'Bad Cache Absent: %s != %s' %
(self.cache_absent, tdelta['Absent']))
if (self.cache_misses is not None and
tdelta['Misses'] != self.cache_misses):
failures.append(
'Bad Cache Misses: %s != %s' %
(self.cache_misses, tdelta['Misses']))
if (self.cache_invalidations is not None and
tdelta['Invalidations'] != self.cache_invalidations):
failures.append(
'Bad Cache Invalidations: %s != %s' %
(self.cache_invalidations, tdelta['Invalidations']))
return failures
def table_stats_delta(self, old, env):
result = {}
new = self.table_stats(env)
for k, v in new.items():
result[k] = new[k] - old[k]
return result
def table_stats(self, env):
return env.http_get('/debug/table_stats')[self.cache_table]
def __str__(self):
return 'Case %r' % self.doc
class MultiCase(object):
def __init__(self, doc, sqls_and_cases):
self.doc = doc
self.sqls_and_cases = sqls_and_cases
def run(self, cursor, env):
failures = []
for case in self.sqls_and_cases:
if isinstance(case, basestring):
if case in ('begin', 'commit', 'rollback'):
getattr(cursor._conn, case)()
else:
cursor.execute(case)
continue
failures += case.run(cursor, env)
return failures
def __iter__(self):
return iter(self.sqls_and_cases)
def __str__(self):
return 'MultiCase: %s' % self.doc
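# A minimal self-check of the Log/Case machinery above (the 21 tab-separated
# fields are synthetic and illustrative, not real query-service output).
if __name__ == '__main__':
    _fields = ['Execute', '[::1]', 'user', '0', '1', '1', 'PASS_SELECT',
               '"select 1 from dual"', '{}', '1', '"select 1 from dual"',
               'mysql', '0.001', '0', '1', '10', '0', '0', '0', '0', '-']
    _case = Case('select 1 from dual', rowcount=1, query_plan='PASS_SELECT')
    # No cache_* values are set, so only the non-cache check_* methods run.
    assert Log('\t'.join(_fields)).check(_case) == []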
|
xgwubin/vitess
|
test/queryservice_tests/cases_framework.py
|
Python
|
bsd-3-clause
| 7,742
|
from django import forms
class UploadFileForm(forms.Form):
title = forms.CharField(max_length=50)
file = forms.FileField()
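# A minimal usage sketch (assumption: called from a Django view; the view
# name and responses are illustrative, not part of this module).
from django.http import HttpResponse

def handle_upload(request):
    form = UploadFileForm(request.POST, request.FILES)
    if form.is_valid():
        # form.cleaned_data['title'] and form.cleaned_data['file'] are
        # available here for saving or further processing.
        return HttpResponse('uploaded')
    return HttpResponse('invalid form', status=400)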
|
rangwala/dmgrader
|
gradingwebapp/gmugrader/polls/forms.py
|
Python
|
gpl-2.0
| 130
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
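# Hedged usage sketch (shown as comments because it needs a configured
# Django environment with migrations applied; values are illustrative):
# Project.objects.create(name='demo')
# Project.objects.filter(name__startswith='de').count()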
|
chbrun/behavui
|
behavui/projects/models.py
|
Python
|
gpl-2.0
| 180
|
def <weak_warning descr="Type hinting is missing for function definition">foo</weak_warning>(x, y):
pass
|
paplorinc/intellij-community
|
python/testData/inspections/PyMissingTypeHintsInspection/noAnnotations.py
|
Python
|
apache-2.0
| 108
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
import uuid
from flask.signals import Namespace
from flask.blueprints import Blueprint
from kazoo.client import KazooClient
from kazoo.security import make_digest_acl
try:
# noinspection PyProtectedMember
from flask import _app_ctx_stack as stack
except ImportError:
# noinspection PyProtectedMember
from flask import _request_ctx_stack as stack
__all__ = (
'FlaskZookeeperClient',
)
connection_state_changed = Namespace().signal('state-change')
class FlaskZookeeperClient(object):
def __init__(self, app=None):
self.uuid = str(uuid.uuid4())
self.app = None
self.blueprint = None
self.blueprint_setup = None
if app is not None:
self.app = app
self.init_app(app)
def init_app(self, app):
if isinstance(app, Blueprint):
app.record(self._deferred_blueprint_init)
else:
self._init_app(app)
def _deferred_blueprint_init(self, setup_state):
self._init_app(setup_state.app)
def _init_app(self, app):
"""Initialize the `app` for use with this
:class:`~FlaskZookeeperClient`. This is called automatically if `app`
is passed to :meth:`~FlaskZookeeperClient.__init__`.
:param app: Flask application instance.
"""
self.app = app
self.app.config.setdefault('KAZOO_HOSTS', '127.0.0.1:2181')
self.app.config.setdefault('KAZOO_START_TIMEOUT', 15)
self.app.config.setdefault('KAZOO_SESSION_TIMEOUT', 10.0)
self.app.config.setdefault('KAZOO_RETRY', {'max_delay': 3600}) # 1 hour
if hasattr(self.app, 'teardown_appcontext'):
self.app.teardown_appcontext(self.teardown)
else:
self.app.teardown_request(self.teardown)
def connect(self):
"""Initialize a connection to the Zookeeper quorum.
:return: Kazoo client object as connection.
"""
client_kwargs = dict(
hosts=self.app.config['KAZOO_HOSTS'],
timeout=self.app.config['KAZOO_SESSION_TIMEOUT'],
connection_retry=self.app.config['KAZOO_RETRY'],
command_retry=self.app.config['KAZOO_RETRY']
)
# Are ACL credentials configured?
username = self.app.config.get('KAZOO_ACL_USERNAME', None)
password = self.app.config.get('KAZOO_ACL_PASSWORD', None)
if username and password:
client_kwargs.update(dict(
default_acl=[
make_digest_acl(
username=username,
password=password,
read=self.app.config.get(
'KAZOO_ACL_READ', False
),
write=self.app.config.get(
'KAZOO_ACL_WRITE', False
),
create=self.app.config.get(
'KAZOO_ACL_CREATE', False
),
delete=self.app.config.get(
'KAZOO_ACL_DELETE', False
),
admin=self.app.config.get(
'KAZOO_ACL_ADMIN', False
),
all=self.app.config.get(
'KAZOO_ACL_ALL', False
)
)
],
auth_data=[("digest", ":".join((username, password)))],
))
client = KazooClient(**client_kwargs)
client.start(timeout=self.app.config['KAZOO_START_TIMEOUT'])
client.add_listener(self.connection_state_listener)
return client
# noinspection PyUnusedLocal
def teardown(self, exception):
ctx = stack.top
client = getattr(ctx, self.uuid, None)
if client:
client.stop()
client.close()
del client
setattr(ctx, self.uuid, None)
setattr(self, self.uuid, None)
@property
def connection(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, self.uuid):
setattr(ctx, self.uuid, self.connect())
return getattr(ctx, self.uuid)
def connection_state_listener(self, state):
"""Publishes state changes to a Flask signal"""
connection_state_changed.send(self, state=state)
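# A minimal usage sketch (assumptions: a ZooKeeper server reachable at
# 127.0.0.1:2181; the app and route names are illustrative).
from flask import Flask

app = Flask(__name__)
app.config['KAZOO_HOSTS'] = '127.0.0.1:2181'
fzc = FlaskZookeeperClient(app)

@app.route('/zk-children')
def zk_children():
    # fzc.connection lazily starts a KazooClient tied to the app context;
    # it is stopped again in FlaskZookeeperClient.teardown.
    return ', '.join(fzc.connection.get_children('/'))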
|
cdumay/flask-zookeeper
|
src/flask_zookeeper.py
|
Python
|
apache-2.0
| 4,510
|
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Extract enumeration types from z3_api.h
#
# Author: Leonardo de Moura (leonardo)
############################################
import re
import os
blank_pat = re.compile("^ *$")
comment_pat = re.compile("^ *//.*$")
typedef_pat = re.compile("typedef enum *")
typedef2_pat = re.compile("typedef enum { *")
openbrace_pat = re.compile("{ *")
closebrace_pat = re.compile("}.*;")
api = open('..%slib%sz3_api.h' % (os.sep, os.sep), 'r')
z3consts = open('z3consts.py', 'w')
z3consts.write('# Automatically generated file, generator: mk_z3consts.py\n\n')
SEARCHING = 0
FOUND_ENUM = 1
IN_ENUM = 2
mode = SEARCHING
decls = {}
idx = 0
linenum = 1
for line in api:
m1 = blank_pat.match(line)
m2 = comment_pat.match(line)
if m1 or m2:
# skip blank lines and comments
linenum = linenum + 1
elif mode == SEARCHING:
m = typedef_pat.match(line)
if m:
mode = FOUND_ENUM
m = typedef2_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
elif mode == FOUND_ENUM:
m = openbrace_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
else:
assert False, "Invalid z3_api.h, line: %s" % linenum
else:
assert mode == IN_ENUM
words = re.split('[^\-a-zA-Z0-9_]+', line)
m = closebrace_pat.match(line)
if m:
name = words[1]
z3consts.write('# enum %s\n' % name)
for k, i in decls.iteritems():
z3consts.write('%s = %s\n' % (k, i))
z3consts.write('\n')
mode = SEARCHING
else:
if words[2] != '':
if len(words[2]) > 1 and words[2][1] == 'x':
idx = int(words[2], 16)
else:
idx = int(words[2])
decls[words[1]] = idx
idx = idx + 1
linenum = linenum + 1
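# Illustrative input/output for the state machine above (assumption: a
# typical z3_api.h fragment; the emitted order may vary because decls is
# an ordinary dict):
#
#   typedef enum
#   {
#       Z3_L_FALSE = -1,
#       Z3_L_UNDEF,
#       Z3_L_TRUE,
#   } Z3_lbool;
#
# produces in z3consts.py:
#
#   # enum Z3_lbool
#   Z3_L_FALSE = -1
#   Z3_L_UNDEF = 0
#   Z3_L_TRUE = 1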
|
rychipman/858-labs
|
symex/z3py/mk_z3consts.py
|
Python
|
mit
| 2,073
|
import random
print(random.sample(range(1,1000),200))
|
DavidFHCh/MiRecocidoRecalentadoSimulado
|
resources/rand.py
|
Python
|
gpl-3.0
| 56
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
class DataFlow:
def __init__(self,dfName,envName,fromName,fromType,toName,toType,dfAssets):
self.theName = dfName
self.theEnvironmentName = envName
self.theFromName = fromName
self.theFromType = fromType
self.theToName = toName
self.theToType = toType
self.theAssets = dfAssets
def name(self): return self.theName
def environment(self): return self.theEnvironmentName
def fromName(self): return self.theFromName
def fromType(self): return self.theFromType
def toName(self): return self.theToName
def toType(self): return self.theToType
def assets(self): return self.theAssets
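# Hedged construction sketch (all values are illustrative only):
# df = DataFlow('submit credentials', 'Default', 'Browser', 'entity',
#               'Web Server', 'process', ['User credentials'])
# df.name() -> 'submit credentials'; df.assets() -> ['User credentials']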
|
nathanbjenx/cairis
|
cairis/core/DataFlow.py
|
Python
|
apache-2.0
| 1,448
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class IrModel(models.Model):
_inherit = 'ir.model'
is_mail_thread_sms = fields.Boolean(
string="Mail Thread SMS", default=False,
store=False, compute='_compute_is_mail_thread_sms', search='_search_is_mail_thread_sms',
help="Whether this model supports messages and notifications through SMS",
)
@api.depends('is_mail_thread')
def _compute_is_mail_thread_sms(self):
for model in self:
if model.is_mail_thread:
ModelObject = self.env[model.model]
potential_fields = ModelObject._sms_get_number_fields() + ModelObject._sms_get_partner_fields()
if any(fname in ModelObject._fields for fname in potential_fields):
model.is_mail_thread_sms = True
continue
model.is_mail_thread_sms = False
def _search_is_mail_thread_sms(self, operator, value):
thread_models = self.search([('is_mail_thread', '=', True)])
valid_models = self.env['ir.model']
for model in thread_models:
ModelObject = self.env[model.model]
potential_fields = ModelObject._sms_get_number_fields() + ModelObject._sms_get_partner_fields()
if any(fname in ModelObject._fields for fname in potential_fields):
valid_models |= model
search_sms = (operator == '=' and value) or (operator == '!=' and not value)
if search_sms:
return [('id', 'in', valid_models.ids)]
return [('id', 'not in', valid_models.ids)]
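# Hedged usage sketch (shown as comments because it needs a running Odoo
# registry):
# self.env['ir.model'].search([('is_mail_thread_sms', '=', True)])
# returns the mail-thread models that expose an SMS number or partner field,
# via the _search_is_mail_thread_sms domain above.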
|
ddico/odoo
|
addons/sms/models/ir_model.py
|
Python
|
agpl-3.0
| 1,684
|
#!/usr/bin/env python2.7
# -*- coding: UTF-8 -*-
# © Copyright 2013 axujen, <axujen at gmail.com>. All Rights Reserved.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, json
from ConfigParser import ConfigParser
from argparse import ArgumentParser
from ast import literal_eval
# Config defaults.
config = ConfigParser()
config_base = '$HOME/.gentoobot/'
config_base = os.path.normpath(os.path.expandvars(os.path.expanduser(config_base)))
stored_conf = {}
config.add_section('LASTFM')
config.set('LASTFM', 'api_pub', 'af6a640a95ace00ee058282b70846ba0')
config.set('LASTFM', 'api_secret', '1f8545b425f3f0de178a61c974732057')
config.add_section('CONNECTION')
config.set('CONNECTION', 'channel', '#Gentoobot')
config.set('CONNECTION', 'nick', 'GentooTestBot')
config.set('CONNECTION', 'password', 'None')
config.set('CONNECTION', 'port', '6667')
config.set('CONNECTION', 'reconnect', '5')
config.set('CONNECTION', 'server', 'irc.rizon.net')
config.add_section('OPTIONS')
config.set('OPTIONS', 'chattiness', '5')
config.set('OPTIONS', 'verbose', 'False')
# Arguments
arguments = ArgumentParser(argument_default=None)
arguments.add_argument('-s', '--server', dest='server', help='irc server to connect to')
arguments.add_argument('-p', '--port', type=int, dest='port', help='server port')
arguments.add_argument('-c', '--channel', dest='channel', help='channel to join')
arguments.add_argument('-n', '--nick', dest='nick', help="bot's nick")
arguments.add_argument('--pass', dest='password', help='Bots login password')
arguments.add_argument('-r', '--reconnect', dest='reconnect', type=int,
help='reconnection interval when kicked from a channel or when disconnected')
arguments.add_argument('--config', dest='config', default=config_base,
help='specify an alternative config folder')
arguments.add_argument('-v', '--verbose', dest='verbose', action='store_true',
default=None, help='Turn on verbose output')
from gentoobot.logger import logger
def get_config(section):
"""Return a dictionary with options necessary for running the bot"""
global config_base, stored_conf
section = section.upper()
if section not in stored_conf:
args = vars(arguments.parse_args())
config_base = os.path.normpath(os.path.expanduser(os.path.expandvars(args['config'])))
configfile = os.path.join(config_base, 'config.cfg')
if not os.path.exists(config_base):
logger.warning('Creating new configuration directory %s' % config_base)
os.makedirs(config_base)
if not os.path.isdir(config_base):
raise ValueError('%s is not a directory' % config_base)
if not os.path.exists(configfile):
logger.warning('Creating new configuration file "%s"' % configfile)
with open(configfile, 'wb') as f:
config.write(f)
else:
logger.warning('Loading configuration from %s' % configfile)
config.read(configfile)
for section in config.sections():
options = dict(config.items(section))
for opt in options:
try:
options[opt] = literal_eval(options[opt])
except (ValueError, SyntaxError):
continue
for arg in args:
if args[arg] is not None:
options[arg] = args[arg]
stored_conf[section] = options
return stored_conf[section]
def save_db(server, db, obj):
"""Save a database (json) inside a folder."""
folder = os.path.join(config_base, server)
if not os.path.exists(folder):
logger.warning('Creating new server db folder "%s"' % folder)
os.makedirs(folder)
if not os.path.isdir(folder):
raise ValueError('"%s" is not a directory' % folder)
db_file = os.path.join(folder, db)
if os.path.exists(db_file) and not os.path.isfile(db_file):
raise ValueError('"%s" is not a file' % db_file)
with open(db_file, 'w') as db_file:
json.dump(obj, db_file, indent=4)
def load_db(server, db):
"""Load a json database."""
db_file = os.path.join(config_base, server, db)
logger.info('Loading '+db_file)
if not os.path.exists(db_file) or not os.path.isfile(db_file):
return None
with open(db_file, 'r') as db_file:
return json.load(db_file)
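# Hedged usage sketch (shown as comments; requires the package's logger and
# a writable config directory -- the values reflect the defaults set above):
# conn = get_config('connection')
# conn['server'] -> 'irc.rizon.net'
# conn['port'] -> 6667 (literal_eval turns the stored string into an int)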
|
axujen/gentoo-bot
|
src/gentoobot/config.py
|
Python
|
gpl-3.0
| 4,620
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^ingredients/$', views.get_all_ingredients, name='detail'),
url(r'^ingredients/(?P<question_id>[A-Za-z0-9]+)/$', views.get_price_by_name, name='detail'),
]
|
DaBbleR23/Pizza-web
|
pizzaweb_backend/pizzaweb/backend/urls.py
|
Python
|
mit
| 240
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
# Imports gradient definitions.
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
def _clip(params, ids, max_norm):
"""Helper function for _embedding_lookup_and_transform.
This function optionally clips embeddings to an l2-norm of max_norm.
Args:
params: A `Tensor` of embeddings retrieved by `gather`.
ids: The `ids` argument that was passed to `gather`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value.
Returns:
A `Tensor` with the same type as `params`.
"""
def _rank(x):
"""Helper function to retrieve the rank of a tensor.
Args:
x: Something convertible to `Tensor`.
Returns:
Either a pair `(rank, True)` where `rank` is an integer or a pair
`(rank, False)` where `rank` is an integer `Tensor`. In either case,
`rank` is the rank of `x`.
"""
rank = ops.convert_to_tensor(x).get_shape().ndims
if rank:
return rank, True
else:
return array_ops.rank(x), False
if max_norm is None:
return params
ids_rank, ids_static = _rank(ids)
params_rank, params_static = _rank(params)
return clip_ops.clip_by_norm(
params,
max_norm,
axes=(list(range(ids_rank, params_rank))
if ids_static and params_static
else math_ops.range(ids_rank, params_rank)))
def _embedding_lookup_and_transform(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
transform_fn=None):
"""Helper function for embedding_lookup and _compute_sampled_logits.
This function is a generalization of embedding_lookup that optionally
applies a caller-specified transformation to each embedding. This is
done through the `transform_fn` argument. If provided, the function is
applied to each partitioned tensor of retrieved embeddings, colocated
with the embeddings. This function will be called with a single `Tensor`
argument of the same type as the `params` tensor and should return a
`Tensor`. The shape of the argument will be the same as `params` except
for the size of the first dimension. The first dimension of the result's
shape must be the same size as the argument's.
Args:
params: See embedding_lookup.
ids: See embedding_lookup.
partition_strategy: See embedding_lookup.
name: See embedding_lookup.
max_norm: See embedding_lookup.
transform_fn: An optional function to apply to each retrieved embedding.
If max_norm is provided, transform_fn is applied to the norm-limited
embeddings.
Returns:
See embedding_lookup for details.
Raises:
ValueError: If `params` is empty.
"""
if params is None or params in ((), []):
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
with ops.name_scope(name, "embedding_lookup", params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
ids = ops.convert_to_tensor(ids, name="ids")
if np == 1 and (not transform_fn or ids.get_shape().ndims == 1):
with ops.colocate_with(params[0]):
result = _clip(array_ops.gather(params[0], ids, name=name),
ids, max_norm)
if transform_fn:
result = transform_fn(result)
# Make sure the final result does not have colocation constraints on the
# params. Similar to the case np > 1 where parallel_dynamic_stitch is
# outside the scope of all with ops.colocate_with(params[p]).
return array_ops.identity(result)
else:
# Flatten the ids. There are two cases where we need to do this.
# - There is more than one params tensor.
# - There is a transform_fn and ids is not statically known to be 1-D.
# We must flatten in this case because transform_fn expects a flat
# tensor of embeddings.
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = tensor_shape.Dimension(tensor_shape.dimension_value(
params[0].get_shape()[0]))
for p in xrange(1, np):
dim_0_size += tensor_shape.Dimension(tensor_shape.dimension_value(
params[p].get_shape()[0]))
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
param_p_dim = tensor_shape.dimension_value(params[p].get_shape()[0])
if param_p_dim is not None:
dim_0_sizes.append(param_p_dim)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(
flat_ids // (ids_per_partition + 1),
(flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
new_ids = array_ops.where(p_assignments < extras,
flat_ids % (ids_per_partition + 1),
(flat_ids - extras) % ids_per_partition)
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
pids = gather_ids[p]
with ops.colocate_with(params[p]):
result = array_ops.gather(params[p], pids)
if transform_fn:
# If transform_fn is provided, the clip_by_norm precedes
# the transform and hence must be co-located. See below
# for the counterpart if transform_fn is not provided.
result = transform_fn(_clip(result, pids, max_norm))
partitioned_result.append(result)
# Stitch these back together
ret = data_flow_ops.parallel_dynamic_stitch(
pindices, partitioned_result, name=name)
# Determine the static element shape.
if transform_fn is None:
element_shape_s = params[0].get_shape()[1:]
for p in params[1:]:
element_shape_s = element_shape_s.merge_with(p.get_shape()[1:])
else:
element_shape_s = ret.get_shape()[1:]
# Compute the dynamic element shape.
if element_shape_s.is_fully_defined():
element_shape_d = element_shape_s
elif transform_fn is None:
# It's important that we compute params[0].shape on the right device
# to avoid data motion.
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
element_shape_d = params_shape[1:]
else:
element_shape_d = array_ops.shape(ret)[1:]
# Reshape to reverse the flattening of ids.
ret = array_ops.reshape(ret,
array_ops.concat(
[array_ops.shape(ids), element_shape_d], 0))
# Normally the reshape is sufficient, but setting shape explicitly
# teaches shape inference that params[1:].get_shape() matters
# (in the case that transform_fn is None).
ret.set_shape(ids.get_shape().concatenate(element_shape_s))
if not transform_fn:
# If transform_fn was provided, the clip_by_norm was done above.
ret = _clip(ret, ids, max_norm)
return ret
@tf_export(v1=["nn.embedding_lookup"])
def embedding_lookup(
params,
ids,
partition_strategy="mod",
name=None,
validate_indices=True, # pylint: disable=unused-argument
max_norm=None):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
`tf.gather`, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
If `partition_strategy` is `"mod"`, we assign each id to partition
`p = id % len(params)`. For instance,
13 ids are split across 5 partitions as:
`[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
If `partition_strategy` is `"div"`, we assign ids to partitions in a
contiguous manner. In this case, 13 ids are split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for the operation (optional).
validate_indices: DEPRECATED. If this operation is assigned to CPU, values
in `indices` are always validated to be within range. If assigned to GPU,
out-of-bound indices result in safe but unspecified behavior, which may
include raising an error.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value.
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
return _embedding_lookup_and_transform(
params=params,
ids=ids,
partition_strategy=partition_strategy,
name=name,
max_norm=max_norm,
transform_fn=None)
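# A plain-Python sketch of the two partition strategies documented above
# (illustrative only; it mirrors the arithmetic used in
# _embedding_lookup_and_transform for 13 ids over 5 partitions).
def _sketch_partitions(num_ids=13, num_parts=5, strategy="mod"):
  parts = [[] for _ in range(num_parts)]
  ids_per, extras = num_ids // num_parts, num_ids % num_parts
  for i in range(num_ids):
    if strategy == "mod":
      p = i % num_parts
    else:  # "div": the first `extras` partitions receive one extra id
      p = max(i // (ids_per + 1), (i - extras) // ids_per)
    parts[p].append(i)
  return parts
# _sketch_partitions(strategy="mod") == [[0,5,10],[1,6,11],[2,7,12],[3,8],[4,9]]
# _sketch_partitions(strategy="div") == [[0,1,2],[3,4,5],[6,7,8],[9,10],[11,12]]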
@tf_export("nn.embedding_lookup", v1=[])
def embedding_lookup_v2(
params,
ids,
partition_strategy="mod",
max_norm=None,
name=None):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
`tf.gather`, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
If `partition_strategy` is `"mod"`, we assign each id to partition
`p = id % len(params)`. For instance,
13 ids are split across 5 partitions as:
`[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
If `partition_strategy` is `"div"`, we assign ids to partitions in a
contiguous manner. In this case, 13 ids are split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
return embedding_lookup(params, ids, partition_strategy, name,
max_norm=max_norm)
@tf_export(v1=["nn.embedding_lookup_sparse"])
def embedding_lookup_sparse(params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
and M is arbitrary.
sp_weights: either a `SparseTensor` of float / double weights, or `None` to
indicate all weights should be taken to be 1. If specified, `sp_weights`
must have exactly the same shape and indices as `sp_ids`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value, before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
`shape(combined params) = [p0, p1, ..., pm]`
and
`shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]`
then
`shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`.
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
```python
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
```
with `combiner`="mean", then the output will be a 3x20 matrix where
```python
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = (params[0, :] * 1.0) / 1.0
output[2, :] = (params[1, :] * 3.0) / 3.0
```
Raises:
TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
neither `None` nor `SparseTensor`.
ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
ids, idx = array_ops.unique(ids)
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy, max_norm=max_norm)
if embeddings.dtype in (dtypes.float16, dtypes.bfloat16):
embeddings = math_ops.to_float(embeddings)
if not ignore_weights:
weights = sp_weights.values
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
embeddings = array_ops.gather(embeddings, idx)
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set the weight shape, since after reshaping to bcast_weights_shape,
# the shape becomes None.
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
embeddings *= weights
if combiner == "sum":
embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
else:
assert False, "Unrecognized combiner"
else:
assert idx is not None
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(
embeddings, idx, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(
embeddings, idx, segment_ids, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(
embeddings, idx, segment_ids, name=name)
else:
assert False, "Unrecognized combiner"
return embeddings
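# A plain-Python restatement of the combiners implemented above
# (illustrative; per row, w_i are the sparse weights, e_i the embeddings):
#   "sum":   out = sum(w_i * e_i)
#   "mean":  out = sum(w_i * e_i) / sum(w_i)
#   "sqrtn": out = sum(w_i * e_i) / sqrt(sum(w_i ** 2))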
@tf_export("nn.embedding_lookup_sparse", v1=[])
def embedding_lookup_sparse_v2(params,
sp_ids,
sp_weights,
partition_strategy="mod",
combiner=None,
max_norm=None,
name=None):
return embedding_lookup_sparse(
    params, sp_ids, sp_weights, partition_strategy, name, combiner, max_norm)
embedding_lookup_sparse_v2.__doc__ = embedding_lookup_sparse.__doc__
@tf_export("nn.safe_embedding_lookup_sparse", v1=[])
def safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner="mean",
default_id=None,
max_norm=None,
name=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Note: when doing embedding lookup on `embedding_weights`, "div" partition
strategy will be used. Support for other partition strategy will be added
later.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned shape
should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size
and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights are
assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
default.
default_id: The id to use for an entry with no features.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
name: A name for this operation (optional).
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
return safe_embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights=sparse_weights,
combiner=combiner,
default_id=default_id,
name=name,
partition_strategy="div",
max_norm=max_norm)
@tf_export(v1=["nn.safe_embedding_lookup_sparse"])
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner='mean',
default_id=None,
name=None,
partition_strategy='div',
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if embedding_weights is None:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
embedding_weights = [
w if (isinstance(w, resource_variable_ops.ResourceVariable)
and dtype in (None, w.dtype))
else ops.convert_to_tensor(w, dtype=dtype)
for w in embedding_weights
]
with ops.name_scope(name, 'embedding_lookup',
embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = tensor_shape.dimension_value(
sparse_ids.dense_shape.get_shape()[0])
original_rank = (
array_ops.size(original_shape)
if original_rank_dim is None
else original_rank_dim)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
if combiner != 'sum':
sparse_ids, sparse_weights = _prune_invalid_weights(
sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(tensor_shape.unknown_shape(
(tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate(
result.get_shape()[1:]))
return final_result
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
|
hfp/tensorflow-xsmm
|
tensorflow/python/ops/embedding_ops.py
|
Python
|
apache-2.0
| 33,826
|
"""
format (no nesting):
- + * [](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
+ * [](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
- + * [](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
+ * [](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
result:
<ul data-am-widget="gallery" class="am-gallery am-avg-sm-{*} am-avg-md-{+} am-avg-lg-{-} am-gallery-bordered" data-am-gallery="{pureview:{target: 'a', weChatImagePreview: false}}" >
<li>
<div class="am-gallery-item">
<a href="link">
<img src="preview-link" alt="img2"/>
<h6 class="am-gallery-title">title2</h6>
</a>
</div>
</li>
...
</ul>
"""
import re
import logging
from markdown.blockprocessors import BlockProcessor
from markdown import Extension
from markdown.util import etree
logger = logging.getLogger('MARKDOWN.list_gallery')
class ListGalleryProcesserProcesser(BlockProcessor):
# _IMG_RE = ImageBlockProcesser._IMG_RE
# _LINK_IMG_RE = LinkImageBlockProcesser._LINK_IMG_RE
_IMG_RE = re.compile(
r'\[\s?'
r'!\[(?P<alt>[\s\S]*?)\]' # ![] img alt
r'('
r'\('
r'(?P<src>.+?)'
r'(\s+'
r'(?P<title>.+?)'
r')?\s?'
r'\)'
r'|'
r'\[(?P<ref>.*?)\]'
r')' # img src
r'\s?\]' # [] link text
r'('
r'\('
r'(?P<preview>.*?)'
r"""(\s+['"]"""
r'(?P<preview_title>.+?)'
r"""['"])?\s?"""
r'\)' # () link link & title
r'|' # or
r'\[(?P<preview_ref>.+?)\]' # [] link ref
r')'
r'|'
r'!\[(?P<alt2>[\s\S]*?)\]' # ![] img alt
r'('
r'\('
r'(?P<src2>.+?)'
r'(\s+'
r'(?P<title2>.+?)'
r')?\s?'
r'\)'
r'|'
r'\[(?P<ref2>.*?)\]'
r')'
)
_LEADING = re.compile(r'[\+\-\*]')
_EMPTY = re.compile(r'^[\n\ ]*$')
def test(self, parent, block):
if not block.startswith('-') or len(block) < 2:
return None
logger.debug(block)
result = self._raw_result = self._formal(block)
return bool(result)
def run(self, parent, blocks):
result = getattr(self, '_raw_result', None)
if not result:
result = self._formal(blocks[0])
blocks.pop(0)
levels = self._parse_level(x['level'] for x in result)
if levels is None:
logger.debug('level failed')
return False
logger.debug('get levels %s', levels)
large, middle, small = levels
small = small or 1
wrapper = etree.SubElement(parent, 'div')
root = etree.SubElement(wrapper, 'ul')
root.set('data-am-widget', 'gallery')
classes = ['am-gallery', 'am-gallery-bordered',
'am-avg-sm-%s' % small]
if middle:
classes.append('am-avg-md-%s' % middle)
if large:
classes.append('am-avg-lg-%s' % large)
root.set('class', ' '.join(classes))
root.set('data-am-gallery',
"{pureview:{target: 'a', weChatImagePreview: false}}")
for each in result:
# {'preview': 'link4', 'src': 'pre-link4', 'title': None}
item = etree.SubElement(root, 'li')
container = etree.SubElement(item, 'div')
container.set('class', 'am-gallery-item')
preview_link = etree.SubElement(container, 'a')
preview_link.set('href', each['preview'] or each['src'])
img = etree.SubElement(preview_link, 'img')
img.set('src', each['src'])
if each['alt']:
img.set('alt', each['alt'])
if each['title']:
img.set('title', each['title'])
title = etree.SubElement(preview_link, 'h6')
title.set('class', "am-gallery-title")
title.text = each['title']
return True
def _formal(self, block):
prev_end = None
results = []
refs = self.parser.markdown.references
for each in self._IMG_RE.finditer(block):
this_result = {}
this_start, this_end = each.span(0)
if prev_end is None:
prev = 0
else:
prev = prev_end
leading = block[prev: this_start]
space, count = self._LEADING.subn(' ', leading)
logger.debug(repr(space))
logger.debug(self._EMPTY.match(space))
if not self._EMPTY.match(space) or count > 3:
logger.debug('leading failed %s / %s', leading, count)
return None
this_result['level'] = count
prev_end = this_end
group_dict = each.groupdict()
alt = group_dict['alt'] or group_dict['alt2']
title = (group_dict['title'] or
group_dict['title2'] or
group_dict['preview_title'])
src = group_dict['src'] or group_dict['src2']
if not src:
ref = group_dict['ref'] or group_dict['ref2']
if ref is None or ref not in refs:
return None
src, title = refs[ref]
preview_ref = group_dict['preview_ref']
if preview_ref:
if preview_ref not in refs:
return None
preview, _title = refs[preview_ref]
if title is None:
title = _title
else:
preview = group_dict['preview']
this_result['alt'] = alt
this_result['title'] = title
this_result['src'] = src
this_result['preview'] = preview or src
results.append(this_result)
logger.debug(this_result)
return results
def _parse_level(self, levels):
level = [0, 0, 0] # l, m, s
loop = []
for index, each in enumerate(levels):
if loop and loop[0] == each:
break
loop.append(each)
first = loop.pop(0)
loop.append(first)
if first == 3:
gap_limit = 2
else:
gap_limit = 1
for index, each in enumerate(loop, 1):
logger.debug(each)
gap = first - each
if gap > gap_limit or level[gap] != 0:
continue
level[gap] = index
logger.info((index, each, gap, level))
logger.debug(level)
return level
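# A worked example (values illustrative): item levels [3, 0, 1, 0, 2, 0, 3, ...]
# stop collecting at the repeated leading 3, reorder to [0, 1, 0, 2, 0, 3], and
# with gap_limit=2 yield [6, 4, 2]: 6 columns on large screens, 4 on medium,
# 2 on small.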
class ListGalleryExtension(Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('list_gallery',
ListGalleryProcesserProcesser(md.parser),
'<ulist')
def makeExtension(configs=None):
if configs is None:
configs = {}
return ListGalleryExtension(configs=configs)
if __name__ == '__main__':
import markdown
logging.basicConfig(
level=logging.DEBUG,
format='\033[32m%(levelname)1.1s\033[0m[%(lineno)3s]%(message)s')
md = """
- + * [](link "title2")
[](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
[](link "title2")
+ * [](link "title2")
[](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
[](link "title2")
+ * [](link "title2")
[](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
[](link "title2")
- + * [](link "title2")
[](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
[](link "title2")
+ * [](link "title2")
[](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
[](link "title2")
+ * [](link "title2")
[](link "title2")
[](link "title2")
* [](link "title2")
[](link "title2")
[](link "title2")
"""
# md = """
# - * [](link "title2")
# [](link "title2")
# [](link "title2")
# * [](link "title2")
# [](link "title2")
# [](link "title2")
# - * [](link "title2")
# [](link "title2")
# [](link "title2")
# * [](link "title2")
# [](link "title2")
# [](link "title2")
# """
result = markdown.markdown(md, extensions=[makeExtension()])
# result = markdown.markdown(md)
print(result)
|
TylerTemp/amazedown
|
amazedown/list_avg_gallery.py
|
Python
|
gpl-3.0
| 10,841
|
import collections.abc
class Perception(collections.abc.Sequence):
"""
Represents current state of the environment at given time instance.
By default each environment attribute is represented as `str` type.
"""
__slots__ = ['_items', 'oktypes']
def __init__(self, observation, oktypes=(str,)):
assert all(type(e) in oktypes for e in observation)
self._items = tuple(observation)
self.oktypes = oktypes  # matches the 'oktypes' slot declared above
@classmethod
def empty(cls):
return cls([], oktypes=(None,))
def __hash__(self):
return hash(self._items)
def __getitem__(self, i):
return self._items[i]
def __len__(self) -> int:
return len(self._items)
def __repr__(self):
return ' '.join(map(str, self))
def __eq__(self, other):
return len(self) == len(other) and all(p0 == p1 for p0, p1 in zip(self, other))
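# A usage sketch (values illustrative):
#
#   p = Perception(['1', '0', '#'])
#   len(p)                              # -> 3
#   p[0]                                # -> '1'
#   p == Perception(['1', '0', '#'])    # -> True (same length, same items)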
|
ParrotPrediction/pyalcs
|
lcs/Perception.py
|
Python
|
mit
| 838
|
import os
import re
from datetime import datetime
from config import config
from inputoutput.readers import csv_read, CSVInputReader
from inputoutput.writers import CSVWriter
from preprocessing.article_preprocessor import ArticlePreprocessor
from preprocessing.tweet_preprocessor import TweetPreprocessor
def tweet_to_date(raw_data_element):
timestamp = int(raw_data_element['timestamp']) / 1000
dt = datetime.fromtimestamp(timestamp)
return dt.date().isoformat().replace('-', '_')
def article_to_date(raw_data_element):
if 'published_date' not in raw_data_element:
print('Missing date: %s' % 'published_date')
return None
dt = datetime.strptime(raw_data_element['published_date'].split(' ')[0], '%Y-%m-%d')
return dt.date().isoformat().replace('-', '_')
def µ_tweets():
sorted_output_dir = r'H:\TWEETS\sorted'
base_filename = 'tweets'
columns = TweetPreprocessor.TWEET_COLUMNS
inputdir = os.path.join(config.PCLOUD_DIR, base_filename, '20161003_20161115')
µ(sorted_output_dir, base_filename, columns, inputdir, tweet_to_date)
def µ_articles():
inputdir = r'H:\TWEETS\POST_DL\PRE\articles_sander_results'
sorted_output_dir = r'H:\TWEETS\POST_DL\PRE\articles_sorted_sander_results'
base_filename = 'articles'
columns = ArticlePreprocessor.ARTICLE_COLUMNS
µ(sorted_output_dir, base_filename, columns, inputdir, article_to_date)
def µ(sorted_output_dir, base_filename, columns, inputdir, to_date):
if not os.path.exists(sorted_output_dir):
os.makedirs(sorted_output_dir)
# Read and process all tweets from config.TWEETS_RAW_HOMEDIR
input_index = 0
for filename in os.listdir(inputdir):
contents = csv_read(os.path.join(inputdir, filename))
input_index += 1
buffer = {}
for raw_data_element in contents[1:]:
date_str = to_date(raw_data_element)
if date_str is None:
continue
if date_str in buffer:
buffer[date_str].append(raw_data_element)
else:
buffer[date_str] = [raw_data_element, ]
write_buffer(sorted_output_dir, ('%s_%d' % (base_filename, input_index)), buffer, columns)
# then merge them
for (dirpath, dirnames, filenames) in os.walk(sorted_output_dir):
m = re.match(r'\d{4}_\d{2}_\d{2}', os.path.basename(dirpath))
if m:
r = CSVInputReader(dirpath, None)
w = CSVWriter(sorted_output_dir, '%s_%s_' % (base_filename, m.group(0)), columns)
for x in r:
w.write(x)
w.close()
def write_buffer(outputdir, basefilename, buffer, columns):
for date, items in buffer.items():
writer = CSVWriter(os.path.join(outputdir, date), basefilename, columns)
for item in items:
writer.write(item)
writer.close()
µ_articles()
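# A sketch of the resulting layout (paths illustrative):
#
#   <sorted_output_dir>/
#       2016_10_03/articles_1.csv        per-input-file, per-day buffers
#       2016_10_04/articles_1.csv
#       articles_2016_10_03_*.csv        merged per-day CSVs written last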
|
den1den/web-inf-ret-ml
|
preprocessing/date_sorter.py
|
Python
|
mit
| 2,911
|
from django.utils import six
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.core.paginator import InvalidPage, Paginator as DjangoPaginator
from rest_framework import pagination
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.utils.urls import (
replace_query_param, remove_query_param
)
from api.base.serializers import is_anonymized
from api.base.settings import MAX_PAGE_SIZE
class JSONAPIPagination(pagination.PageNumberPagination):
"""
Custom paginator that formats responses in a JSON-API compatible format.
Properly handles pagination of embedded objects.
"""
page_size_query_param = 'page[size]'
max_page_size = MAX_PAGE_SIZE
def page_number_query(self, url, page_number):
"""
Builds the absolute URI for the given page number (the param is dropped for page 1).
"""
url = self.request.build_absolute_uri(url)
paginated_url = replace_query_param(url, self.page_query_param, page_number)
if page_number == 1:
return remove_query_param(paginated_url, self.page_query_param)
return paginated_url
def get_first_real_link(self, url):
if not self.page.has_previous():
return None
return self.page_number_query(url, 1)
def get_last_real_link(self, url):
if not self.page.has_next():
return None
page_number = self.page.paginator.num_pages
return self.page_number_query(url, page_number)
def get_previous_real_link(self, url):
if not self.page.has_previous():
return None
page_number = self.page.previous_page_number()
return self.page_number_query(url, page_number)
def get_next_real_link(self, url):
if not self.page.has_next():
return None
page_number = self.page.next_page_number()
return self.page_number_query(url, page_number)
def get_paginated_response(self, data):
"""
Formats paginated response in accordance with JSON API.
Creates pagination links from the view_name if embedded resource,
rather than the location used in the request.
"""
kwargs = self.request.parser_context['kwargs'].copy()
embedded = kwargs.pop('is_embedded', None)
view_name = self.request.parser_context['view'].view_fqn
reversed_url = None
if embedded:
reversed_url = reverse(view_name, kwargs=kwargs)
response_dict = OrderedDict([
('data', data),
('links', OrderedDict([
('first', self.get_first_real_link(reversed_url)),
('last', self.get_last_real_link(reversed_url)),
('prev', self.get_previous_real_link(reversed_url)),
('next', self.get_next_real_link(reversed_url)),
('meta', OrderedDict([
('total', self.page.paginator.count),
('per_page', self.page.paginator.per_page),
]))
])),
])
if is_anonymized(self.request):
response_dict['meta'] = {'anonymous': True}
return Response(response_dict)
def paginate_queryset(self, queryset, request, view=None):
"""
Custom pagination of queryset. Returns page object or `None` if not configured for view.
If this is an embedded resource, returns first page, ignoring query params.
"""
if request.parser_context['kwargs'].get('is_embedded'):
paginator = DjangoPaginator(queryset, self.page_size)
page_number = 1
try:
self.page = paginator.page(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=six.text_type(exc)
)
raise NotFound(msg)
if paginator.count > 1 and self.template is not None:
# The browsable API should display pagination controls.
self.display_page_controls = True
self.request = request
return list(self.page)
else:
return super(JSONAPIPagination, self).paginate_queryset(queryset, request, view=None)
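# A sketch of the payload shape produced above (values illustrative; on the
# first page 'first' and 'prev' are null):
#
#   {"data": [...],
#    "links": {"first": null, "last": ".../nodes/?page=7",
#              "prev": null, "next": ".../nodes/?page=2",
#              "meta": {"total": 63, "per_page": 10}}}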
|
brandonPurvis/osf.io
|
api/base/pagination.py
|
Python
|
apache-2.0
| 4,312
|
"""
$url play.mrt.com.mk
$type live, vod
$region North Macedonia
"""
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://play\.mrt\.com\.mk/(live|play)/"
))
class MRTmk(Plugin):
file_re = re.compile(r"""(?P<url>https?://vod-[\d\w]+\.interspace\.com[^"',]+\.m3u8[^"',]*)""")
stream_schema = validate.Schema(
validate.all(
validate.transform(file_re.finditer),
validate.transform(list),
[validate.get("url")],
# remove duplicates
validate.transform(set),
validate.transform(list),
),
)
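# Illustrative example: for page text containing
#   src="https://vod-abc1.interspace.com/mrt/playlist.m3u8?token=x"
# the schema above yields the de-duplicated list
#   ['https://vod-abc1.interspace.com/mrt/playlist.m3u8?token=x']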
def _get_streams(self):
res = self.session.http.get(self.url)
stream_urls = self.stream_schema.validate(res.text)
log.debug("Found streams: {0}".format(len(stream_urls)))
if not stream_urls:
return
for stream_url in stream_urls:
try:
yield from HLSStream.parse_variant_playlist(self.session, stream_url).items()
except OSError as err:
if "403 Client Error" in str(err):
log.error("Failed to access stream, may be due to geo-restriction")
else:
raise
__plugin__ = MRTmk
|
streamlink/streamlink
|
src/streamlink/plugins/mrtmk.py
|
Python
|
bsd-2-clause
| 1,423
|
# -*- coding: utf-8 -*-
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import unittest
import subprocess
from pattern import de
try:
PATH = os.path.dirname(os.path.realpath(__file__))
except Exception:
PATH = ""
#---------------------------------------------------------------------------------------------------
class TestInflection(unittest.TestCase):
def setUp(self):
pass
def test_gender(self):
# Assert der Hund => MASCULINE
# Assert die Studentin => FEMININE
# Assert das Auto => NEUTRAL
self.assertEqual(de.gender("Hund"), de.MASCULINE)
self.assertEqual(de.gender("Studentin"), de.FEMININE)
self.assertEqual(de.gender("Auto"), de.NEUTRAL)
def test_pluralize(self):
# Assert the accuracy of the pluralization algorithm.
from pattern.db import Datasheet
i, n = 0, 0
for tag, sg, pl in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-de-celex.csv")):
if tag == "n":
if de.pluralize(sg) == pl:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.69)
print("pattern.de.pluralize()")
def test_singularize(self):
# Assert the accuracy of the singularization algorithm.
from pattern.db import Datasheet
i, n = 0, 0
for tag, sg, pl in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-de-celex.csv")):
if tag == "n":
if de.singularize(pl) == sg:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.82)
print("pattern.de.singularize()")
def test_attributive(self):
# Assert "groß" => "großer" (masculine, nominative), and others.
for lemma, inflected, gender, role, article in (
(u"groß", u"großer", de.MALE, de.SUBJECT, None),
(u"groß", u"großen", de.MALE, de.OBJECT, None),
(u"groß", u"großem", de.MALE, de.INDIRECT, None),
(u"groß", u"großen", de.MALE, de.PROPERTY, None),
(u"groß", u"große", de.FEMALE, de.SUBJECT, None),
(u"groß", u"große", de.FEMALE, de.OBJECT, None),
(u"groß", u"großer", de.FEMALE, de.INDIRECT, None),
(u"groß", u"großes", de.NEUTRAL, de.SUBJECT, None),
(u"groß", u"großes", de.NEUTRAL, de.OBJECT, None),
(u"groß", u"großen", de.MALE, de.PROPERTY, "mein"),
(u"groß", u"großen", de.FEMALE, de.PROPERTY, "jeder"),
(u"groß", u"großen", de.FEMALE, de.PROPERTY, "mein"),
(u"groß", u"großen", de.PLURAL, de.INDIRECT, "jede"),
(u"groß", u"großen", de.PLURAL, de.PROPERTY, "jeder")):
v = de.attributive(lemma, gender, role, article)
self.assertEqual(v, inflected)
print("pattern.de.attributive()")
def test_predicative(self):
# Assert the accuracy of the predicative algorithm ("großer" => "groß").
from pattern.db import Datasheet
i, n = 0, 0
for tag, pred, attr in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-de-celex.csv")):
if tag == "a":
if de.predicative(attr) == pred:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.98)
print("pattern.de.predicative()")
def test_find_lemma(self):
# Assert the accuracy of the verb lemmatization algorithm.
# Note: the accuracy is higher (88%) when measured on CELEX word forms
# (presumably because de.inflect.verbs has high percentage irregular verbs).
i, n = 0, 0
for v1, v2 in de.inflect.verbs.inflections.items():
if de.inflect.verbs.find_lemma(v1) == v2:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.86)
print("pattern.de.inflect.verbs.find_lemma()")
def test_find_lexeme(self):
# Assert the accuracy of the verb conjugation algorithm.
i, n = 0, 0
for v, lexeme1 in de.inflect.verbs.infinitives.items():
lexeme2 = de.inflect.verbs.find_lexeme(v)
for j in range(len(lexeme2)):
if lexeme1[j] == "":
continue
if lexeme1[j] == lexeme2[j]:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.86)
print("pattern.de.inflect.verbs.find_lexeme()")
def test_conjugate(self):
# Assert different tenses with different conjugations.
for (v1, v2, tense) in (
("sein", "sein", de.INFINITIVE),
("sein", "bin", (de.PRESENT, 1, de.SINGULAR)),
("sein", "bist", (de.PRESENT, 2, de.SINGULAR)),
("sein", "ist", (de.PRESENT, 3, de.SINGULAR)),
("sein", "sind", (de.PRESENT, 1, de.PLURAL)),
("sein", "seid", (de.PRESENT, 2, de.PLURAL)),
("sein", "sind", (de.PRESENT, 3, de.PLURAL)),
("sein", "seiend", (de.PRESENT + de.PARTICIPLE)),
("sein", "war", (de.PAST, 1, de.SINGULAR)),
("sein", "warst", (de.PAST, 2, de.SINGULAR)),
("sein", "war", (de.PAST, 3, de.SINGULAR)),
("sein", "waren", (de.PAST, 1, de.PLURAL)),
("sein", "wart", (de.PAST, 2, de.PLURAL)),
("sein", "waren", (de.PAST, 3, de.PLURAL)),
("sein", "gewesen", (de.PAST + de.PARTICIPLE)),
("sein", "sei", (de.PRESENT, 2, de.SINGULAR, de.IMPERATIVE)),
("sein", "seien", (de.PRESENT, 1, de.PLURAL, de.IMPERATIVE)),
("sein", "seid", (de.PRESENT, 2, de.PLURAL, de.IMPERATIVE)),
("sein", u"sei", (de.PRESENT, 1, de.SINGULAR, de.SUBJUNCTIVE)),
("sein", u"seiest", (de.PRESENT, 2, de.SINGULAR, de.SUBJUNCTIVE)),
("sein", u"sei", (de.PRESENT, 3, de.SINGULAR, de.SUBJUNCTIVE)),
("sein", u"seien", (de.PRESENT, 1, de.PLURAL, de.SUBJUNCTIVE)),
("sein", u"seiet", (de.PRESENT, 2, de.PLURAL, de.SUBJUNCTIVE)),
("sein", u"seien", (de.PRESENT, 3, de.PLURAL, de.SUBJUNCTIVE)),
("sein", u"wäre", (de.PAST, 1, de.SINGULAR, de.SUBJUNCTIVE)),
("sein", u"wärest", (de.PAST, 2, de.SINGULAR, de.SUBJUNCTIVE)),
("sein", u"wäre", (de.PAST, 3, de.SINGULAR, de.SUBJUNCTIVE)),
("sein", u"wären", (de.PAST, 1, de.PLURAL, de.SUBJUNCTIVE)),
("sein", u"wäret", (de.PAST, 2, de.PLURAL, de.SUBJUNCTIVE)),
("sein", u"wären", (de.PAST, 3, de.PLURAL, de.SUBJUNCTIVE))):
self.assertEqual(de.conjugate(v1, tense), v2)
print("pattern.de.conjugate()")
def test_lexeme(self):
# Assert all inflections of "sein".
v = de.lexeme("sein")
self.assertEqual(v, [
"sein", "bin", "bist", "ist", "sind", "seid", "seiend",
"war", "warst", "waren", "wart", "gewesen",
"sei", "seien", "seiest", "seiet",
u"wäre", u"wärest", u"wären", u"wäret"
])
print("pattern.de.inflect.lexeme()")
def test_tenses(self):
# Assert tense recognition.
self.assertTrue((de.PRESENT, 3, de.SG) in de.tenses("ist"))
self.assertTrue("2sg" in de.tenses("bist"))
print("pattern.de.tenses()")
#---------------------------------------------------------------------------------------------------
class TestParser(unittest.TestCase):
def setUp(self):
pass
def test_find_lemmata(self):
# Assert lemmata for nouns, adjectives and verbs.
v = de.parser.find_lemmata([["Ich", "PRP"], ["sage", "VB"], [u"schöne", "JJ"], [u"Dinge", "NNS"]])
self.assertEqual(v, [
["Ich", "PRP", "ich"],
["sage", "VB", "sagen"],
[u"schöne", "JJ", u"schön"],
["Dinge", "NNS", "ding"]])
print("pattern.de.parser.find_lemmata()")
def test_parse(self):
# Assert parsed output with Penn Treebank II tags (slash-formatted).
# 1) "der große Hund" is a noun phrase, "auf der Matte" is a prepositional noun phrase.
v = de.parser.parse(u"Der große Hund sitzt auf der Matte.")
self.assertEqual(v,
u"Der/DT/B-NP/O große/JJ/I-NP/O Hund/NN/I-NP/O " + \
u"sitzt/VB/B-VP/O " + \
u"auf/IN/B-PP/B-PNP der/DT/B-NP/I-PNP Matte/NN/I-NP/I-PNP ././O/O"
)
# 2) "große" and "sitzt" lemmata are "groß" and "sitzen".
# Note how articles are problematic ("der" can be male subject but also plural possessive).
v = de.parser.parse(u"Der große Hund sitzt auf der Matte.", lemmata=True)
self.assertEqual(v,
u"Der/DT/B-NP/O/der große/JJ/I-NP/O/groß Hund/NN/I-NP/O/hund " + \
u"sitzt/VB/B-VP/O/sitzen " + \
u"auf/IN/B-PP/B-PNP/auf der/DT/B-NP/I-PNP/der Matte/NN/I-NP/I-PNP/matte ././O/O/."
)
# 3) Assert the accuracy of the German tagger.
i, n = 0, 0
for sentence in open(os.path.join(PATH, "corpora", "tagged-de-tiger.txt")).readlines():
sentence = sentence.decode("utf-8").strip()
s1 = [w.split("/") for w in sentence.split(" ")]
s1 = [de.stts2penntreebank(w, pos) for w, pos in s1]
s2 = [[w for w, pos in s1]]
s2 = de.parse(s2, tokenize=False)
s2 = [w.split("/") for w in s2.split(" ")]
for j in range(len(s1)):
if s1[j][1] == s2[j][1]:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.844)
print("pattern.de.parse()")
def test_tag(self):
# Assert [("der", "DT"), ("grosse", "JJ"), ("Hund", "NN")].
v = de.tag("der grosse Hund")
self.assertEqual(v, [("der", "DT"), ("grosse", "JJ"), ("Hund", "NN")])
print("pattern.de.tag()")
def test_command_line(self):
# Assert parsed output from the command-line (example from the documentation).
p = ["python", "-m", "pattern.de", "-s", "Der grosse Hund.", "-OTCRL"]
p = subprocess.Popen(p, stdout=subprocess.PIPE)
p.wait()
v = p.stdout.read()
v = v.strip()
self.assertEqual(v, "Der/DT/B-NP/O/O/der grosse/JJ/I-NP/O/O/gross Hund/NN/I-NP/O/O/hund ././O/O/O/.")
print("python -m pattern.de")
#---------------------------------------------------------------------------------------------------
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestInflection))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestParser))
return suite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=1).run(suite())
|
krishna11888/ai
|
third_party/pattern/test/test_de.py
|
Python
|
gpl-2.0
| 10,848
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datanode_upgrade
from ambari_commons.constants import UPGRADE_TYPE_ROLLING
from hdfs_datanode import datanode
from resource_management import Script, Fail, shell, Logger
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import format
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
from hdfs import hdfs
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons import OSConst
from utils import get_hdfs_binary
from utils import get_dfsadmin_base_command
class DataNode(Script):
def get_component_name(self):
return "hadoop-hdfs-datanode"
def get_hdfs_binary(self):
"""
Get the name or path to the hdfs binary depending on the component name.
"""
component_name = self.get_component_name()
return get_hdfs_binary(component_name)
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
hdfs("datanode")
datanode(action="configure")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
datanode(action="start")
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
# pre-upgrade steps shut down the datanode during a rolling upgrade, so
# only call a regular stop if that graceful shutdown did not succeed
hdfs_binary = self.get_hdfs_binary()
if upgrade_type == UPGRADE_TYPE_ROLLING:
stopped = datanode_upgrade.pre_rolling_upgrade_shutdown(hdfs_binary)
if not stopped:
datanode(action="stop")
else:
datanode(action="stop")
# verify that the datanode is down
self.check_datanode_shutdown(hdfs_binary)
def status(self, env):
import status_params
env.set_params(status_params)
datanode(action = "status")
@retry(times=24, sleep_time=5, err_class=Fail)
def check_datanode_shutdown(self, hdfs_binary):
"""
Checks that a DataNode is down by running "hdfs dfsadmin getDatanodeInfo"
several times, pausing in between runs. Once the DataNode stops responding
this method will return, otherwise it will raise a Fail(...) and retry
automatically.
The stack defaults for retrying for HDFS are also way too slow for this
command; they are set to wait about 45 seconds between client retries. As
a result, a single execution of dfsadmin will take 45 seconds to retry and
the DataNode may be marked as dead, causing problems with HBase.
https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
times for ipc.client.connect.retry.interval. In the meantime, override them
here, but only for RU.
:param hdfs_binary: name/path of the HDFS binary to use
:return:
"""
import params
# override stock retry timeouts since after 30 seconds, the datanode is
# marked as dead and can affect HBase during RU
dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
is_datanode_deregistered = False
try:
shell.checked_call(command, user=params.hdfs_user, tries=1)
except Exception:
is_datanode_deregistered = True
if not is_datanode_deregistered:
Logger.info("DataNode has not yet deregistered from the NameNode...")
raise Fail('DataNode has not yet deregistered from the NameNode...')
Logger.info("DataNode has successfully shutdown.")
return True
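# For reference, the rendered command looks roughly like this (address and
# retry values are cluster-specific, shown for illustration only):
#
#   hdfs dfsadmin -D ipc.client.connect.max.retries=5 \
#       -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010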
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class DataNodeDefault(DataNode):
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing DataNode Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
conf_select.select(params.stack_name, "hadoop", params.version)
stack_select.select("hadoop-hdfs-datanode", params.version)
def post_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing DataNode Stack Upgrade post-restart")
import params
env.set_params(params)
hdfs_binary = self.get_hdfs_binary()
# ensure the DataNode has started and rejoined the cluster
datanode_upgrade.post_upgrade_check(hdfs_binary)
def get_log_folder(self):
import params
return params.hdfs_log_dir
def get_user(self):
import params
return params.hdfs_user
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class DataNodeWindows(DataNode):
def install(self, env):
import install_params
self.install_packages(env)
if __name__ == "__main__":
DataNode().execute()
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/ADH/1.0/services/HDFS/package/scripts/datanode.py
|
Python
|
apache-2.0
| 5,933
|
# -*- coding: utf-8 -*-
import itertools
def diagonal(card):
""" If the password from the card is read diagonally """
diagonals = []
for i in range(card.rows * -1, card.columns):
diagonals.append(card.m.diagonal(offset=i).tolist()[0])
results = list(itertools.chain.from_iterable(diagonals))
return results
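# A usage sketch; `Card` below is a hypothetical stand-in for the real card
# class (it only needs `m`, `rows` and `columns`):
#
#   import numpy as np
#   from collections import namedtuple
#   Card = namedtuple('Card', ['m', 'rows', 'columns'])
#   card = Card(m=np.matrix([[1, 2], [3, 4]]), rows=2, columns=2)
#   diagonal(card)  # -> [3, 1, 4, 2], lowest diagonal first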
|
gszathmari/munchkin
|
munchkin/core/strategies/diagonal.py
|
Python
|
mit
| 337
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Standard library imports
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
basic_scatter_script = """
import numpy as np
from bokeh.plotting import figure
from bokeh.io import curdoc
N = 5
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
p1 = figure()
p1.scatter(x,y, color="#FF00FF")
doc = curdoc()
doc.add_root(p1)
"""
basic_svg_scatter_script = """
import numpy as np
from bokeh.plotting import figure
from bokeh.io import curdoc
N = 5
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
p1 = figure(output_backend="svg")
p1.scatter(x,y, color="#FF00FF")
doc = curdoc()
doc.add_root(p1)
"""
multi_svg_scatter_script = """
import numpy as np
from bokeh.plotting import figure
from bokeh.layouts import Row
from bokeh.io import curdoc
N = 5
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
p1 = figure(output_backend="svg")
p1.scatter(x,y, color="#FF00FF")
p2 = figure(output_backend="svg")
p2.scatter(x,y, color="#00FF00")
doc = curdoc()
doc.add_root(Row(p1, p2))
"""
|
ericmjl/bokeh
|
tests/unit/bokeh/command/subcommands/_util_subcommands.py
|
Python
|
bsd-3-clause
| 1,803
|
"""
Automated phase correction
These functions provide support for automatic phasing of NMR data. They
consist of the core `autops` function which performs the optimisation and
a set of private functions for calculating a spectral phase quality score
for a provided spectrum.
"""
from __future__ import print_function
import numpy as np
import scipy.optimize
from .proc_base import ps
def autops(data, fn, p0=0.0, p1=0.0):
"""
Automatic linear phase correction
Parameters
----------
data : ndarray
Array of NMR data.
fn : str or function
Algorithm to use for phase scoring. Built in functions can be
specified by one of the following strings: "acme", "peak_minima"
p0 : float
Initial zero order phase in degrees.
p1 : float
Initial first order phase in degrees.
Returns
-------
ndata : ndarray
Phased NMR data.
"""
if not callable(fn):
fn = {
'peak_minima': _ps_peak_minima_score,
'acme': _ps_acme_score,
}[fn]
opt = [p0, p1]
opt = scipy.optimize.fmin(fn, x0=opt, args=(data, ))
phasedspc = ps(data, p0=opt[0], p1=opt[1])
return phasedspc
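# Typical usage, as a sketch (data is a complex 1D ndarray after Fourier
# transform; values illustrative):
#
#   phased = autops(data, 'acme')                  # entropy-based scoring
#   phased = autops(data, 'peak_minima', p0=30.0)  # minima around tallest peak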
def _ps_acme_score(ph, data):
"""
Phase correction using ACME algorithm by Chen Li et al.
Journal of Magnetic Resonance 158 (2002) 164-168
Parameters
----------
ph : tuple
Current p0 and p1 values
data : ndarray
Array of NMR data.
Returns
-------
score : float
Value of the objective function (phase score)
"""
stepsize = 1
phc0, phc1 = ph
s0 = ps(data, p0=phc0, p1=phc1)
data = np.real(s0)
# Calculation of first derivatives
ds1 = np.abs((data[1:]-data[:-1]) / (stepsize*2))
p1 = ds1 / np.sum(ds1)
# Calculation of entropy
p1[p1 == 0] = 1
h1 = -p1 * np.log(p1)
h1s = np.sum(h1)
# Calculation of penalty
pfun = 0.0
as_ = data - np.abs(data)
sumas = np.sum(as_)
if sumas < 0:
pfun = pfun + np.sum((as_/2) ** 2)
p = 1000 * pfun
return h1s + p
def _ps_peak_minima_score(ph, data):
"""
Phase correction using simple minima-minimisation around highest peak
This is a naive approach but is quick and often achieves reasonable
results. The optimisation is performed by finding the highest peak in the
spectra (e.g. TMSP) and then attempting to reduce minima surrounding it.
Parameters
----------
ph : tuple
Current p0 and p1 values
data : ndarray
Array of NMR data.
Returns
-------
score : float
Value of the objective function (phase score)
"""
phc0, phc1 = ph
s0 = ps(data, p0=phc0, p1=phc1)
data = np.real(s0)
i = np.argmax(data)
mina = np.min(data[max(0, i - 100):i])
minb = np.min(data[i:i+100])
return np.abs(mina - minb)
def manual_ps(data):
"""
Manual Phase correction using matplotlib
A matplotlib widget is used to manually correct the phase of a Fourier
transformed dataset. If the dataset has more than 1 dimensions, the first
trace will be picked up for phase correction. Clicking the 'Set Phase'
button will print the current linear phase parameters to the console.
.. note:: Needs matplotlib with an interactive backend.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
p0, p1 : float
Linear phase correction parameters. Zero and first order phase
corrections in degrees calculated from pc0, pc1 and pivot displayed
in the interactive window.
Examples
--------
>>> import nmrglue as ng
>>> p0, p1 = ng.process.proc_autophase.manual_ps(data)
>>> # do manual phase correction and close window
>>> phased_data = ng.proc_base.ps(data, p0=p0, p1=p1)
"""
from matplotlib.widgets import Slider, Button
import matplotlib.pyplot as plt
plt.subplots_adjust(left=0.25, bottom=0.35)
if len(data.shape) == 2:
data = data[0, ...]
elif len(data.shape) == 3:
data = data[0, 0, ...]
elif len(data.shape) == 4:
data = data[0, 0, 0, ...]
interactive, = plt.plot(data.real, lw=1, color='black')
axcolor = 'white'
axpc0 = plt.axes([0.25, 0.10, 0.65, 0.03], axisbg=axcolor)
axpc1 = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axpiv = plt.axes([0.25, 0.20, 0.65, 0.03], axisbg=axcolor)
axpst = plt.axes([0.25, 0.25, 0.15, 0.04], axisbg=axcolor)
spc0 = Slider(axpc0, 'p0', -360, 360, valinit=0)
spc1 = Slider(axpc1, 'p1', -360, 360, valinit=0)
spiv = Slider(axpiv, 'pivot', 0, data.size, valinit=0)
axps = Button(axpst, 'Set Phase', color=axcolor)
def update(val):
pc0 = spc0.val * np.pi / 180
pc1 = spc1.val * np.pi / 180
pivot = spiv.val
interactive.set_ydata((data * np.exp(
1.0j * (pc0 + (pc1 * np.arange(-pivot, -pivot + data.size) /
data.size))).astype(data.dtype)).real)
plt.draw()
def setphase(val):
p0 = spc0.val-spc1.val*spiv.val/data.size
p1 = spc1.val
print(p0, p1)
spc0.on_changed(update)
spc1.on_changed(update)
spiv.on_changed(update)
axps.on_clicked(setphase)
plt.show(block=True)
p0 = spc0.val-spc1.val*spiv.val/data.size
p1 = spc1.val
return p0, p1
|
atomman/nmrglue
|
nmrglue/process/proc_autophase.py
|
Python
|
bsd-3-clause
| 5,411
|
from flask import Flask
from mako.lookup import TemplateLookup
import pkg_resources
import os
module_path = os.path.join(pkg_resources.get_distribution('petersen').location, 'petersen')
app = Flask("petersen.app")
app.config['db_url'] = 'sqlite:///dev.db'
app.secret_key = 'TRMISAVATARAANGCONFIRMED' # TODO Move to env var and use an actual random string
template_lookup = TemplateLookup([
os.path.join(pkg_resources.get_distribution('petersen').location, 'petersen', 'templates')
])
@app.route('/')
@app.route('/index.html')
def index():
return template_lookup.get_template('index.mako').render(config=app.config)
@app.route('/assets/<path:path>')
def static_files(path):
path = path.split('/')
with open(os.path.join(module_path, 'assets', *path), 'rb') as f:
return f.read()
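# A hedged usage sketch (command illustrative):
#
#   python -c "from petersen.app.base import app; app.run(debug=True)"
#
# Note that static_files() joins URL segments verbatim; a hardened version
# would reject '..' components to avoid path traversal.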
|
TRManderson/petersen
|
petersen/app/base.py
|
Python
|
mit
| 802
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Skill.page'
db.add_column('dnd_skill', 'page', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Skill.page'
db.delete_column('dnd_skill', 'page')
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'CharacterClass'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Domain'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'})
},
'dnd.feat': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
'benefit': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'normal': ('django.db.models.fields.TextField', [], {}),
'note': ('django.db.models.fields.TextField', [], {}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'requiredFeats': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'requiredForFeats'", 'symmetrical': 'False', 'to': "orm['dnd.Feat']"}),
'requiredSkills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {}),
'specialFeatPrerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'})
},
'dnd.featcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.featspecialfeatprerequisite': {
'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.skill': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Skill'},
'action': ('django.db.models.fields.TextField', [], {}),
'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'check': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {}),
'synergy': ('django.db.models.fields.TextField', [], {}),
'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'try_again': ('django.db.models.fields.TextField', [], {}),
'untrained': ('django.db.models.fields.TextField', [], {})
},
'dnd.specialfeatprerequisite': {
'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.textfeatprerequisite': {
'Feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['dnd']
|
gregpechiro/dndtools
|
dndtools/dnd/migrations/0005_auto__add_field_skill_page.py
|
Python
|
mit
| 14,283
|
import logging
import os
import sys
import time
from typing import TYPE_CHECKING, Optional
from packaging import version
from dvc import __version__
from dvc.utils.pkg import PKG
if TYPE_CHECKING:
from dvc.ui import RichText
logger = logging.getLogger(__name__)
class Updater:
URL = "https://updater.dvc.org"
UPDATER_FILE = "updater"
TIMEOUT = 24 * 60 * 60 # every day
TIMEOUT_GET = 10
def __init__(self, tmp_dir, friendly=False, hardlink_lock=False):
from dvc.lock import make_lock
self.updater_file = os.path.join(tmp_dir, self.UPDATER_FILE)
self.lock = make_lock(
self.updater_file + ".lock",
tmp_dir=tmp_dir,
friendly=friendly,
hardlink_lock=hardlink_lock,
)
self.current = version.parse(__version__).base_version
def _is_outdated_file(self):
mtime = os.path.getmtime(self.updater_file)
outdated = time.time() - mtime >= self.TIMEOUT
if outdated:
logger.debug(f"'{self.updater_file}' is outdated")
return outdated
def _with_lock(self, func, action):
from dvc.lock import LockError
try:
with self.lock:
func()
except LockError:
msg = "Failed to acquire '{}' before {} updates"
logger.debug(msg.format(self.lock.lockfile, action))
def check(self):
from dvc.utils import env2bool
if (
os.getenv("CI")
or env2bool("DVC_TEST")
or PKG == "snap"
or not self.is_enabled()
):
return
self._with_lock(self._check, "checking")
def _check(self):
if not os.path.exists(self.updater_file) or self._is_outdated_file():
self.fetch()
return
with open(self.updater_file, encoding="utf-8") as fobj:
import json
try:
info = json.load(fobj)
latest = info["version"]
except Exception as exc: # pylint: disable=broad-except
msg = "'{}' is not a valid json: {}"
logger.debug(msg.format(self.updater_file, exc))
self.fetch()
return
if version.parse(self.current) < version.parse(latest):
self._notify(latest)
def fetch(self, detach=True):
from dvc.daemon import daemon
if detach:
daemon(["updater"])
return
self._with_lock(self._get_latest_version, "fetching")
def _get_latest_version(self):
import json
import requests
try:
resp = requests.get(self.URL, timeout=self.TIMEOUT_GET)
info = resp.json()
except requests.exceptions.RequestException as exc:
msg = "Failed to retrieve latest version: {}"
logger.debug(msg.format(exc))
return
with open(self.updater_file, "w+", encoding="utf-8") as fobj:
json.dump(info, fobj)
def _notify(self, latest: str, pkg: Optional[str] = PKG) -> None:
from dvc.ui import ui
if not sys.stdout.isatty():
return
message = self._get_message(latest, pkg=pkg)
return ui.error_write(message, styled=True)
def _get_message(
self,
latest: str,
current: Optional[str] = None,
color: str = "yellow",
pkg: Optional[str] = None,
) -> "RichText":
from dvc.ui import ui
current = current or self.current
update_message = ui.rich_text.from_markup(
f"You are using dvc version [bold]{current}[/]; "
f"however, version [bold]{latest}[/] is available."
)
instruction = ui.rich_text.from_markup(
self._get_update_instructions(pkg=pkg)
)
return ui.rich_text.assemble(
"\n", update_message, "\n", instruction, style=color
)
@staticmethod
def _get_update_instructions(pkg: Optional[str] = None) -> str:
if pkg in ("osxpkg", "exe", "binary"):
return (
"To upgrade, uninstall dvc and reinstall from "
"[blue]https://dvc.org[/]."
)
instructions = {
"pip": "pip install --upgrade dvc",
"rpm": "yum update dvc",
"brew": "brew upgrade dvc",
"deb": "apt-get install --only-upgrade dvc",
"conda": "conda update dvc",
"choco": "choco upgrade dvc",
}
if pkg not in instructions:
return (
"Find the latest release at "
"[blue]https://github.com/iterative/dvc/releases/latest[/]."
)
instruction = instructions[pkg]
return f"To upgrade, run '{instruction}'."
def is_enabled(self):
from dvc.config import Config, to_bool
enabled = to_bool(
Config(validate=False).get("core", {}).get("check_update", "true")
)
logger.debug(
"Check for update is {}abled.".format("en" if enabled else "dis")
)
return enabled
def notify_updates():
from contextlib import suppress
from dvc.repo import NotDvcRepoError, Repo
with suppress(NotDvcRepoError), Repo() as repo:
hardlink_lock = repo.config["core"].get("hardlink_lock", False)
updater = Updater(repo.tmp_dir, hardlink_lock=hardlink_lock)
updater.check()
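# A hedged usage sketch: DVC itself wires this around command execution, but
# it can also be exercised directly:
#
#   notify_updates()  # no-op outside a DVC repo or when check_update is off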
|
dmpetrov/dataversioncontrol
|
dvc/updater.py
|
Python
|
apache-2.0
| 5,458
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-26 12:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0025_auto_20160628_0913'),
]
operations = [
migrations.AlterField(
model_name='project',
name='slug',
field=models.SlugField(editable=False, help_text='Short, randomish identifier used in the URL of a project.', unique=True),
),
]
|
editorsnotes/editorsnotes
|
editorsnotes/main/migrations/0026_auto_20160926_1237.py
|
Python
|
agpl-3.0
| 531
|
'''
This file is part of ConfigShell.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from fcntl import ioctl
import re
import six
import struct
import sys
from termios import TIOCGWINSZ, TCSADRAIN, tcsetattr, tcgetattr
import textwrap
import tty
from .prefs import Prefs
class Console(object):
'''
Implements various utility methods providing a console UI support toolkit,
most notably an epytext-to-console text renderer using ANSI escape
sequences. It uses the Borg pattern to share state between instances.
'''
_max_width = 132
_escape = '\033['
_ansi_format = _escape + '%dm%s'
_ansi_reset = _escape + '0m'
_re_ansi_seq = re.compile(r'(\033\[..?m)')
_ansi_styles = {'bold': 1,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8}
colors = ['black', 'red', 'green', 'yellow',
'blue', 'magenta', 'cyan', 'white']
_ansi_fgcolors = dict(zip(colors, range(30, 38)))
_ansi_bgcolors = dict(zip(colors, range(40, 48)))
__borg_state = {}
def __init__(self, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
'''
Initializes a Console instance.
@param stdin: The console standard input.
@type stdin: file object
@param stdout: The console standard output.
@type stdout: file object
'''
self.__dict__ = self.__borg_state
self._stdout = stdout
self._stdin = stdin
self._stderr = stderr
self.prefs = Prefs()
# Public methods
def escape(self, sequence, reply_terminator=None):
'''
Sends an escape sequence to the console, and reads the reply terminated
by reply_terminator. If reply_terminator is not specified, the reply
will not be read.
@param sequence: The escape sequence to send, without the leading escape prefix.
@type sequence: str
@param reply_terminator: The expected end-of-reply marker.
@type reply_terminator: str
'''
attributes = tcgetattr(self._stdin)
tty.setraw(self._stdin)
try:
self.raw_write(self._escape + sequence)
if reply_terminator is not None:
reply = ''
while reply[-len(reply_terminator):] != reply_terminator:
reply += self._stdin.read(1)
finally:
tcsetattr(self._stdin, TCSADRAIN, attributes)
if reply_terminator is not None:
reply = reply[:-len(reply_terminator)]
reply = reply.replace(self._escape, '').split(';')
return reply
def get_width(self):
'''
Returns the console width, or maximum width if we are not a terminal
device.
'''
try:
winsize = struct.pack("HHHH", 0, 0, 0, 0)
winsize = ioctl(self._stdout.fileno(), TIOCGWINSZ, winsize)
width = struct.unpack("HHHH", winsize)[1]
except IOError:
width = self._max_width
else:
if width > self._max_width:
width = self._max_width
return width
def get_cursor_xy(self):
'''
Get the current text cursor x, y coordinates.
'''
coords = [int(coord) for coord in self.escape("6n", "R")]
coords.reverse()
return coords
def set_cursor_xy(self, xpos, ypos):
'''
Set the cursor x, y coordinates.
@param xpos: The x coordinate of the cursor.
@type xpos: int
@param ypos: The y coordinate of the cursor.
@type ypos: int
'''
self.escape("%d;%dH" % (ypos, xpos))
def raw_write(self, text, output=sys.stdout):
'''
Raw console printing function.
@param text: The text to print.
@type text: str
'''
output.write(text)
output.flush()
def display(self, text, no_lf=False, error=False):
'''
Display a text with a default style.
@param text: Text to display
@type text: str
@param no_lf: Do not display a line feed.
@type no_lf: bool
@param error: Send the text to stderr instead of stdout.
@type error: bool
'''
text = self.render_text(text)
if error:
output = self._stderr
else:
output = self._stdout
self.raw_write(text, output=output)
if not no_lf:
self.raw_write('\n', output=output)
def epy_write(self, text):
'''
Renders and prints an epytext-formatted text on the console.
'''
text = self.dedent(text)
# We need to remove the last line feed, but there might be
# escape characters after it...
clean_text = ''
for index in range(1, len(text)):
if text[-index] == '\n':
clean_text = text[:-index]
if index != 1:
clean_text += text[-index+1:]
break
else:
clean_text = text
self.raw_write(clean_text, output=self._stdout)
def indent(self, text, margin=2):
'''
        Indents each line of text by margin spaces.
        @param text: The text to be indented.
        @type text: str
        @param margin: The number of spaces to prepend to each line.
        @type margin: int
        '''
output = ''
for line in text.split('\n'):
output += margin * ' ' + line + '\n'
return output
def dedent(self, text):
'''
A convenience function to easily write multiline text blocks that
        will later be assembled into a single epytext string.
        It removes leading newline chars and common indentation.
        '''
        i = 0
        for i in range(len(text)):
            if text[i] != '\n':
                break
        text = text[i:]
        text = textwrap.dedent(text)
        text = '\n' * i + text
return text
def render_text(self, text, fgcolor=None, bgcolor=None, styles=None,
open_end=False, todefault=False):
'''
Renders some text with ANSI console colors and attributes.
@param fgcolor: ANSI color to use for text:
            black, red, green, yellow, blue, magenta, cyan, white
@type fgcolor: str
@param bgcolor: ANSI color to use for background:
            black, red, green, yellow, blue, magenta, cyan, white
@type bgcolor: str
@param styles: List of ANSI styles to use:
bold, underline, blink, reverse, concealed
@type styles: list of str
        @param open_end: Do not reset text style at the end of the output.
@type open_end: bool
@param todefault: Instead of resetting style at the end of the
output, reset to default color. Only if not open_end.
@type todefault: bool
'''
if self.prefs['color_mode'] and self._stdout.isatty():
if fgcolor is None:
if self.prefs['color_default']:
fgcolor = self.prefs['color_default']
if fgcolor is not None:
text = self._ansi_format % (self._ansi_fgcolors[fgcolor], text)
if bgcolor is not None:
text = self._ansi_format % (self._ansi_bgcolors[bgcolor], text)
if styles is not None:
for style in styles:
text = self._ansi_format % (self._ansi_styles[style], text)
if not open_end:
text += self._ansi_reset
if todefault and fgcolor is not None:
if self.prefs['color_default']:
text += self._ansi_format \
% (self._ansi_fgcolors[
self.prefs['color_default']], '')
return text
def wordwrap(self, text, indent=0, startindex=0, splitchars=''):
'''
Word-wrap the given string. I.e., add newlines to the string such
that any lines that are longer than terminal width or max_width
        are broken into shorter lines (at the first whitespace sequence that
        occurs before the limit). If the given string contains newlines, they
will not be removed. Any lines that begin with whitespace will not
be wordwrapped.
This version takes into account ANSI escape characters:
- stop escape sequence styling at the end of a split line
- start it again on the next line if needed after the indent
- do not account for the length of the escape sequences when
wrapping
@param indent: If specified, then indent each line by this number
of spaces.
@type indent: int
@param startindex: If specified, then assume that the first line
is already preceded by startindex characters.
@type startindex: int
@param splitchars: A list of non-whitespace characters which can
be used to split a line. (E.g., use '/\\' to allow path names
to be split over multiple lines.)
@rtype: str
'''
right = self.get_width()
if splitchars:
chunks = re.split(r'( +|\n|[^ \n%s]*[%s])' %
(re.escape(splitchars), re.escape(splitchars)),
text.expandtabs())
else:
chunks = re.split(r'( +|\n)', text.expandtabs())
result = [' '*(indent-startindex)]
charindex = max(indent, startindex)
current_style = ''
for chunknum, chunk in enumerate(chunks):
chunk_groups = re.split(self._re_ansi_seq, chunk)
chunk_text = ''
next_style = current_style
for group in chunk_groups:
if re.match(self._re_ansi_seq, group) is None:
chunk_text += group
else:
next_style += group
chunk_len = len(chunk_text)
if (charindex + chunk_len > right and charindex > 0) \
or chunk == '\n':
result[-1] = result[-1].rstrip()
result.append(self.render_text(
'\n' + ' '*indent + current_style, open_end=True))
charindex = indent
if chunk[:1] not in ('\n', ' '):
result.append(chunk)
charindex += chunk_len
else:
result.append(chunk)
charindex += chunk_len
current_style = next_style.split(self._ansi_reset)[-1]
return ''.join(result).rstrip()+'\n'
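# A minimal usage sketch (hypothetical, not part of the original module). It
# assumes this file's own imports (Prefs, termios/fcntl constants, etc.)
# resolve as they do in configshell, and only exercises the text helpers.
if __name__ == '__main__':
    console = Console()
    # Render a bold red string with ANSI escapes, then word-wrap it to the
    # detected terminal width (or _max_width when not a tty).
    styled = console.render_text('hello from the console toolkit',
                                 fgcolor='red', styles=['bold'])
    console.raw_write(console.wordwrap(styled, indent=2))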
| agrover/configshell-fb | configshell/console.py | Python | apache-2.0 | 10,940 |
from baseparser import BaseParser, grab_url, logger
# Different versions of BeautifulSoup have different properties.
# Some work with one site, some with another.
# This is BeautifulSoup 3.2.
from BeautifulSoup import BeautifulSoup
# This is BeautifulSoup 4
import bs4
import re
class PoliticoParser(BaseParser):
domains = ['www.politico.com']
feeder_pat = '^http://www.politico.com/(news/stories|story)/'
feeder_pages = ['http://www.politico.com/']
feeder_bs = bs4.BeautifulSoup
def _parse(self, html):
soup = bs4.BeautifulSoup(html)
print_link = soup.findAll('a', href=re.compile('http://dyn.politico.com/printstory.cfm.*'))[0].get('href')
html2 = grab_url(print_link)
logger.debug('got html 2')
# Now we have to switch back to bs3. Hilarious.
# and the labeled encoding is wrong, so force utf-8.
soup = BeautifulSoup(html2, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
self.meta = soup.findAll('meta')
p_tags = soup.findAll('p')[1:]
real_p_tags = [p for p in p_tags if
not p.findAll(attrs={'class':"twitter-follow-button"})]
self.title = soup.find('strong').getText()
entity = soup.find('span', attrs={'class':'author'})
children = list(entity.childGenerator())
try:
self.byline = 'By ' + children[1].getText()
except IndexError:
self.byline = ''
self.date = children[-1].strip()
self.body = '\n'+'\n\n'.join([p.getText() for p in real_p_tags])
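# A hedged, self-contained sketch of the first step in _parse() above:
# locating the "print story" link with bs4. The HTML snippet is invented
# purely for illustration.
if __name__ == '__main__':
    sample = '<a href="http://dyn.politico.com/printstory.cfm?uuid=X">Print</a>'
    link = bs4.BeautifulSoup(sample, 'html.parser').find(
        'a', href=re.compile('http://dyn.politico.com/printstory.cfm.*'))
    print(link.get('href'))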
| amandabee/newsdiffs | parsers/politico.py | Python | mit | 1,613 |
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017 Tim Savannah under following terms:
# You may modify and redistribe this script with your project
#
# It will download the latest GoodTests.py and use it to execute the tests.
#
# This should be placed in a directory, "tests", at the root of your project. It assumes that ../$MY_PACKAGE_MODULE is the path to your test module, and will create a symlink to it in order to run tests.
# The tests should be found in $MY_TEST_DIRECTORY in given "tests" folder.
# NOTE: Since version 1.2.3, you can also import this (like from a graphical application) and call the "main()" function.
# All of the following globals are the defaults, but can be overridden when calling main() (params have the same name as the globals).
import imp
import os
import subprocess
import sys
# URL to current version of GoodTests.py - You only need to change this if you host an internal copy.
GOODTESTS_URL = 'https://raw.githubusercontent.com/kata198/GoodTests/master/GoodTests.py'
# This should be your module name, and can be any relative or absolute path, or just a module name.
# If just a module name is given, the directory must be in current directory or parent directory.
MY_PACKAGE_MODULE = 'QueryableList'
# Normally, you want to test the codebase during development, so you don't care about the site-packages installed version.
# If you want to allow testing with any module by @MY_PACKAGE_MODULE in the python path, change this to True.
ALLOW_SITE_INSTALL = False
# This is the test directory that should contain all your tests. This should be a directory in your "tests" folder
MY_TEST_DIRECTORY = 'QueryableListTests'
__version__ = '3.0.0'
__version_tuple__ = (3, 0, 0)
def findExecutable(execName):
'''
findExecutable - Search PATH for an executable
@return <dict> {
'path' <str> -> Path to executable (if found, see "success")
'success' <bool> -> True/False if we successfully found requested executable
}
'''
pathSplit = os.environ['PATH'].split(':')
if '.' not in pathSplit:
pathSplit = ['.'] + pathSplit
os.environ['PATH'] = ':'.join(pathSplit)
result = ''
success = False
for path in pathSplit:
if path.endswith(os.sep):
path = path[:-1]
guess = path + os.sep + execName
if os.path.exists(guess):
success = True
result = guess
break
return {
"path" : result,
"success" : success
}
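# Example (hypothetical paths): on a typical Linux box,
#   findExecutable('sh') -> {'path': '/bin/sh', 'success': True}
#   findExecutable('no-such-tool') -> {'path': '', 'success': False}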
def findGoodTests():
return findExecutable('GoodTests.py')
def try_pip_install():
'''
try to pip install GoodTests.py
First, try via pip module.
If that fails, try to locate pip by dirname(current python executable) + os.sep + pip
If that does not exist, scan PATH for pip
If found a valid pip executable, invoke it to install GoodTests
otherwise, fail.
'''
didImport = False
try:
import pip
didImport = True
    except ImportError:
        pass
if didImport is True:
print ( "Found pip as module=pip")
res = pip.main(['install', 'GoodTests'])
if res == 0:
return 0
sys.stderr.write('Failed to install GoodTests via pip module. Falling back to pip executable...\n\n')
pipPath = os.path.dirname(sys.executable) + os.sep + 'pip'
print ( 'Searching for pip at "%s"' %(pipPath, ) )
if not os.path.exists(pipPath):
print ( '"%s" does not exist. Scanning PATH to locate a usable pip executable' %(pipPath, ))
pipPath = None
searchResults = findExecutable('pip')
if not searchResults['success']:
sys.stderr.write('Failed to find a usable pip executable in PATH.\n')
return 1 # Failed to locate a usable pip
pipPath = searchResults['path']
print ( 'Found pip executable at "%s"' %(pipPath, ) )
print ( "Executing: %s %s 'install' 'GoodTests'" %(sys.executable, pipPath) )
pipe = subprocess.Popen([sys.executable, pipPath, 'install', 'GoodTests'], shell=False, env=os.environ)
res = pipe.wait()
return res
def download_goodTests(GOODTESTS_URL=None):
'''
download_goodTests - Attempts to download GoodTests, using the default global url (or one provided).
@return <int> - 0 on success (program should continue), otherwise non-zero (program should abort with this exit status)
'''
if GOODTESTS_URL is None:
GOODTESTS_URL = globals()['GOODTESTS_URL']
validAnswer = False
    while not validAnswer:
sys.stdout.write('GoodTests not found. Would you like to install it to local folder? (y/n): ')
sys.stdout.flush()
answer = sys.stdin.readline().strip().lower()
if answer not in ('y', 'n', 'yes', 'no'):
continue
validAnswer = True
answer = answer[0]
if answer == 'n':
sys.stderr.write('Cannot run tests without installing GoodTests. http://pypi.python.org/pypi/GoodTests or https://github.com/kata198/Goodtests\n')
return 1
try:
import urllib2 as urllib
except ImportError:
try:
import urllib.request as urllib
        except ImportError:
sys.stderr.write('Failed to import urllib. Trying pip.\n')
res = try_pip_install()
            if res != 0:
                sys.stderr.write('Failed to install GoodTests with pip or direct download. aborting.\n')
                return 1
            # pip install succeeded; nothing was downloaded, so skip the write below.
            return 0
try:
response = urllib.urlopen(GOODTESTS_URL)
contents = response.read()
if str != bytes:
contents = contents.decode('ascii')
except Exception as e:
sys.stderr.write('Failed to download GoodTests.py from "%s"\n%s\n' %(GOODTESTS_URL, str(e)))
sys.stderr.write('\nTrying pip.\n')
res = try_pip_install()
        if res != 0:
            sys.stderr.write('Failed to install GoodTests with pip or direct download. aborting.\n')
            return 1
        # pip install succeeded; there is no downloaded content to write out.
        return 0
try:
with open('GoodTests.py', 'w') as f:
f.write(contents)
except Exception as e:
        sys.stderr.write('Failed to write to GoodTests.py\n%s\n' %(str(e),))
return 1
try:
os.chmod('GoodTests.py', 0o775)
    except OSError:
sys.stderr.write('WARNING: Failed to chmod +x GoodTests.py, may not be able to be executed.\n')
try:
import GoodTests
except ImportError:
sys.stderr.write('Seemed to download GoodTests okay, but still cannot import. Aborting.\n')
return 1
return 0
def main(thisDir=None, additionalArgs=None, MY_PACKAGE_MODULE=None, ALLOW_SITE_INSTALL=None, MY_TEST_DIRECTORY=None, GOODTESTS_URL=None):
'''
Do the work - Try to find GoodTests.py, else prompt to download it, then run the tests.
@param thisDir <None/str> - None to use default (directory this test file is in, or if not obtainable, current directory).
@param additionalArgs <list> - Any additional args to pass to GoodTests.py
Remainder of params take their global (top of file) defaults unless explicitly set here. See top of file for documentation.
@return <int> - Exit code of application. 0 on success, non-zero on failure.
TODO: Standardize return codes so external applications can derive failure without parsing error strings.
'''
if MY_PACKAGE_MODULE is None:
MY_PACKAGE_MODULE = globals()['MY_PACKAGE_MODULE']
if ALLOW_SITE_INSTALL is None:
ALLOW_SITE_INSTALL = globals()['ALLOW_SITE_INSTALL']
if MY_TEST_DIRECTORY is None:
MY_TEST_DIRECTORY = globals()['MY_TEST_DIRECTORY']
    if GOODTESTS_URL is None:
        GOODTESTS_URL = globals()['GOODTESTS_URL']
    if additionalArgs is None:
        additionalArgs = []
if not thisDir:
thisDir = os.path.dirname(__file__)
if not thisDir:
thisDir = str(os.getcwd())
elif not thisDir.startswith('/'):
thisDir = str(os.getcwd()) + '/' + thisDir
# If GoodTests is in current directory, make sure we find it later
if os.path.exists('./GoodTests.py'):
os.environ['PATH'] = str(os.getcwd()) + ':' + os.environ['PATH']
os.chdir(thisDir)
goodTestsInfo = findGoodTests()
if goodTestsInfo['success'] is False:
downloadRet = download_goodTests(GOODTESTS_URL)
if downloadRet != 0:
return downloadRet
goodTestsInfo = findGoodTests()
if goodTestsInfo['success'] is False:
sys.stderr.write('Could not download or find GoodTests.py. Try to download it yourself using "pip install GoodTests", or wget %s\n' %( GOODTESTS_URL,))
return 1
baseName = os.path.basename(MY_PACKAGE_MODULE)
dirName = os.path.dirname(MY_PACKAGE_MODULE)
newPath = None
if dirName not in ('.', ''):
if dirName.startswith('.'):
dirName = os.getcwd() + os.sep + dirName + os.sep
newPath = dirName
elif dirName == '':
inCurrentDir = False
try:
imp.find_module(MY_PACKAGE_MODULE)
inCurrentDir = True
except ImportError:
# COMPAT WITH PREVIOUS runTests.py: Try plain module in parent directory
foundIt = False
oldSysPath = sys.path[:]
sys.path = [os.path.realpath(os.getcwd() + os.sep + '..' + os.sep)]
try:
imp.find_module(MY_PACKAGE_MODULE)
foundIt = True
sys.path = oldSysPath
except ImportError as e:
sys.path = oldSysPath
if not ALLOW_SITE_INSTALL:
sys.stderr.write('Cannot find "%s" locally.\n' %(MY_PACKAGE_MODULE,))
return 2
else:
try:
__import__(baseName)
except:
sys.stderr.write('Cannot find "%s" locally or in global python path.\n' %(MY_PACKAGE_MODULE,))
return 2
if foundIt is True:
newPath = os.path.realpath(os.getcwd() + os.sep + '..' + os.sep)
if inCurrentDir is True:
newPath = os.path.realpath(os.getcwd() + os.sep + '..' + os.sep)
if newPath:
newPythonPath = [newPath] + [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
os.environ['PYTHONPATH'] = ':'.join(newPythonPath)
sys.path = [newPath] + sys.path
try:
__import__(baseName)
except ImportError as e:
if baseName.endswith(('.py', '.pyc', '.pyo')):
MY_PACKAGE_MODULE = baseName[ : baseName.rindex('.')]
if e.name != MY_PACKAGE_MODULE:
sys.stderr.write('Error while importing %s: %s\n Likely this is another dependency that needs to be installed\nPerhaps run "pip install %s" or install the providing package.\n\n' %(e.name, str(e), e.name))
return 1
sys.stderr.write('Could not import %s. Either install it or otherwise add to PYTHONPATH\n%s\n' %(MY_PACKAGE_MODULE, str(e)))
return 1
if not os.path.isdir(MY_TEST_DIRECTORY):
if not os.path.exists(MY_TEST_DIRECTORY):
sys.stderr.write('Cannot find test directory: %s\n' %(MY_TEST_DIRECTORY,))
else:
sys.stderr.write('Provided test directory, "%s" is not a directory.\n' %(MY_TEST_DIRECTORY,))
return 3
    sys.stdout.write('Starting tests...\n')
sys.stdout.flush()
sys.stderr.flush()
didTerminate = False
pipe = subprocess.Popen([sys.executable, goodTestsInfo['path']] + additionalArgs + [MY_TEST_DIRECTORY], env=os.environ, shell=False)
while True:
try:
pipe.wait()
break
except KeyboardInterrupt:
if not didTerminate:
pipe.terminate()
didTerminate = True
else:
pipe.kill()
break
    return pipe.returncode if pipe.returncode is not None else 1
if __name__ == '__main__':
ret = main(None, sys.argv[1:])
sys.exit(ret)
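# Example invocation (from the project's "tests" directory); any extra
# command-line arguments are passed straight through to GoodTests.py:
#   python runTests.py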
| kata198/QueryableList | tests/runTests.py | Python | lgpl-2.1 | 12,771 |
## @file
# This file is used to define class Configuration
#
# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import Common.EdkLogger as EdkLogger
from Common.DataType import *
from Common.String import *
from Common.LongFilePathSupport import OpenLongFilePath as open
## Configuration
#
# This class is used to define all items in configuration file
#
# @param Filename: The name of configuration file, the default is config.ini
#
class Configuration(object):
def __init__(self, Filename):
self.Filename = Filename
self.Version = 0.1
        ## Identify whether to check all items
        # 1 - Check all items and ignore all other detailed items
        # 0 - Do not check all items; the tool will go through all other detailed items to decide whether to check
        #
        self.CheckAll = 0
        ## Identify whether to automatically correct mistakes
        # 1 - Automatically correct
        # 0 - Do not automatically correct
        # Only the check points listed below can be automatically corrected; others are not supported even if set to 1
#
# GeneralCheckTab
# GeneralCheckIndentation
# GeneralCheckLine
# GeneralCheckCarriageReturn
# SpaceCheckAll
#
self.AutoCorrect = 0
        # List customized Modifiers here, split with ','
        # By default, use the definition in class DataType
self.ModifierList = MODIFIER_LIST
        ## General Checking
        self.GeneralCheckAll = 0
        # Check that no tab is used; tabs should be replaced with spaces
        self.GeneralCheckNoTab = 1
        # The width of Tab
        self.GeneralCheckTabWidth = 2
        # Check whether the indentation follows the coding style
        self.GeneralCheckIndentation = 1
        # The width of indentation
        self.GeneralCheckIndentationWidth = 2
        # Check that no line exceeds the defined width
        self.GeneralCheckLine = 1
        # The width of a line
        self.GeneralCheckLineWidth = 120
        # Check that there is no use of _asm in the source file
        self.GeneralCheckNo_Asm = 1
        # Check that there is no use of "#pragma" in source files except "#pragma pack(#)".
        self.GeneralCheckNoProgma = 1
        # Check whether there is a carriage return at the end of the file
        self.GeneralCheckCarriageReturn = 1
        # Check whether the file exists
        self.GeneralCheckFileExistence = 1
        # Check whether the file has non-ASCII characters
        self.GeneralCheckNonAcsii = 1
        # Check whether the UNI file is valid
        self.GeneralCheckUni = 1
## Space Checking
self.SpaceCheckAll = 1
## Predicate Expression Checking
self.PredicateExpressionCheckAll = 0
        # Check that Boolean values and variables of type BOOLEAN do not use explicit comparisons to TRUE or FALSE
        self.PredicateExpressionCheckBooleanValue = 1
        # Check whether non-Boolean comparisons use a compare operator (==, !=, >, <, >=, <=).
        self.PredicateExpressionCheckNonBooleanOperator = 1
        # Check that any comparison of a pointer to zero is done via the NULL type
        self.PredicateExpressionCheckComparisonNullType = 1
## Headers Checking
self.HeaderCheckAll = 0
# Check whether File header exists
self.HeaderCheckFile = 1
# Check whether Function header exists
self.HeaderCheckFunction = 1
        # Check whether the meta data file header comment ends with '##'
        self.HeaderCheckFileCommentEnd = 1
        # Check whether the C file header comment content starts with two spaces
        self.HeaderCheckCFileCommentStartSpacesNum = 1
        # Check that each reference in the C file header comment list begins with a bullet character '-'
        self.HeaderCheckCFileCommentReferenceFormat = 1
        # Check whether the C file header comment has the License immediately after the "Copyright" line
        self.HeaderCheckCFileCommentLicenseFormat = 1
## C Function Layout Checking
self.CFunctionLayoutCheckAll = 0
# Check whether return type exists and in the first line
self.CFunctionLayoutCheckReturnType = 1
# Check whether any optional functional modifiers exist and next to the return type
self.CFunctionLayoutCheckOptionalFunctionalModifier = 1
# Check whether the next line contains the function name, left justified, followed by the beginning of the parameter list
# Check whether the closing parenthesis is on its own line and also indented two spaces
self.CFunctionLayoutCheckFunctionName = 1
# Check whether the function prototypes in include files have the same form as function definitions
self.CFunctionLayoutCheckFunctionPrototype = 1
# Check whether the body of a function is contained by open and close braces that must be in the first column
self.CFunctionLayoutCheckFunctionBody = 1
        # Check whether the data declarations are the first code in a module.
        self.CFunctionLayoutCheckDataDeclaration = 1
        # Check that no variable is initialized as part of its declaration
        self.CFunctionLayoutCheckNoInitOfVariable = 1
        # Check that STATIC is not used for functions
        self.CFunctionLayoutCheckNoStatic = 1
## Include Files Checking
self.IncludeFileCheckAll = 0
        # Check for include files with the same name
        self.IncludeFileCheckSameName = 1
        # Check whether all include file contents are guarded by an #ifndef statement.
        # The #ifndef must be the first line of code following the file header comment;
        # the #endif must appear on the last line in the file.
        self.IncludeFileCheckIfndefStatement = 1
        # Check whether include files contain only public or only private data
        # Check whether include files do NOT contain code or define data variables
        self.IncludeFileCheckData = 1
## Declarations and Data Types Checking
self.DeclarationDataTypeCheckAll = 0
        # Check that there is no use of int, unsigned, char, void, static, long in any .c, .h or .asl files.
        self.DeclarationDataTypeCheckNoUseCType = 1
        # Check that the modifiers IN, OUT, OPTIONAL, and UNALIGNED are used only to qualify arguments to a function and do not appear in a data type declaration
        self.DeclarationDataTypeCheckInOutModifier = 1
        # Check that the EFIAPI modifier is used at the entry points of drivers, events, and member functions of protocols
        self.DeclarationDataTypeCheckEFIAPIModifier = 1
        # Check whether an enumerated type has a 'typedef' and the name is capitalized
        self.DeclarationDataTypeCheckEnumeratedType = 1
        # Check whether a structure type has a 'typedef' and the name is capitalized
        self.DeclarationDataTypeCheckStructureDeclaration = 1
        # Check for duplicate structure definitions
        self.DeclarationDataTypeCheckSameStructure = 1
        # Check whether a union type has a 'typedef' and the name is capitalized
        self.DeclarationDataTypeCheckUnionType = 1
## Naming Conventions Checking
self.NamingConventionCheckAll = 0
# Check whether only capital letters are used for #define declarations
self.NamingConventionCheckDefineStatement = 1
# Check whether only capital letters are used for typedef declarations
self.NamingConventionCheckTypedefStatement = 1
# Check whether the #ifndef at the start of an include file uses both prefix and postfix underscore characters, '_'.
self.NamingConventionCheckIfndefStatement = 1
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
        # Check whether the path name follows the rule
        self.NamingConventionCheckPathName = 1
        # Check whether the variable name follows the rule
        self.NamingConventionCheckVariableName = 1
        # Check whether the function name follows the rule
        self.NamingConventionCheckFunctionName = 1
        # Check that no single-character variable names are used
        self.NamingConventionCheckSingleCharacterVariable = 1
## Doxygen Checking
self.DoxygenCheckAll = 0
        # Check whether the file headers follow the Doxygen special documentation blocks in section 2.3.5
        self.DoxygenCheckFileHeader = 1
        # Check whether the function headers follow the Doxygen special documentation blocks in section 2.3.5
        self.DoxygenCheckFunctionHeader = 1
        # Check whether the first line of text in a comment block is a brief description of the element being documented.
        # The brief description must end with a period.
        self.DoxygenCheckCommentDescription = 1
        # Check that comment lines in the '///< ... text ...' format, if used, appear after the code section.
        self.DoxygenCheckCommentFormat = 1
        # Check that the only Doxygen commands used to mark the code are @bug and @todo.
        self.DoxygenCheckCommand = 1
## Meta-Data File Processing Checking
self.MetaDataFileCheckAll = 0
# Check whether each file defined in meta-data exists
self.MetaDataFileCheckPathName = 1
# Generate a list for all files defined in meta-data files
self.MetaDataFileCheckGenerateFileList = 1
# The path of log file
self.MetaDataFileCheckPathOfGenerateFileList = 'File.log'
# Check whether all Library Instances defined for a given module (or dependent library instance) match the module's type.
# Each Library Instance must specify the Supported Module Types in its INF file,
# and any module specifying the library instance must be one of the supported types.
self.MetaDataFileCheckLibraryInstance = 1
# Check whether a Library Instance has been defined for all dependent library classes
self.MetaDataFileCheckLibraryInstanceDependent = 1
# Check whether the Library Instances specified by the LibraryClasses sections are listed in order of dependencies
self.MetaDataFileCheckLibraryInstanceOrder = 1
        # Check for unnecessary inclusion of library classes in the INF file
        self.MetaDataFileCheckLibraryNoUse = 1
        # Check that if an INF file is specified in the FDF file but not in the DSC file, the INF file is for a binary module only
        self.MetaDataFileCheckBinaryInfInFdf = 1
        # Do not report errors and warnings related to OS include files such as "windows.h" and "stdio.h"
        # Check whether a PCD is set in a DSC file or the FDF file, but not in both.
        self.MetaDataFileCheckPcdDuplicate = 1
        # Check that PCD settings in the FDF file are only related to flash.
        self.MetaDataFileCheckPcdFlash = 1
        # Check for PCDs used in INF files but not specified in DSC or FDF files
        self.MetaDataFileCheckPcdNoUse = 1
        # Check for duplicate GUIDs defined for Guid/Protocol/Ppi
        self.MetaDataFileCheckGuidDuplicate = 1
# Check whether all files under module directory are described in INF files
self.MetaDataFileCheckModuleFileNoUse = 1
# Check whether the PCD is correctly used in C function via its type
self.MetaDataFileCheckPcdType = 1
        # Check whether there is FILE_GUID duplication among different INF files
self.MetaDataFileCheckModuleFileGuidDuplication = 1
# Check Guid Format in INF files
self.MetaDataFileCheckModuleFileGuidFormat = 1
# Check Protocol Format in INF files
self.MetaDataFileCheckModuleFileProtocolFormat = 1
# Check Ppi Format in INF files
self.MetaDataFileCheckModuleFilePpiFormat = 1
# Check Pcd Format in INF files
self.MetaDataFileCheckModuleFilePcdFormat = 1
#
# The check points in this section are reserved
#
# GotoStatementCheckAll = 0
#
self.SpellingCheckAll = 0
        # The directories listed here will not be parsed, split with ','
        self.SkipDirList = []
        # A list of binary file extension names
        self.BinaryExtList = []
        # A list of the only folders to be scanned
        self.ScanOnlyDirList = []
self.ParseConfig()
def ParseConfig(self):
Filepath = os.path.normpath(self.Filename)
if not os.path.isfile(Filepath):
ErrorMsg = "Can't find configuration file '%s'" % Filepath
EdkLogger.error("Ecc", EdkLogger.ECC_ERROR, ErrorMsg, File = Filepath)
LineNo = 0
for Line in open(Filepath, 'r'):
LineNo = LineNo + 1
Line = CleanString(Line)
if Line != '':
List = GetSplitValueList(Line, TAB_EQUAL_SPLIT)
if List[0] not in self.__dict__:
ErrorMsg = "Invalid configuration option '%s' was found" % List[0]
EdkLogger.error("Ecc", EdkLogger.ECC_ERROR, ErrorMsg, File = Filepath, Line = LineNo)
if List[0] == 'ModifierList':
List[1] = GetSplitValueList(List[1], TAB_COMMA_SPLIT)
if List[0] == 'MetaDataFileCheckPathOfGenerateFileList' and List[1] == "":
continue
if List[0] == 'SkipDirList':
List[1] = GetSplitValueList(List[1], TAB_COMMA_SPLIT)
if List[0] == 'BinaryExtList':
List[1] = GetSplitValueList(List[1], TAB_COMMA_SPLIT)
self.__dict__[List[0]] = List[1]
def ShowMe(self):
print self.Filename
for Key in self.__dict__.keys():
print Key, '=', self.__dict__[Key]
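# A hypothetical config.ini fragment that ParseConfig() above would accept;
# option names must match the attributes set in __init__, values follow '=':
#
#   Version = 0.1
#   GeneralCheckLineWidth = 120
#   ModifierList = IN,OUT,OPTIONAL
#   SkipDirList = Build,Conf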
| miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Ecc/Configuration.py | Python | gpl-2.0 | 14,351 |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.conf.agent import common as config
from neutron.conf.plugins.ml2.drivers import ovs_conf
ovs_conf.register_ovs_agent_opts()
config.register_agent_state_opts_helper(cfg.CONF)
| eayunstack/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py | Python | apache-2.0 | 823 |
"""Constants used in tf_cnn_benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
# Results fetched with this prefix will not be reduced. Instead, they will be
# passed as matrices to model's postprocess function.
UNREDUCED_ACCURACY_OP_PREFIX = "tensor:"
# Eval result values with this name prefix will be included in summary.
SIMPLE_VALUE_RESULT_PREFIX = "simple_value:"
class BenchmarkMode(object):
"""Benchmark running mode."""
TRAIN = "training"
EVAL = "evaluation"
TRAIN_AND_EVAL = "training + evaluation"
FORWARD_ONLY = "forward only"
class NetworkTopology(str, Enum):
"""Network topology describes how multiple GPUs are inter-connected.
"""
# DGX-1 uses hybrid cube mesh topology with the following device peer to peer
# matrix:
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
DGX1 = "dgx1"
# V100 in GCP are connected with the following device peer to peer matrix.
# In this topology, bandwidth of the connection depends on if it uses NVLink
# or PCIe link.
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y N Y N N
# 1: Y Y Y Y N N N N
# 2: Y Y Y Y N N N Y
# 3: Y Y Y Y N N N N
# 4: N N N N Y Y Y Y
# 5: Y N N N Y Y Y Y
# 6: N N N N Y Y Y Y
# 7: N N Y N Y Y Y Y
GCP_V100 = "gcp_v100"
| mlperf/training_results_v0.5 | v0.5.0/google/cloud_v100x8/code/resnet/benchmarks/scripts/tf_cnn_benchmarks/constants.py | Python | apache-2.0 | 1,519 |
import sys
from . import main
if __name__ == '__main__':
sys.exit(main())
| davidfoerster/schema-matching | src/schema_matching/__main__.py | Python | mit | 76 |
""" Binance exchange subclass """
import logging
from typing import Dict, List, Tuple
import arrow
import ccxt
from freqtrade.exceptions import (DDosProtection, InsufficientFundsError, InvalidOrderException,
OperationalException, TemporaryError)
from freqtrade.exchange import Exchange
from freqtrade.exchange.common import retrier
logger = logging.getLogger(__name__)
class Binance(Exchange):
_ft_has: Dict = {
"stoploss_on_exchange": True,
"order_time_in_force": ['gtc', 'fok', 'ioc'],
"time_in_force_parameter": "timeInForce",
"ohlcv_candle_limit": 1000,
"trades_pagination": "id",
"trades_pagination_arg": "fromId",
"l2_limit_range": [5, 10, 20, 50, 100, 500, 1000],
}
def stoploss_adjust(self, stop_loss: float, order: Dict) -> bool:
"""
Verify stop_loss against stoploss-order value (limit or price)
Returns True if adjustment is necessary.
"""
return order['type'] == 'stop_loss_limit' and stop_loss > float(order['info']['stopPrice'])
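    # Example (hypothetical order payload): with stop_loss=105.0 and
    # order={'type': 'stop_loss_limit', 'info': {'stopPrice': '100'}},
    # stoploss_adjust() returns True, i.e. the exchange-side trigger is stale.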
@retrier(retries=0)
def stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict) -> Dict:
"""
        Creates a stoploss limit order.
        This stoploss-limit is Binance-specific.
        It may work with a limited number of other exchanges, but this has not been tested yet.
"""
# Limit price threshold: As limit price should always be below stop-price
limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99)
rate = stop_price * limit_price_pct
ordertype = "stop_loss_limit"
stop_price = self.price_to_precision(pair, stop_price)
# Ensure rate is less than stop price
if stop_price <= rate:
raise OperationalException(
'In stoploss limit order, stop price should be more than limit price')
if self._config['dry_run']:
dry_order = self.create_dry_run_order(
pair, ordertype, "sell", amount, stop_price)
return dry_order
try:
params = self._params.copy()
params.update({'stopPrice': stop_price})
amount = self.amount_to_precision(pair, amount)
rate = self.price_to_precision(pair, rate)
order = self._api.create_order(symbol=pair, type=ordertype, side='sell',
amount=amount, price=rate, params=params)
logger.info('stoploss limit order added for %s. '
'stop price: %s. limit: %s', pair, stop_price, rate)
self._log_exchange_response('create_stoploss_order', order)
return order
except ccxt.InsufficientFunds as e:
raise InsufficientFundsError(
f'Insufficient funds to create {ordertype} sell order on market {pair}. '
f'Tried to sell amount {amount} at rate {rate}. '
f'Message: {e}') from e
except ccxt.InvalidOrder as e:
# Errors:
# `binance Order would trigger immediately.`
raise InvalidOrderException(
f'Could not create {ordertype} sell order on market {pair}. '
f'Tried to sell amount {amount} at rate {rate}. '
f'Message: {e}') from e
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not place sell order due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
async def _async_get_historic_ohlcv(self, pair: str, timeframe: str,
since_ms: int, is_new_pair: bool = False,
raise_: bool = False
) -> Tuple[str, str, List]:
"""
        Override to introduce "fast new pair" functionality by detecting the pair's listing date
Does not work for other exchanges, which don't return the earliest data when called with "0"
"""
if is_new_pair:
x = await self._async_get_candle_history(pair, timeframe, 0)
if x and x[2] and x[2][0] and x[2][0][0] > since_ms:
# Set starting date to first available candle.
since_ms = x[2][0][0]
logger.info(f"Candle-data for {pair} available starting with "
f"{arrow.get(since_ms // 1000).isoformat()}.")
return await super()._async_get_historic_ohlcv(
pair=pair, timeframe=timeframe, since_ms=since_ms, is_new_pair=is_new_pair,
raise_=raise_)
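# A minimal sketch of the stoploss price arithmetic used above (invented
# numbers): the limit price sits limit_price_pct below the trigger price.
#   stop_price = 100.0
#   rate = stop_price * 0.99   # 99.0 -- must stay below stop_price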
| flightcom/freqtrade | freqtrade/exchange/binance.py | Python | gpl-3.0 | 4,834 |
#!/usr/bin/env python
from optparse import OptionParser
import re
degenerate = {'R':'AG', 'Y':'CT', 'S':'GC', 'W':'AT', 'K':'GT',
'M':'AC', 'B':'CGT', 'D':'AGT', 'H':'ACT', 'V':'ACG',
'N':'ACGT'}
alt_map = {'ins':'0'}
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'Y':'R', 'R':'Y',
'S':'S', 'W':'W', 'K':'M', 'M':'K', 'B':'V', 'V':'B',
'D':'H', 'H':'D', 'N':'N'}
def revComplement(seq):
for k,v in alt_map.items():
seq = seq.replace(k,v)
bases = list(seq)
bases = reversed([complement.get(base,base) for base in bases])
bases = ''.join(bases)
for k,v in alt_map.items():
bases = bases.replace(v,k)
return bases
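# A quick sanity check (hypothetical): degenerate codes map through the
# complement table too, e.g.
#   revComplement('ACGTN') -> 'NACGT'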
def makeRE(sequence):
exportSeq = ''
for base in sequence:
if base not in degenerate:
exportSeq += base
else:
d = degenerate[base]
d = '[' + d + ']'
exportSeq += d
return exportSeq
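# Example (hypothetical): degenerate bases expand to character classes, e.g.
#   makeRE('ACN') -> 'AC[ACGT]'
#   makeRE('RY')  -> '[AG][CT]'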
def getHeaderParams(header):
spaceSplit = header.split(' ')
currentSeq = spaceSplit[0].strip().replace('>','')
currentHeader = header.strip()
if (len(spaceSplit) > 1) and ('#' in spaceSplit[1]):
currentCluster = spaceSplit[1].split('#')[0]
else:
currentCluster = None
return currentHeader, currentSeq, currentCluster
# Import data from fasta file and save in header and sequence variables
def importFasta(inputFileName):
reads = []
inputFile = open(inputFileName,'r')
currentDNA = ''
currentSeq = ''
currentHeader = ''
currentCluster = ''
for line in inputFile:
if '>' in line:
if currentDNA != '':
readObj = FastaSeq(currentHeader, currentDNA, currentSeq,
currentCluster)
reads.append(readObj)
currentHeader, currentSeq, currentCluster = getHeaderParams(line)
currentDNA = ''
else:
currentDNA += line.strip()
if currentDNA != '':
readObj = FastaSeq(currentHeader, currentDNA, currentSeq,
currentCluster)
reads.append(readObj)
inputFile.close()
return reads
def exportFasta(reads, outFileName):
outFile = open(outFileName, 'w')
for read in reads:
outFile.write(read.header + '\n')
outFile.write(read.seq + '\n')
outFile.close()
# Parse the joined fasta reads for designed primer sequence structure
# Output: sequences that match the designed structure
def removeFwdRevPrimer(reads, fwd, rev):
usableReads = []
forward = makeRE(fwd)
reverse = makeRE(rev)
for read in reads:
        if not re.search(forward, read.seq[0:len(forward)]):
            continue
splitSeq = re.split(forward, read.seq)
fwdRemoved = splitSeq[1]
        if not re.search(reverse,
                         read.seq[len(read.seq)-len(reverse):len(read.seq)]):
            continue
splitSeqRev = re.split(reverse,fwdRemoved)
newRead = FastaSeq(read.header, splitSeqRev[0], read.seq_id,
read.cluster)
usableReads.append(newRead)
return usableReads
# Parse the joined fasta reads for designed primer sequence structure
# Output: sequences that match the designed structure
def removeFwdPrimer(reads, fwd):
usableReads = []
primer = makeRE(fwd)
for read in reads:
if not re.search(primer, read.seq[0:len(primer)]):
continue
splitSeq = re.split(primer, read.seq)
if len(splitSeq[1]) == 0:
continue
newRead = FastaSeq(read.header, splitSeq[1], read.seq_id,
read.cluster)
usableReads.append(newRead)
return usableReads
# Parse the joined fasta reads for designed primer sequence structure
# Output: sequences that match the designed structure
def filtBarcodePrimers(reads, bcLength, fwd, rev):
usableReads = []
forward = makeRE(fwd)
reverse = makeRE(revComplement(rev))
endFwdLoc = bcLength + len(forward)
for read in reads:
        if not (re.search(forward, read.seq[bcLength:endFwdLoc]) and
                re.search(reverse,
                          read.seq[len(read.seq)-len(reverse):len(read.seq)])):
            continue
fwdRemoved = re.split(forward, read.seq)[1]
splitSeqRev = re.split(reverse, fwdRemoved)[0]
newHeader = read.header + ' droplet_bc=' + read.seq[0:bcLength]
newRead = FastaSeq(newHeader, splitSeqRev, read.seq_id,
read.cluster)
usableReads.append(newRead)
return usableReads
# Split a read by a provided degenerate primer sequence
def splitByDegenerate(seq, degenerateSeq):
REseq = makeRE(degenerateSeq)
newSeq = re.split(REseq, seq)
return newSeq
# Trim sequences to a specified length
def trimLength(reads, outputLength):
returnReads = []
for read in reads:
if len(read.seq) >= outputLength:
newSeq = read.seq[0:outputLength]
newRead = FastaSeq(read.header, newSeq, read.seq_id,
read.cluster)
returnReads.append(newRead)
return returnReads
# Select reads based on provided sample list, return selected reads
def selectSamples(sampList, reads):
outReads = []
for s in sampList:
for r in reads:
if s == r.seq_id.split('_')[0]:
outReads.append(r)
return outReads
# Remove reads that map to a provided sample list, return remaining reads
def removeSamples(sampList, reads):
outReads = []
for r in reads:
match = False
for s in sampList:
if s == r.seq_id.split('_')[0]:
match = True
if not match:
outReads.append(r)
return outReads
class FastaSeq(object):
def __init__(self, header, seq, seq_id=None, cluster=None):
self.header = header
self.seq = seq
self.seq_id = seq_id
self.cluster = cluster
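# A minimal usage sketch (hypothetical, not part of the original module):
if __name__ == '__main__':
    read = FastaSeq('>seq1 sample', 'ACGTACGTACGT', seq_id='seq1')
    trimmed = trimLength([read], 8)
    print(trimmed[0].seq)  # ACGTACGT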
| sjspence/epicBarcoder | epicBarcoder/reads.py | Python | mit | 6,018 |
import datetime
import time
import enum
from collections import OrderedDict
# For backward compatibility import FileLock
from pytos.securechange.xml_objects.restapi.step.rule_decommission.rule_decommission import Step_Field_Rule_Decommission
from pytos.securechange.xml_objects.restapi.step.server_decommission.server_decommission import Step_Field_Server_Decommission
from pytos.common.functions.utils import FileLock
from pytos.secureapp.xml_objects.base_types import Base_Link_Target, URL_Link
from pytos.securechange import definitions
from pytos.securechange.xml_objects.base_types import Step_Field_Base
from pytos.securechange.xml_objects.restapi.step.access_request.accessrequest import *
from pytos.common.base_types import XML_List, XML_Object_Base, Flat_XML_Object_Base, Comparable
from pytos.common.logging.definitions import XML_LOGGER_NAME
from pytos.common.functions import str_to_bool, get_xml_node, get_xml_text_value, get_xml_int_value
from pytos.common.functions import convert_timedelta_to_seconds
from pytos.common.definitions.xml_tags import TYPE_ANY, TYPE_ATTRIB, TYPE_DNS, TYPE_IP, TYPE_OBJECT, TYPE_NETWORK, \
TYPE_HOST, SERVICE_OBJECT_TYPE_PREDEFINED, SERVICE_OBJECT_TYPE_PROTOCOL, Elements, Attributes
logger = logging.getLogger(XML_LOGGER_NAME)
# For backward compatibility use FileLock as Ticket_Lock too
Ticket_Lock = FileLock
class TicketList(XML_List):
"""
:type tickets: list[Ticket]
"""
def __init__(self, tickets):
super().__init__(Elements.TICKETS, tickets)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
tickets = []
for ticket_node in xml_node.findall(Elements.TICKET):
tickets.append(Ticket.from_xml_node(ticket_node))
return cls(tickets)
class TicketStatus(enum.Enum):
# TODO: Move this enum to be used for all usage of ticket statuses
Closed = "Ticket Closed"
Cancelled = "Ticket Cancelled"
Rejected = "Ticket Rejected"
Resolved = "Ticket Resolved"
InProgress = "In Progress"
class Ticket(XML_Object_Base):
"""
This class represents a SecureChange ticket object.
"""
EXPIRATION_DATE_FORMAT_STRING = "%Y-%m-%d"
CLOSED_STATUS = "Ticket Closed"
CANCELLED_STATUS = "Ticket Cancelled"
REJECTED_STATUS = "Ticket Rejected"
RESOLVED_STATUS = "Ticket Resolved"
IN_PROGRESS_STATUS = "In Progress"
def __init__(self, workflow, current_step, subject, ticket_id, priority, status, domain_name, sla_status,
sla_outcome, expiration_field_name, expiration_date, steps, comments, requester, application_details,
requester_id=None):
self.steps = XML_List(Elements.STEPS, sorted(steps, key=lambda step: step.id))
self.workflow = workflow
self.current_step = current_step
self.subject = subject
self.id = ticket_id
self.priority = priority
self.status = status
self.domain_name = domain_name
self.sla_status = sla_status
self.sla_outcome = sla_outcome
self.expiration_field_name = expiration_field_name
self.expiration_date = expiration_date
self.comments = comments
self.requester = requester
self.requester_id = requester_id
if application_details is not None:
self.application_details = application_details
super().__init__(Elements.TICKET)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
steps = XML_List.from_xml_node_by_tags(xml_node, Elements.STEPS, Elements.STEP, Ticket_Step)
workflow_node = get_xml_node(xml_node, Elements.WORKFLOW)
workflow = Workflow.from_xml_node(workflow_node)
subject = get_xml_text_value(xml_node, Elements.SUBJECT)
requester = get_xml_text_value(xml_node, Elements.REQUESTER)
requester_id = get_xml_int_value(xml_node, Elements.REQUESTER_ID)
ticket_id = get_xml_int_value(xml_node, Elements.ID)
priority = get_xml_text_value(xml_node, Elements.PRIORITY)
status = get_xml_text_value(xml_node, Elements.STATUS)
domain_name = get_xml_text_value(xml_node, Elements.DOMAIN_NAME)
sla_status = get_xml_text_value(xml_node, Elements.SLA_STATUS)
sla_outcome = get_xml_text_value(xml_node, Elements.SLA_OUTCOME)
expiration_field_name = get_xml_text_value(xml_node, Elements.EXPIRATION_FIELD_NAME)
expiration_date = get_xml_text_value(xml_node, Elements.EXPIRATION_DATE)
current_step_node = get_xml_node(xml_node, Elements.CURRENT_STEP, True)
if current_step_node:
current_step = Current_Step.from_xml_node(current_step_node)
else:
current_step = None
application_details_node = get_xml_node(xml_node, Elements.APPLICATION_DETAILS, True)
if application_details_node:
application_details = Application_Details.from_xml_node(application_details_node)
else:
application_details = None
comments = XML_List.from_xml_node_by_tags(xml_node, Elements.COMMENTS, Elements.COMMENT, Ticket_Comment, True)
return cls(workflow, current_step, subject, ticket_id, priority, status, domain_name, sla_status, sla_outcome,
expiration_field_name, expiration_date, steps, comments, requester, application_details,
requester_id)
def get_step_by_name(self, step_name, case_sensitive=True):
"""
Get the ticket step whose name matches the specified name.
:param step_name: The name of the ticket step that is to be returned.
:type step_name: str
:return: The ticket step whose name matches the specified name.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
:raise ValueError: If a ticket step with the specified name can not be found.
"""
logger.debug("Steps are '%s', looking for step '%s'.", [step.name for step in self.steps], step_name)
for step in self.steps:
if case_sensitive:
if step.name == step_name:
return step
else:
if step.name.lower() == step_name.lower():
return step
raise ValueError("A ticket step with the name '%s' could not be found.", step_name)
def get_current_step(self):
"""
Return the current ticket step.
:return: The current ticket step.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
"""
try:
return self.get_step_by_id(self.current_step.id)
except (AttributeError, ValueError):
message = "The current step for this ticket is not set."
logger.debug(message)
raise KeyError(message)
def get_current_task(self):
"""
Return the last ticket step task.
:return: The last ticket step task.
:rtype: Secure_Change.XML_Objects.REST.Step_Task
"""
return self.get_current_step().get_last_task()
def get_last_task(self):
return self.get_current_task()
def get_last_step(self):
"""
Return the last ticket step.
:return: The last ticket step.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
"""
return self.get_step_by_index(-1)
def get_previous_step(self):
"""
Return the previous ticket step.
:return: The previous ticket step.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
"""
return self.get_step_by_index(-2)
def get_first_step(self):
"""
Return the first ticket step.
:return: The first ticket step.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
"""
return self.get_step_by_index(0)
def get_first_task(self):
"""
Return the first ticket task.
:return: The first ticket task.
:rtype: Secure_Change.XML_Objects.REST.Step_Task
"""
return self.get_first_step().get_last_task()
def get_step_by_id(self, step_id):
"""
Get the ticket step whose ID matches the specified ID.
:param step_id: The ID of the ticket step that is to be returned.
:type step_id: int
:return: The ticket step whose ID matches the specified ID.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
:raise ValueError: If a ticket step with the specified ID can not be found.
"""
logger.debug("Ticket steps IDs are '%s', getting ticket step with ID '%s'", [step.id for step in self.steps],
step_id)
for step in self.steps:
if step.id == step_id:
return step
raise ValueError("A ticket step with the ID '{}' could not be found.".format(step_id))
def get_step_by_index(self, step_index):
"""
Get the ticket step whose index matches the specified index.
:param step_index: The index of the ticket step that is to be returned.
:type step_index: int
:return: The ticket step whose index matches the specified index.
:rtype: Secure_Change.XML_Objects.REST.Ticket_Step
:raise ValueError: If a ticket step with the specified index can not be found.
"""
logger.debug("Ticket steps IDs are '%s', getting ticket step with index '%s'", self.steps, step_index)
# Check that index is not larger than the amount of steps that exist.
step_ids = [step.id for step in self.steps]
logger.debug("Sorted step ID list is %s", step_ids)
return self.get_step_by_id(step_ids[step_index])
def get_last_worked_on_step(self):
"""
Get step that was last worked on
:return: Step
:rtype: int
:raise ValueError: If no step is found
"""
logger.debug("Searching for the step last worked on.")
last_step = None
for step in self.steps:
if any((task for task in step.tasks if task.status == "DONE")) and (
not last_step or step.id > last_step.id):
last_step = step
if not last_step:
raise ValueError("No step is found that was last worked on for ticket {}".format(self.id))
return last_step
def get_rejected_step(self):
logger.info("Getting rejected step")
if self.status != self.REJECTED_STATUS:
return None
for step in self.steps:
for task in step.tasks:
try:
approve_reject_field = task.get_field_list_by_type(Attributes.FIELD_TYPE_APPROVE_REJECT)[0]
except IndexError:
continue
if approve_reject_field.approved and not str_to_bool(approve_reject_field.approved):
return step
logger.debug("No step was found that was rejected for ticket {}".format(self.id))
return None
def get_last_worked_on_step_id(self):
"""
Get step ID that was last worked on
:return: Step ID
:rtype: int
:raise ValueError: If no step ID is found
"""
logger.debug("Searching for ID of the step last worked on.")
last_id = None
for step in self.steps:
if any((task for task in step.tasks if task.status == "DONE")) and (not last_id or step.id > last_id):
last_id = step.id
if not last_id:
raise ValueError("No ID is found for last worked on step for ticket {}".format(self.id))
return last_id
    def is_closed(self):
        return self.status == Ticket.CLOSED_STATUS
    def is_cancelled(self):
        return self.status == Ticket.CANCELLED_STATUS
    def is_rejected(self):
        return self.status == Ticket.REJECTED_STATUS
    def is_resolved(self):
        return self.status == Ticket.RESOLVED_STATUS
    def is_in_progress(self):
        return self.status == Ticket.IN_PROGRESS_STATUS
def get_expiry_days_left(self):
if self.expiration_date is not None:
expiration_date = datetime.datetime.strptime(self.expiration_date, Ticket.EXPIRATION_DATE_FORMAT_STRING)
return (expiration_date.date() - datetime.date.today()).days
raise ValueError("Expiration date is not set!")
def step_index(self, step):
"""
Gets the index of the provided step in the ticket.
:param step: The ticket step.
:return: The index of the step in the ticket.
:rtype int
"""
return self.steps.index(step)
def templatize(self):
"""
Prepare a ticket for use as a template.
:return:
"""
self.sanitize_ids()
del self.steps[1:]
self.current_step = None
@staticmethod
def has_no_pending_tasks(ticket):
return not any(task.is_pending() for task in ticket.get_current_step().tasks)
class Ticket_Comment(XML_Object_Base):
def __init__(self, content, created, task_name, comment_type, user):
self.content = content
self.created = created
self.task_name = task_name
self.type = comment_type
self.user = user
super().__init__(Elements.COMMENT)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
content = get_xml_text_value(xml_node, Elements.CONTENT)
created = get_xml_text_value(xml_node, Elements.CREATED)
task_name = get_xml_text_value(xml_node, Elements.TASK_NAME)
comment_type = get_xml_text_value(xml_node, Elements.TYPE)
user = get_xml_text_value(xml_node, Elements.USER)
return cls(content, created, task_name, comment_type, user)
class Current_Step(XML_Object_Base):
"""
This class represents the current step node in a SecureChange ticket object
"""
def __init__(self, num_id, name):
self.id = num_id
self.name = name
super().__init__(Elements.CURRENT_STEP)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
step_id = get_xml_int_value(xml_node, Elements.ID)
step_name = get_xml_text_value(xml_node, Elements.NAME)
return cls(step_id, step_name)
class Ticket_Step(XML_Object_Base):
"""
This class represents a step node in a SecureChange ticket object
"""
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
step_id = get_xml_int_value(xml_node, Elements.ID)
step_name = get_xml_text_value(xml_node, Elements.NAME)
skipped = get_xml_text_value(xml_node, Elements.SKIPPED)
redone = get_xml_text_value(xml_node, Elements.REDONE)
tasks = XML_List.from_xml_node_by_tags(xml_node, Elements.TASKS, Elements.TASK, Step_Task)
return cls(step_id, step_name, redone, skipped, tasks)
def __init__(self, num_id, name, redone, skipped, tasks):
"""
Initialize the object from parameters.
:param tasks: The task object for this step.
:type tasks: list[Step_Task]
"""
self.id = num_id
self.name = name
self.redone = redone
self.skipped = skipped
self.tasks = tasks
super().__init__(Elements.STEP)
def get_task_by_id(self, task_id):
"""
Get the step task whose ID matches the specified ID.
:param task_id: The ID of the step task that is to be returned.
:type task_id: int
:return: The step task whose ID matches the specified ID.
:rtype: Secure_Change.XML_Objects.REST.Step_Task
:raise ValueError: If a step task with the specified ID can not be found.
"""
for task in self.tasks:
if task.id == task_id:
logger.debug("Returning task with ID '%s': '%s'", task_id, task.to_xml_string())
return task
raise ValueError("A step task with the ID {} can not be found.".format(task_id))
def get_task_by_index(self, task_index):
"""
Get the step task whose index matches the specified index.
:param task_index: The index of the step task that is to be returned.
:type task_index: int
:return: The step task whose index matches the specified index.
:rtype: Secure_Change.XML_Objects.REST.Step_Task
:raise ValueError: If a step task with the specified index can not be found.
"""
num_of_existing_tasks = len(self.tasks)
if num_of_existing_tasks < task_index + 1:
raise ValueError("A task with an index of '{}' can not be found, highest index is '{}'.".format(task_index,
num_of_existing_tasks - 1))
        task_ids = sorted(task.id for task in self.tasks)
logger.debug("Returning task with index of '%s'", task_index)
return self.get_task_by_id(task_ids[task_index])
def get_task_by_name(self, task_name):
"""
Get the step task whose name matches the specified name.
:param task_name: The name of the task that is to be returned.
:type task_name: str
:return: The step task whose ID matches the specified name.
:rtype: Secure_Change.XML_Objects.REST.Step_Task
:raise ValueError: If a step task with the specified name can not be found.
"""
for task in self.tasks:
if task.name == task_name:
logger.debug("Returning task with name '%s': '%s'", task_name, task.to_xml_string())
return task
raise ValueError("A step task with the name {} can not be found.".format(task_name))
def get_last_task(self):
"""
Get the last step task sorted by index.
:return: The step task whose index matches the specified index.
:rtype: Secure_Change.XML_Objects.REST.Step_Task
"""
return self.get_task_by_index(-1)
def is_redone(self):
"""
:return: Has the current step been redone.
:rtype: bool
"""
return str_to_bool(self.redone)
def is_skipped(self):
"""
:return: Has the current step been skipped.
:rtype: bool
"""
return str_to_bool(self.skipped)
# To keep backward compatibility with previous projects
class Step_Field_Checkbox(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "value"
def __init__(self, num_id, name, value, read_only=None):
self.value = value
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_CHECKBOX)
    def is_checked(self):
        return self.value == "true"
def set_checked(self):
self.value = "true"
def set_unchecked(self):
self.value = "false"
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_value = get_xml_text_value(xml_node, Elements.VALUE)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_value, field_read_only)
class Step_Field_Manager(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "text"
def __init__(self, num_id, name, text, read_only=None):
self.text = text
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MANAGER)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_text_area = get_xml_text_value(xml_node, Elements.TEXT)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_text_area, field_read_only)
class Step_Field_Text_Area(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "text"
def __init__(self, num_id, name, text, read_only=None):
self.text = text
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_TEXT_AREA)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_text_area = get_xml_text_value(xml_node, Elements.TEXT)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_text_area, field_read_only)
class Step_Field_Text(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "text"
def __init__(self, num_id, name, text, read_only=None):
self.text = text
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_TEXT)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_text_area = get_xml_text_value(xml_node, Elements.TEXT)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_text_area, field_read_only)
class Step_Field_Multi_Text(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "text_fields"
def __init__(self, num_id, name, text_fields, read_only=None):
self.text_fields = text_fields
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_TEXT)
@classmethod
def from_xml_node(cls, xml_node):
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
text_fields = []
for text_field_node in xml_node.iter(tag=Elements.TEXT_FIELD):
text_field_id = get_xml_int_value(text_field_node, Elements.ID)
text_field_text = get_xml_text_value(text_field_node, Elements.TEXT)
text_fields.append(Text_Field(text_field_id, text_field_text))
return cls(num_id, name, text_fields, read_only)
def set_field_value(self, values):
if isinstance(values, list):
values = [Text_Field(None, v) if not isinstance(v, Text_Field) else v for v in values]
elif not isinstance(values, Text_Field):
values = Text_Field(None, values)
super().set_field_value(values)
class Step_Field_Multi_Text_Area(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "text_fields"
def __init__(self, num_id, name, text_fields, read_only=None):
self.text_fields = text_fields
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_TEXT_AREA)
@classmethod
def from_xml_node(cls, xml_node):
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
text_areas = []
for text_field_node in xml_node.iter(tag=Elements.TEXT_AREA):
text_area_text = get_xml_text_value(text_field_node, Elements.TEXT)
text_areas.append(Text_Area(text_area_text))
return cls(num_id, name, text_areas, read_only)
def set_field_value(self, values):
if isinstance(values, list):
values = [Text_Area(v) if not isinstance(v, Text_Area) else v for v in values]
elif not isinstance(values, Text_Area):
values = Text_Area(values)
super().set_field_value(values)
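# Illustrative note (sketch, not part of the original API): the two
# set_field_value overrides above coerce plain strings into Text_Field /
# Text_Area wrappers, so callers may pass raw strings or wrapper objects
# interchangeably. For example, assuming `multi_text` is a parsed
# Step_Field_Multi_Text instance:
#
#     multi_text.set_field_value(["first line", Text_Field(None, "second line")])
#
# Both elements end up as Text_Field objects before the base-class setter runs.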
class Text_Field(XML_Object_Base):
def __init__(self, num_id, text):
self.id = num_id
self.text = text
super().__init__(Elements.TEXT_FIELD)
def __str__(self):
if self.text:
return self.text
else:
return ""
@classmethod
def from_xml_node(cls, xml_node):
num_id = get_xml_int_value(xml_node, Elements.ID)
text = get_xml_text_value(xml_node, Elements.TEXT)
return cls(num_id, text)
class Text_Area(XML_Object_Base):
def __init__(self, text):
self.text = text
super().__init__(Elements.TEXT_AREA)
def __str__(self):
if self.text:
return self.text
else:
return ""
@classmethod
def from_xml_node(cls, xml_node):
text = get_xml_text_value(xml_node, Elements.TEXT)
return cls(text)
class Step_Field_Approve_Reject(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = ["approved", "reason"]
def __init__(self, num_id, name, approved, reason, read_only=None):
self.approved = approved
self.reason = reason
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_APPROVE_REJECT)
def approve(self, reason):
logger.info("Setting approve/reject field to approved.")
self.approved = "true"
self.reason = reason
def reject(self, reason):
logger.info("Setting approve/reject field to rejected.")
self.approved = "false"
self.reason = reason
def is_approved(self):
if self.approved == "true":
return True
elif self.approved == "false":
return False
else:
return None
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_approved = get_xml_text_value(xml_node, Elements.APPROVED)
field_reason = get_xml_text_value(xml_node, Elements.REASON)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_approved, field_reason, field_read_only)
class Step_Field_Date(Step_Field_Base):
DATE_FORMAT_STRING = "%Y-%m-%d"
FIELD_CONTENT_ATTRIBUTES = "value"
def __init__(self, num_id, name, date_value, read_only=None):
self.value = date_value
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_DATE)
def get_xml_datetime(self):
return self.value + "T00:00:00"
def get_remedy_datetime(self):
REMEDY_DATE_FORMAT_STRING = "%Y-%m-%dT%H:%M:%SZ"
# time.mktime() requires a struct_time, so convert via timetuple() as in get_epoch_datetime().
return time.mktime(datetime.datetime.strptime(self.value, REMEDY_DATE_FORMAT_STRING).timetuple())
def get_epoch_datetime(self):
return time.mktime(datetime.datetime.strptime(self.value, Step_Field_Date.DATE_FORMAT_STRING).timetuple())
def as_datetime_obj(self):
return datetime.datetime.strptime(self.value, Step_Field_Date.DATE_FORMAT_STRING)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_date = get_xml_text_value(xml_node, Elements.VALUE)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_date, field_read_only)
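# Illustrative usage sketch (hypothetical values, relies only on methods defined
# on Step_Field_Date above): a field whose value is "2018-06-01" can be rendered
# either as the XML datetime string or as an epoch timestamp.
def _example_date_field_conversions():
    date_field = Step_Field_Date(None, "Due date", "2018-06-01")
    xml_datetime = date_field.get_xml_datetime()  # "2018-06-01T00:00:00"
    epoch = date_field.get_epoch_datetime()       # float seconds since the epoch (local time)
    return xml_datetime, epoch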
class Step_Field_Time(Step_Field_Base):
TIME_FORMAT_STRING = "%H:%M"
FIELD_CONTENT_ATTRIBUTES = "value"
def __init__(self, num_id, name, time_value, read_only=None):
self.value = time_value
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_TIME)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_time = get_xml_text_value(xml_node, Elements.VALUE)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_time, field_read_only)
class Step_Field_Drop_Down_List(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "selection"
def __init__(self, num_id, name, options, selection, read_only=None):
self.options = XML_List(Elements.OPTIONS, options)
self.selection = selection
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_DROP_DOWN_LIST)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
options = XML_List.from_xml_node_by_tags(xml_node, Elements.OPTIONS, Elements.OPTION, Option_Node)
selection = get_xml_text_value(xml_node, Elements.SELECTION)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(num_id, name, options, selection, read_only)
class Step_Field_Multi_Target(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "targets"
def __init__(self, num_id, name, targets, read_only=None):
self.targets = targets
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_TARGET)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
targets = []
for target_node in xml_node.iter(tag=Elements.TARGET):
target = Multi_Target_Object.from_xml_node(target_node)
targets.append(target)
return cls(num_id, name, targets, read_only)
class Step_Field_Multiple_Selection(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "selected_options"
def __init__(self, num_id, name, options, selected_options, read_only=None):
self.options = XML_List(Elements.OPTIONS, options)
self.selected_options = XML_List(Elements.SELECTED_OPTIONS, selected_options)
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTIPLE_SELECTION)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
options = XML_List.from_xml_node_by_tags(xml_node, Elements.OPTIONS, Elements.OPTION, Option_Node)
selected_options = XML_List.from_xml_node_by_tags(xml_node, Elements.SELECTED_OPTIONS, Elements.SELECTED_OPTION,
Selected_Option)
return cls(num_id, name, options, selected_options, read_only)
class Selected_Option(XML_Object_Base):
def __init__(self, value):
self.value = value
super().__init__(Elements.SELECTED_OPTION)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
value = get_xml_text_value(xml_node, Elements.VALUE)
return cls(value)
def __str__(self):
return self.value
class Option_Node(XML_Object_Base):
def __init__(self, value):
self.value = value
super().__init__(Elements.OPTION)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
value = get_xml_text_value(xml_node, Elements.VALUE)
return cls(value)
class Step_Field_Multi_Network_Object(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "network_objects"
def __init__(self, num_id, name, network_objects, read_only=None):
self.network_objects = network_objects
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_NETWORK_OBJECT)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
network_objects = []
for network_object_node in xml_node.iter(tag=Elements.NETWORK_OBJECT):
network_object_type = network_object_node.attrib[TYPE_ATTRIB]
if network_object_type == TYPE_DNS:
network_object = Network_Object_DNS_Host.from_xml_node(network_object_node)
elif network_object_type == TYPE_IP:
network_object = Network_Object_IP_Address.from_xml_node(network_object_node)
elif network_object_type == TYPE_ANY:
network_object = Network_Object_Any.from_xml_node(network_object_node)
elif network_object_type == TYPE_OBJECT:
network_object = Network_Object_Object.from_xml_node(network_object_node)
else:
raise ValueError("Unknown network object type {}.".format(network_object_type))
network_objects.append(network_object)
return cls(num_id, name, network_objects, read_only)
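# Design note (sketch, not original code): the if/elif dispatch in
# Step_Field_Multi_Network_Object.from_xml_node could equivalently be a dict
# lookup, mirroring the type dictionaries used elsewhere in this module
# (e.g. Group_Change_Node.from_xml_node). A hypothetical equivalent:
#
#     _NETWORK_OBJECT_DISPATCH = {TYPE_DNS: Network_Object_DNS_Host,
#                                 TYPE_IP: Network_Object_IP_Address,
#                                 TYPE_ANY: Network_Object_Any,
#                                 TYPE_OBJECT: Network_Object_Object}
#     try:
#         network_object = _NETWORK_OBJECT_DISPATCH[network_object_type].from_xml_node(network_object_node)
#     except KeyError:
#         raise ValueError("Unknown network object type {}.".format(network_object_type))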
class Step_Field_Multi_Group_Change(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "group_changes"
def __init__(self, num_id, name, implementation_status, group_changes=None, read_only=None):
self.implementation_status = implementation_status
self.group_changes = group_changes
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_GROUP_CHANGE)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
implementation_status = get_xml_text_value(xml_node, Elements.IMPLEMENTATION_STATUS)
group_changes = []
for group_change_node in xml_node.findall(Elements.GROUP_CHANGE):
group_change = Group_Change_Node.from_xml_node(group_change_node)
group_changes.append(group_change)
return cls(num_id, name, implementation_status, group_changes, read_only)
def to_pretty_str(self):
output = "Group Change field '{}'\n:".format(self.name)
for group_change in self.group_changes:
output += "\n{}\n".format(group_change.to_pretty_str())
return output
class Group_Change_Node(XML_Object_Base):
def __init__(self, name, management_name, members, change_implementation_status=None, management_id=None,
change_action=None):
self.name = name
self.management_name = management_name
self.management_id = management_id
self.members = members
self.change_action = change_action
self.change_implementation_status = change_implementation_status
super().__init__(Elements.GROUP_CHANGE)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
name = get_xml_text_value(xml_node, Elements.NAME)
management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)
management_id = get_xml_text_value(xml_node, Elements.MANAGEMENT_ID)
attr_to_class_dict = {TYPE_OBJECT: Group_Change_Member_Object, TYPE_NETWORK: Group_Change_Member_Object,
TYPE_HOST: Group_Change_Member_Object}
members = XML_List.from_xml_node_by_type_dict(xml_node, Elements.MEMBERS, Elements.MEMBER, attr_to_class_dict,
optional=True)
change_implementation_status = get_xml_text_value(xml_node, Elements.CHANGE_IMPLEMENTATION_STATUS)
change_action = get_xml_text_value(xml_node, Elements.CHANGE_ACTION)
return cls(name, management_name, members, change_implementation_status=change_implementation_status,
management_id=management_id, change_action=change_action)
def to_pretty_str(self):
pretty_string = "Modify Group Request '{}':\n".format(self.name)
pretty_string += "\tManagement Name: {}\n".format(self.management_name)
pretty_string += "\tImplementation Status: {}\n".format(self.change_implementation_status)
pretty_string += "\tMembers:\n"
for member in self.members:
pretty_string += member.to_pretty_str()
pretty_string += "\n\t\tMember Status: {}\n".format(member.status)
return pretty_string
class Multi_Target_Object(XML_Object_Base):
def __init__(self, num_id, object_name, object_type, object_details, management_name, management_id, object_UID):
self.id = num_id
self.object_name = object_name
self.object_type = object_type
self.object_details = object_details
self.object_UID = object_UID
self.management_name = management_name
self.management_id = management_id
super().__init__(Elements.TARGET)
def __str__(self):
if all([self.object_name, self.object_details]):
return "{}/{}".format(self.object_name, self.object_details)
else:
return ""
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
object_name = get_xml_text_value(xml_node, Elements.OBJECT_NAME)
object_type = get_xml_text_value(xml_node, Elements.OBJECT_TYPE)
object_details = get_xml_text_value(xml_node, Elements.OBJECT_DETAILS)
management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)
management_id = get_xml_int_value(xml_node, Elements.MANAGEMENT_ID)
object_UID = get_xml_text_value(xml_node, Elements.OBJECT_UID)
return cls(num_id, object_name, object_type, object_details, management_name, management_id, object_UID)
class Group_Change_Member_Object(XML_Object_Base):
def __init__(self, name, num_id, object_type, object_details, management_name, management_id, status, comment,
attr_type, uid=None, object_updated_status=None):
self.name = name
self.id = num_id
self.object_UID = uid
self.object_type = object_type
self.object_details = object_details
self.management_name = management_name
self.management_id = management_id
self.status = status
self.comment = comment
self.object_updated_status = object_updated_status
super().__init__(Elements.MEMBER)
self.set_attrib(TYPE_ATTRIB, attr_type)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
name = get_xml_text_value(xml_node, Elements.NAME)
num_id = get_xml_int_value(xml_node, Elements.ID)
object_type = get_xml_text_value(xml_node, Elements.OBJECT_TYPE)
object_details = get_xml_text_value(xml_node, Elements.OBJECT_DETAILS)
management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)
management_id = get_xml_int_value(xml_node, Elements.MANAGEMENT_ID)
status = get_xml_text_value(xml_node, Elements.STATUS)
comment = get_xml_text_value(xml_node, Elements.COMMENT)
uid = get_xml_text_value(xml_node, Elements.OBJECT_UID)
object_updated_status = get_xml_text_value(xml_node, Elements.OBJECT_UPDATED_STATUS)
attr_type = xml_node.attrib[TYPE_ATTRIB]
return cls(name, num_id, object_type, object_details, management_name, management_id, status, comment,
attr_type, uid, object_updated_status)
def __str__(self):
return self.to_pretty_str()
def to_pretty_str(self):
object_string = ""
if self.management_name:
object_string += "\n\t\tManagement Name: {}".format(self.management_name)
if self.object_type:
object_string += "\n\t\tObject Type: {}".format(self.object_type)
if self.object_details:
object_string += "\n\t\tObject Details: {}".format(self.object_details)
return object_string
class Step_Field_Multi_Service(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "service_objects"
def __init__(self, num_id, name, service_objects, read_only=None):
self.service_objects = service_objects
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_SERVICE)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
service_type_class_dict = {SERVICE_OBJECT_TYPE_PREDEFINED: Predefined_Service_Target,
SERVICE_OBJECT_TYPE_PROTOCOL: Protocol_Service_Target,
TYPE_OBJECT: Object_Access_Request_Target, TYPE_ANY: Any_Service_Target}
service_objects = XML_List.from_xml_node_by_type_dict(xml_node, Elements.SERVICES, Elements.SERVICE,
service_type_class_dict, True)
return cls(num_id, name, service_objects, read_only)
def to_pretty_str(self):
return ', '.join(service.to_pretty_str() for service in self.service_objects)
class Step_Field_Hyperlink(Step_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "url"
def __init__(self, num_id, name, url, read_only=None):
self.url = url
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_HYPERLINK)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
field_name = get_xml_text_value(xml_node, Elements.NAME)
field_id = get_xml_int_value(xml_node, Elements.ID)
field_url = get_xml_text_value(xml_node, Elements.URL)
field_read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
return cls(field_id, field_name, field_url, field_read_only)
class Step_Field_Multi_Hyperlink(Step_Multi_Field_Base):
FIELD_CONTENT_ATTRIBUTES = "hyperlinks"
def __init__(self, num_id, name, hyperlinks, read_only=None):
self.hyperlinks = hyperlinks
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_MULTI_HYPERLINK)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
hyperlinks = []
for hyperlink_node in xml_node.iter(tag=Elements.HYPERLINK):
hyperlinks.append(Hyperlink.from_xml_node(hyperlink_node))
return cls(num_id, name, hyperlinks, read_only)
class Hyperlink(XML_Object_Base):
def __init__(self, num_id, url):
self.id = num_id
self.url = url
super().__init__(Elements.HYPERLINK)
@classmethod
def from_xml_node(cls, xml_node):
num_id = get_xml_int_value(xml_node, Elements.ID)
url = get_xml_text_value(xml_node, Elements.URL)
return cls(num_id, url)
class Workflow(XML_Object_Base):
def __init__(self, num_id, workflow_name):
self.id = num_id
self.name = workflow_name
super().__init__(Elements.WORKFLOW)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
workflow_name = get_xml_text_value(xml_node, Elements.NAME)
workflow_id = get_xml_int_value(xml_node, Elements.ID)
return cls(workflow_id, workflow_name)
class Network_Object_IP_Address(Target_Base):
def __init__(self, num_id, ip_address, netmask):
self.ip_address = ip_address
self.netmask = netmask
super().__init__(Elements.NETWORK_OBJECT, num_id, TYPE_IP)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
ip_address = get_xml_text_value(xml_node, Elements.IP_ADDRESS)
netmask = get_xml_text_value(xml_node, Elements.NETMASK)
return cls(num_id, ip_address, netmask)
def to_pretty_str(self):
try:
return "\n\t\tIP Address: {}\n\t\tSubnet Mask: {}".format(self.ip_address, self.netmask)
except AttributeError:
return "\n\t\tIP Address: {}\n\t\tSubnet Mask: 255.255.255.255".format(self.ip_address)
def __str__(self):
try:
return "{}/{}".format(self.ip_address, self.netmask)
except AttributeError:
return "{}/32".format(self.ip_address)
class Network_Object_Object(Target_Base):
def __init__(self, num_id, object_name, object_type, object_details, management_name, management_id, object_UID):
self.id = num_id
self.object_name = object_name
self.object_type = object_type
self.object_details = object_details
self.object_UID = object_UID
self.management_name = management_name
self.management_id = management_id
super().__init__(Elements.NETWORK_OBJECT, num_id, TYPE_OBJECT)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
object_name = get_xml_text_value(xml_node, Elements.OBJECT_NAME)
object_type = get_xml_text_value(xml_node, Elements.OBJECT_TYPE)
object_details = get_xml_text_value(xml_node, Elements.OBJECT_DETAILS)
management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)
management_id = get_xml_int_value(xml_node, Elements.MANAGEMENT_ID)
object_UID = get_xml_text_value(xml_node, Elements.OBJECT_UID)
return cls(num_id, object_name, object_type, object_details, management_name, management_id, object_UID)
def __str__(self):
return "{} {}".format(self.management_name, self.object_details)
def to_pretty_str(self):
object_string = ""
if self.management_name:
object_string += "\n\t\tManagement Name: {}".format(self.management_name)
if self.object_name:
object_string += "\n\t\tObject Name: {}".format(self.object_name)
if self.object_details:
object_string += "\n\t\tObject Details: {}".format(self.object_details)
if self.object_UID:
object_string += "\n\t\tObject UID: {}".format(self.object_UID)
if self.object_type:
object_string += "\n\t\tObject Type: {}".format(self.object_type)
return object_string
class Network_Object_Any(Target_Base):
def __init__(self, num_id):
super().__init__(Elements.NETWORK_OBJECT, num_id, TYPE_ANY)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
return cls(num_id)
def __str__(self):
return "Any"
@staticmethod
def to_pretty_str():
"""
:rtype: str
"""
return "\n\t\tIP Address: Any"
class Network_Object_DNS_Host(Target_Base):
def __init__(self, num_id, host_name, ip_address, dns_ip_addresses=None):
self.host_name = host_name
# In version 15-4 the ip_address tag was removed;
# in 16-1-HF2 it was reintroduced as a list of IPs.
self.ip_address = ip_address
self.dns_ip_addresses = dns_ip_addresses
super().__init__(Elements.NETWORK_OBJECT, num_id, TYPE_DNS)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
hostname = get_xml_text_value(xml_node, Elements.HOST_NAME)
# In version 15-4 the ip_address tag was removed;
# in 16-1-HF2 it was reintroduced as a list of IPs.
ip_address = get_xml_text_value(xml_node, Elements.IP_ADDRESS)
dns_ip_addresses = XML_List.from_xml_node_by_tags(xml_node, Elements.DNS_IP_ADDRESSES, Elements.IP_ADDRESS,
IpAddress, optional=True)
return cls(num_id, hostname, ip_address, dns_ip_addresses)
def to_pretty_str(self):
target_string = ""
if self.ip_address:
target_string += "\n\t\tIP Address: {}".format(self.ip_address)
elif self.dns_ip_addresses:
target_string += "\n\t\tIP Addresses: {}".format(
'\n\t\t\t\t\t'.join([str(ip) for ip in self.dns_ip_addresses]))
if self.host_name:
target_string += "\n\t\tHostname: {}".format(self.host_name)
return target_string
def __str__(self):
if self.ip_address:
return "{}/{}".format(self.host_name, self.ip_address)
return "{}/{}".format(self.host_name, [str(ip) for ip in self.dns_ip_addresses])
class Step_Task(XML_Object_Base):
"""
This class represents a task node in a ticket step in a SecureChange ticket object.
"""
def __init__(self, num_id, assignee, status, fields, name=None, assignee_id=None):
self.id = num_id
self.assignee = assignee
self.status = status
self.fields = XML_List(Elements.FIELDS, fields)
self.name = name
self.assignee_id = assignee_id
super().__init__(Elements.TASK)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
assignee = get_xml_text_value(xml_node, Elements.ASSIGNEE)
assignee_id = get_xml_int_value(xml_node, Elements.ASSIGNEE_ID)
status = get_xml_text_value(xml_node, Elements.STATUS)
task_name = get_xml_text_value(xml_node, Elements.NAME)
field_type_to_class_dict = {Attributes.FIELD_TYPE_APPROVE_REJECT: Step_Field_Approve_Reject,
Attributes.FIELD_TYPE_CHECKBOX: Step_Field_Checkbox,
Attributes.FIELD_TYPE_DATE: Step_Field_Date,
Attributes.FIELD_TYPE_DROP_DOWN_LIST: Step_Field_Drop_Down_List,
Attributes.FIELD_TYPE_HYPERLINK: Step_Field_Hyperlink,
Attributes.FIELD_TYPE_MANAGER: Step_Field_Manager,
Attributes.FIELD_TYPE_MULTI_ACCESS_REQUEST: Step_Field_Multi_Access_Request,
Attributes.FIELD_TYPE_MULTI_GROUP_CHANGE: Step_Field_Multi_Group_Change,
Attributes.FIELD_TYPE_MULTI_HYPERLINK: Step_Field_Multi_Hyperlink,
Attributes.FIELD_TYPE_MULTI_NETWORK_OBJECT: Step_Field_Multi_Network_Object,
Attributes.FIELD_TYPE_MULTI_SERVICE: Step_Field_Multi_Service,
Attributes.FIELD_TYPE_MULTI_TARGET: Step_Field_Multi_Target,
Attributes.FIELD_TYPE_MULTI_TEXT: Step_Field_Multi_Text,
Attributes.FIELD_TYPE_MULTI_TEXT_AREA: Step_Field_Multi_Text_Area,
Attributes.FIELD_TYPE_MULTIPLE_SELECTION: Step_Field_Multiple_Selection,
Attributes.FIELD_TYPE_TEXT: Step_Field_Text,
Attributes.FIELD_TYPE_TEXT_AREA: Step_Field_Text_Area,
Attributes.FIELD_TYPE_TIME: Step_Field_Time,
Attributes.FIELD_TYPE_RULE_DECOMMISSION: Step_Field_Rule_Decommission,
Attributes.FIELD_TYPE_MULTI_SERVER_DECOMMISSION_REQUEST:
Step_Field_Server_Decommission
}
fields = XML_List.from_xml_node_by_type_dict(xml_node, Elements.FIELDS, Elements.FIELD,
field_type_to_class_dict)
return cls(num_id, assignee, status, fields, task_name, assignee_id)
def get_field_by_id(self, field_id):
"""
Get the task field whose ID matches the specified ID.
:param field_id: The ID of the task field that is to be returned.
:type field_id: int
:return: The task field whose ID matches the specified ID.
:rtype: Can be any of Secure_Change.XML_Objects.REST.Step_Field*
:raise ValueError: If a task field with the specified ID can not be found.
"""
for field in self.fields:
if field.id == field_id:
return field
raise ValueError("A field with an ID of '{}' could not be found.".format(field_id))
def get_field_by_index(self, field_index):
"""
Get the task field whose index matches the specified index.
:param field_index: The index of the task field that is to be returned.
:type field_index: int
:return: The task field whose index matches the specified index.
:rtype: Can be any of Secure_Change.XML_Objects.REST.Step_Field*
:raise ValueError: If a task field with the specified index can not be found.
"""
num_of_existing_fields = len(self.fields)
if num_of_existing_fields < field_index + 1:
raise ValueError("A field with an index of '{}' can not be found, "
"highest index is '{}'.".format(field_index, num_of_existing_fields - 1))
field_ids = []
for field in self.fields:
field_ids.append(field.id)
field_ids.sort()
return self.get_field_by_id(field_ids[field_index])
def get_field_list_by_name(self, field_name, case_sensitive=True):
"""
Get the task fields whose names match the specified name.
:param field_name: The name of the task field that is to be returned.
:type field_name: string
:return: The task fields whose names match the specified name.
:rtype: list[T <= Secure_Change.XML_Objects.REST.Step_Field_Base]
"""
field_list = []
logger.debug("Field names are '%s', looking for field '%s'.", [field.name for field in self.fields], field_name)
for field in self.fields:
if case_sensitive:
if field.name == field_name:
field_list.append(field)
else:
if field.name.lower() == field_name.lower():
field_list.append(field)
return field_list
def get_field_list_by_name_and_type(self, field_name, field_type, case_sensitive=True):
"""
Get the task fields whose names and types match the specified name and type.
:param field_name: The name of the task field that is to be returned.
:type field_name: string
:param field_type: The type of the task field that is to be returned.
:type field_type: string
:return: The task fields that match the specified name and type.
:rtype: list[Secure_Change.XML_Objects.REST.Step_Field_Base]
"""
field_list = []
for field in self.fields:
if case_sensitive:
if field.name == field_name and field.get_field_type() == field_type:
field_list.append(field)
else:
if field.name.lower() == field_name.lower() and field.get_field_type() == field_type:
field_list.append(field)
return field_list
def get_fields_by_name(self, field_name, case_sensitive=True):
"""
Get the task fields whose names match the specified name.
:param field_name: The name of the task field that is to be returned.
:type field_name: string
:return: The task fields whose names match the specified name.
:rtype: A generator object of Secure_Change.XML_Objects.REST.Step_Field*
"""
logger.debug("Field names are '%s', looking for field '%s'.", [field.name for field in self.fields], field_name)
for field in self.fields:
if case_sensitive:
if field.name.strip() == field_name.strip():
yield field
else:
if field.name.lower().strip() == field_name.lower().strip():
yield field
def get_field_list_by_type(self, field_type):
"""
Get the task fields whose types match the specified type.
:param field_type: The type of the task field that is to be returned.
:type field_type: string
:return: The task fields whose types match the specified type.
:rtype: list[T <= Step_Field_Base]
"""
field_list = []
for field in self.fields:
if field.get_field_type() == field_type:
field_list.append(field)
return field_list
def get_fields_by_type(self, field_type):
"""
Get the task fields whose types match the specified type.
:param field_type: The type of the task field that is to be returned.
:type field_type: string
:return: The task fields whose types match the specified type.
:rtype: A generator object of T <= Step_Field_Base
"""
for field in self.fields:
if field.get_field_type() == field_type:
yield field
def mark_as_done(self):
"""Mark the current ticket step as done."""
self.status = "DONE"
def is_assigned(self):
    """Check if the task is assigned."""
    return self.status == "ASSIGNED"
def is_waiting_to_be_assigned(self):
    """Check if the task is waiting to be assigned."""
    return self.status == "WAITING_TO_BE_ASSIGNED"
def is_pending(self):
    """Check if the task is pending."""
    return self.status == "PENDING"
def is_done(self):
return self.status == "DONE"
def remove_all_fields(self):
"""
Remove all the fields from the task.
Usually used to handle a case where there are read only fields in the task and they cannot be updated.
"""
self.fields = None
def remove_read_only_fields(self):
"""
Remove all read-only fields from a task.
"""
self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if
not field.read_only or not str_to_bool(field.read_only)])
def remove_access_request_field(self):
"""
Remove the access request field from a task.
Use this when putting a task whose access request field contains risk analysis or verifier results.
"""
self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if
field.FIELD_CONTENT_ATTRIBUTES != Elements.ACCESS_REQUESTS])
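# Illustrative usage sketch (hypothetical helper, not part of the original API):
# approving the first approve/reject field of a task and marking the task as
# done, using only methods defined on Step_Task and Step_Field_Approve_Reject
# above. It assumes Attributes.FIELD_TYPE_APPROVE_REJECT is the same type key
# used in the field_type_to_class_dict of from_xml_node.
def _example_approve_task(task, reason):
    approve_fields = task.get_field_list_by_type(Attributes.FIELD_TYPE_APPROVE_REJECT)
    if not approve_fields:
        raise ValueError("Task '{}' has no approve/reject field.".format(task.name))
    approve_fields[0].approve(reason)
    task.mark_as_done()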
class User_List(XML_List):
"""
:type users: list[Group|User]
"""
def __init__(self, users):
super().__init__(Elements.USERS, users)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
users = []
for user_node in xml_node.findall(Elements.USER):
user_type = None
try:
user_type = user_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
except (AttributeError, TypeError, KeyError) as error:
logger.error(
"Failed to get the type of the User_List element. Assuming it's User. Error: {}".format(error))
if user_type == "group":
user = Group.from_xml_node(user_node)
else:
user = User.from_xml_node(user_node)
users.append(user)
return cls(users)
class User(XML_Object_Base):
def __init__(self, user_id, user_name, user_email, out_of_office_from, out_of_office_until, send_email, notes,
ldapDn, first_name=None, last_name=None, display_name=None, groups=None, roles=None, domains=None,
auth_method=None, user_type_attrib=None, user_type=None, origin=None, managed_locally=None,
ldap_configuration=None, user_phone=None):
self.id = user_id
self.name = user_name
self.first_name = first_name
self.last_name = last_name
self.display_name = display_name
self.email = user_email
self.phone = user_phone
self.out_of_office_from = out_of_office_from
self.out_of_office_until = out_of_office_until
self.send_email = send_email
self.notes = notes
self.ldapDn = ldapDn
self.member_of = groups
self.roles = roles
self.domains = domains
self.authentication_method = auth_method
self.type = user_type
self.origin_type = origin
self.managed_locally = managed_locally
self.ldap_configuration = ldap_configuration
# FIXME: name tag should be reused for that one
self.user_name = user_name
super().__init__(Elements.USER)
if user_type_attrib:
self.set_attrib(Attributes.XSI_NAMESPACE_TYPE, user_type_attrib)
def get_name_fields(self):
return self.name, self.display_name
@classmethod
def instantiate_ldap_user_object(cls, user_name, managed_locally=None, ldap_configuration=None):
# TODO: Check if managed_locally should be set as enum instead of string
"""
:param user_name: the name of the user to import from ldap
:type user_name: str
:param managed_locally: "true" or "false"
:type managed_locally: str
:param ldap_configuration: the list of the ldap config IDs to import from
:type ldap_configuration: int|str|list|tuple|XML_List
:return: user object ready to post to SC in order to import user
:rtype: User
"""
if isinstance(ldap_configuration, (int, str)):
ldap_config_ids = [Flat_XML_Object_Base(Elements.ID, content=ldap_configuration)]
ldap_configuration = XML_List(Elements.LDAP_CONFIGURATION, ldap_config_ids)
elif isinstance(ldap_configuration, (list, tuple)):
ldap_config_ids = [Flat_XML_Object_Base(Elements.ID, content=ldap_id) for ldap_id in ldap_configuration]
ldap_configuration = XML_List(Elements.LDAP_CONFIGURATION, ldap_config_ids)
return cls(None, user_name, None, None, None, None, None, None, origin="LDAP",
ldap_configuration=ldap_configuration, managed_locally=managed_locally)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
user_id = get_xml_int_value(xml_node, Elements.ID)
user_name = get_xml_text_value(xml_node, Elements.NAME)
first_name = get_xml_text_value(xml_node, Elements.FIRST_NAME)
last_name = get_xml_text_value(xml_node, Elements.LAST_NAME)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
user_email = get_xml_text_value(xml_node, Elements.EMAIL)
out_of_office_from = get_xml_text_value(xml_node, Elements.OUT_OF_OFFICE_FROM)
out_of_office_until = get_xml_text_value(xml_node, Elements.OUT_OF_OFFICE_UNTIL)
send_email = get_xml_text_value(xml_node, Elements.SEND_EMAIL)
user_phone = get_xml_text_value(xml_node, Elements.PHONE)
notes = get_xml_text_value(xml_node, Elements.NOTES)
ldapDn = get_xml_text_value(xml_node, Elements.LDAPDN)
user_type = get_xml_text_value(xml_node, Elements.TYPE)
groups = []
groups_node = get_xml_node(xml_node, Elements.MEMBER_OF, True)
if groups_node:
for group_node in groups_node.iter(tag=Elements.USER):
groups.append(Group.from_xml_node(group_node))
roles = []
roles_node = get_xml_node(xml_node, Elements.ROLES, True)
if roles_node:
roles = Roles.from_xml_node(roles_node)
domains = []
domains_node = get_xml_node(xml_node, Elements.DOMAINS, True)
if domains_node:
domains = Domains.from_xml_node(domains_node)
auth_method = get_xml_text_value(xml_node, Elements.AUTHENTICATION_METHOD)
try:
user_type_attrib = xml_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
except KeyError:
user_type_attrib = None
# user_phone is passed by keyword so that it does not land in the unrelated 'origin' parameter.
return cls(user_id, user_name, user_email, out_of_office_from, out_of_office_until, send_email, notes, ldapDn,
           first_name, last_name, display_name, groups, roles, domains, auth_method, user_type_attrib,
           user_type, user_phone=user_phone)
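# Illustrative usage sketch (hypothetical values): instantiate_ldap_user_object
# accepts a single LDAP configuration ID or a list of IDs and normalizes both
# into an XML_List, so the two calls below build equivalent payloads for a
# single configuration.
def _example_build_ldap_import_user():
    from_single_id = User.instantiate_ldap_user_object("jdoe", managed_locally="false", ldap_configuration=1)
    from_id_list = User.instantiate_ldap_user_object("jdoe", managed_locally="false", ldap_configuration=[1])
    return from_single_id, from_id_list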
class Group_Permission(XML_Object_Base):
def __init__(self, permission_name, permission_value):
self.name = permission_name
self.value = permission_value
super().__init__(Elements.GROUPPERMISSION)
@classmethod
def from_xml_node(cls, xml_node):
permission_name = get_xml_text_value(xml_node, Elements.NAME)
permission_value = get_xml_text_value(xml_node, Elements.VALUE)
return cls(permission_name, permission_value)
class Group_Permissions(XML_List):
"""
:type group_permissions: list[Group_Permission]
"""
def __init__(self, group_permissions):
super().__init__(Elements.GROUPPERMISSIONS, group_permissions)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
group_permissions = []
for permission_node in xml_node.iter(tag=Elements.GROUPPERMISSION):
group_permissions.append(Group_Permission.from_xml_node(permission_node))
return cls(group_permissions)
class Member_User(XML_Object_Base):
def __init__(self, user_id, user_name, link, user_type):
self.id = user_id
self.name = user_name
self.link = link
super().__init__(Elements.USER)
self.set_attrib(Attributes.XSI_NAMESPACE_TYPE, user_type)
@classmethod
def from_xml_node(cls, xml_node):
user_id = get_xml_int_value(xml_node, Elements.ID)
user_name = get_xml_text_value(xml_node, Elements.NAME)
link = get_xml_text_value(xml_node, Elements.LINK)
user_type = xml_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
return cls(user_id, user_name, link, user_type)
class Members(XML_List):
def __init__(self, members_list, partial_list):
self.partial_list = partial_list
super().__init__(Elements.MEMBERS, members_list)
@classmethod
def from_xml_node(cls, xml_node):
partial_list = get_xml_text_value(xml_node, Elements.PARTIAL_LIST)
members_list = []
for user_node in xml_node.iter(tag=Elements.USER):
members_list.append(Member_User.from_xml_node(user_node))
members_list.sort(key=lambda member: member.id)
return cls(members_list, partial_list)
class Group(XML_Object_Base):
def __init__(self, user_id, user_name, user_email, out_of_office_from, out_of_office_until, send_email, notes,
ldapDn, group_permission, members, user_type=None, roles=None):
self.id = user_id
self.name = user_name
self.email = user_email
self.out_of_office_from = out_of_office_from
self.out_of_office_until = out_of_office_until
self.send_email = send_email
self.notes = notes
self.ldapDn = ldapDn
self.group_permission = group_permission
self.members = members
self.type = user_type
self.roles = roles
super().__init__(Elements.GROUP)
def get_name_fields(self):
return self.name,
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
user_id = get_xml_int_value(xml_node, Elements.ID)
user_name = get_xml_text_value(xml_node, Elements.NAME)
user_email = get_xml_text_value(xml_node, Elements.EMAIL)
out_of_office_from = get_xml_text_value(xml_node, Elements.OUT_OF_OFFICE_FROM)
out_of_office_until = get_xml_text_value(xml_node, Elements.OUT_OF_OFFICE_UNTIL)
send_email = get_xml_text_value(xml_node, Elements.SEND_EMAIL)
notes = get_xml_text_value(xml_node, Elements.NOTES)
user_type = get_xml_text_value(xml_node, Elements.TYPE)
ldapDn = get_xml_text_value(xml_node, Elements.LDAPDN)
g_permission_node = get_xml_node(xml_node, Elements.GROUPPERMISSIONS, True)
if g_permission_node:
group_permission = Group_Permissions.from_xml_node(g_permission_node)
else:
group_permission = None
members_node = get_xml_node(xml_node, Elements.MEMBERS, True)
if members_node:
members = Members.from_xml_node(members_node)
else:
members = []
roles_node = get_xml_node(xml_node, Elements.ROLES, True)
if roles_node:
roles = Roles.from_xml_node(roles_node)
else:
roles = []
return cls(user_id, user_name, user_email, out_of_office_from, out_of_office_until, send_email, notes, ldapDn,
group_permission, members, user_type, roles)
class Role(XML_Object_Base):
def __init__(self, role_id, role_name):
self.id = role_id
self.name = role_name
super().__init__(Elements.ROLE)
@classmethod
def from_xml_node(cls, xml_node):
role_id = get_xml_int_value(xml_node, Elements.ID)
role_name = get_xml_text_value(xml_node, Elements.NAME)
return cls(role_id, role_name)
class Roles(XML_List):
"""
:type roles_list: list[Role]
"""
def __init__(self, roles_list):
self.roles = roles_list
super().__init__(Elements.ROLES, roles_list)
@classmethod
def from_xml_node(cls, xml_node):
roles_list = []
for role_node in xml_node.iter(tag=Elements.ROLE):
roles_list.append(Role.from_xml_node(role_node))
return cls(roles_list)
class Domain(XML_Object_Base):
def __init__(self, domain_id, domain_name):
self.id = domain_id
self.name = domain_name
super().__init__(Elements.DOMAIN)
@classmethod
def from_xml_node(cls, xml_node):
domain_id = get_xml_int_value(xml_node, Elements.ID)
domain_name = get_xml_text_value(xml_node, Elements.NAME)
return cls(domain_id, domain_name)
class Domains(XML_List):
"""
:type domains: list[Domain]
"""
def __init__(self, domains):
self.domains = domains
super().__init__(Elements.DOMAINS, domains)
@classmethod
def from_xml_node(cls, xml_node):
domains = []
for domain_node in xml_node.iter(tag=Elements.DOMAIN):
domains.append(Domain.from_xml_node(domain_node))
return cls(domains)
class MultiGroupChangeImplementResult(XML_Object_Base):
def __init__(self, task_id, task_name, group_changes, implementation_status):
self.id = task_id
self.name = task_name
self.implementation_status = implementation_status
self.group_changes = group_changes
super().__init__(Elements.MULTI_ACCESS_REQUESTS)
@classmethod
def from_xml_node(cls, xml_node):
task_id = get_xml_int_value(xml_node, Elements.ID)
task_name = get_xml_text_value(xml_node, Elements.NAME)
implementation_status = get_xml_text_value(xml_node, Elements.IMPLEMENTATION_STATUS)
group_changes = []
for group_change_node in xml_node.findall(Elements.GROUP_CHANGE):
group_change = Group_Change_Node.from_xml_node(group_change_node)
group_changes.append(group_change)
return cls(task_id, task_name, group_changes, implementation_status)
class Ticket_History_Activities(XML_List):
"""
This class represents a SecureChange ticket history object.
"""
def __init__(self, ticket_id, ticket_activities):
self.ticket_id = ticket_id
self._step_durations = {}
super().__init__(Elements.TICKET_HISTORY_ACTIVITIES, ticket_activities)
self.sort()
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
ticket_id = get_xml_int_value(xml_node, Elements.TICKET_ID)
history_activities = []
for history_activity_node in xml_node.iter(tag=Elements.TICKET_HISTORY_ACTIVITY):
history_activities.append(Ticket_History_Activity.from_xml_node(history_activity_node))
return cls(ticket_id, history_activities)
def sort(self):
self._list_data = sorted(self._list_data, key=lambda x: x.as_time_obj_with_tz())
def get_step_durations(self, time_unit_in_seconds=definitions.Time_Units.Seconds.value):
step_times = OrderedDict()
step_durations = OrderedDict()
previous_step_name = None
step_index = None
first_step_name = self[0].step_name
for step_index, step in enumerate(self):
if step.step_name == first_step_name:
continue
elif not step.step_name:
logger.warning("Step name for entry in index %s in ticket history is empty.", step_index)
if step.step_name != previous_step_name:
step_times[step.step_name] = {"start": self[step_index].as_time_obj()}
if previous_step_name:
step_times[previous_step_name]["end"] = self[step_index - 1].as_time_obj()
previous_step_name = step.step_name
try:
    # Close the duration window of the last step using the final history entry.
    step_times[previous_step_name]["end"] = self[step_index].as_time_obj()
except KeyError:
    pass
for step in step_times:
try:
step_durations[step] = convert_timedelta_to_seconds(
step_times[step]["end"] - step_times[step]["start"]) / time_unit_in_seconds
except KeyError:
logger.error("Failed to get step duration for step name '{}'".format(step))
pass
return step_durations
def get_step_states(self):
step_states = OrderedDict()
for history_item in self._list_data:
step_name = history_item.step_name
try:
step_state = definitions.Ticket_Activity.find_matching_state(history_item.description)
except ValueError:
logger.debug("Step: {}, state: '{}' was not found - ignoring it".format(step_name,
history_item.description))
continue
step_states[step_name] = step_state
return step_states
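# Illustrative usage sketch (hypothetical helper, uses only methods defined on
# Ticket_History_Activities above): step durations are returned in the time
# unit given by time_unit_in_seconds, so passing 60 yields minutes.
def _example_step_durations_in_minutes(history):
    durations = history.get_step_durations(time_unit_in_seconds=60)
    states = history.get_step_states()
    # Pair each step's duration (in minutes) with its last recorded state.
    return {step: (durations.get(step), states.get(step)) for step in states}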
class Ticket_History_Activity(XML_Object_Base):
DATE_STRING_LENGTH = 29
LEGACY_DATE_STRING_LENGTH = 25
OTHER_DATE_STRING_LENGTH = 24
OTHER_OTHER_DATE_STRING_LENGTH = 20
DATE_FORMAT_STRING = "%Y-%m-%dT%H:%M:%S.%f%z"
LEGACY_DATE_FORMAT_STRING = "%Y-%m-%dT%H:%M:%S%z"
OTHER_DATE_FORMAT_STRING = "%Y-%m-%dT%H:%M:%S.%fZ"
OTHER_OTHER_DATE_FORMAT_STRING = "%Y-%m-%dT%H:%M:%SZ"
def __init__(self, date, performed_by, description, step_name, task_name):
self.date = date
self.performed_by = performed_by
self.description = description
self.step_name = step_name
self.task_name = task_name
super().__init__(Elements.TICKET_HISTORY_ACTIVITY)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
date = get_xml_text_value(xml_node, Elements.DATE)
performed_by = get_xml_text_value(xml_node, Elements.PERFORMED_BY)
description = get_xml_text_value(xml_node, Elements.DESCRIPTION)
step_name = get_xml_text_value(xml_node, Elements.STEP_NAME)
task_name = get_xml_text_value(xml_node, Elements.TASK_NAME)
return cls(date, performed_by, description, step_name, task_name)
def as_time_obj(self):
    date_length = len(self.date)
    if date_length == Ticket_History_Activity.DATE_STRING_LENGTH:
        # Drop the colon from the "+HH:MM" UTC offset so that %z can parse it.
        time_string = self.date[:-3] + self.date[-2:]
        return datetime.datetime.strptime(time_string, Ticket_History_Activity.DATE_FORMAT_STRING)
    elif date_length == Ticket_History_Activity.LEGACY_DATE_STRING_LENGTH:
        time_string = self.date[:-3] + self.date[-2:]
        return datetime.datetime.strptime(time_string, Ticket_History_Activity.LEGACY_DATE_FORMAT_STRING)
    elif date_length == Ticket_History_Activity.OTHER_DATE_STRING_LENGTH:
        # "Z"-suffixed formats have no offset colon to strip, so parse the date as-is.
        return datetime.datetime.strptime(self.date, Ticket_History_Activity.OTHER_DATE_FORMAT_STRING)
    elif date_length == Ticket_History_Activity.OTHER_OTHER_DATE_STRING_LENGTH:
        return datetime.datetime.strptime(self.date, Ticket_History_Activity.OTHER_OTHER_DATE_FORMAT_STRING)
    else:
        raise ValueError("Unknown date string format: {}".format(self.date))
def as_time_obj_with_tz(self):
dt = self.as_time_obj()
if dt.tzinfo:
return dt
else:
return dt.replace(tzinfo=datetime.timezone.utc)
class Application_Details(Base_Link_Target):
def __init__(self, app_id, display_name, name, link):
super().__init__(Elements.APPLICATION_DETAILS, app_id, display_name, name, link)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
app_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
link = URL_Link.from_xml_node(get_xml_node(xml_node, Elements.LINK))
return cls(app_id, display_name, name, link)
class Comment(XML_Object_Base):
def __init__(self, comment, comment_tag=Elements.COMMENT):
self.comment = comment
super().__init__(comment_tag)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
raise NotImplementedError("from_xml_node must be implemented by derived classes.")
class Reassign_Comment(Comment):
def __init__(self, comment):
super().__init__(comment, Elements.REASSIGN_TASK_COMMENT)
class Redo_Comment(Comment):
def __init__(self, comment):
super().__init__(comment, Elements.REDO_STEP_COMMENT)
class IpAddress(XML_Object_Base):
def __init__(self, ip_address):
self.ip_address = ip_address
super().__init__(Elements.IP_ADDRESS)
@classmethod
def from_xml_node(cls, xml_node):
return cls(xml_node.text)
def __str__(self):
return self.ip_address
class ExcludedDevice(Flat_XML_Object_Base, Comparable):
def __init__(self, device_id):
self.id = device_id
super().__init__(xml_tag=Elements.ID, content=device_id)
@classmethod
def from_xml_node(cls, xml_node):
return cls(xml_node.text)
def __str__(self):
return str(self.id)
def __repr__(self):
return str(self)
def _key(self):
return self.id,
class ExcludedDevicesList(XML_List):
def __init__(self, devices):
"""
:param devices:
:type devices: list[int|ExcludedDevice]
"""
self.excluded_devices = []
for device in devices:
if isinstance(device, int):
self.excluded_devices.append(ExcludedDevice(device))
elif isinstance(device, ExcludedDevice):
self.excluded_devices.append(device)
else:
raise TypeError("Elements of 'devices' must be of type int or ExcludedDevice")
super().__init__(Elements.DEVICE_IDS, self.excluded_devices)
@classmethod
def from_xml_node(cls, xml_node):
device_ids = []
for device_node in xml_node.iter(tag=Elements.ID):
device_ids.append(ExcludedDevice.from_xml_node(device_node))
return cls(device_ids)
class RejectComment(Comment):
def __init__(self, comment):
super().__init__(comment, Elements.REJECT_COMMENT)
|
Tufin/pytos
|
pytos/securechange/xml_objects/rest.py
|
Python
|
apache-2.0
| 87,654
|
import os
import xml.etree.ElementTree as ET
import json
import argparse
from PIL import Image
def sprite_data(attrib):
return {
"frame": {
"x": int(attrib['x']),
"y": int(attrib['y']),
"w": int(attrib['width']),
"h": int(attrib['height'])
},
"rotated": False,
"trimmed": False,
"spriteSourceSize": {
"x": 0,
"y": 0,
"w": int(attrib['width']),
"h": int(attrib['height'])
},
"sourceSize": {
"w": int(attrib['width']),
"h": int(attrib['height'])
}
}
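# Illustrative example (hypothetical values): an XML node such as
#   <SubTexture name="button" x="2" y="4" width="32" height="16"/>
# produces a TexturePacker-style frame entry:
#   {"frame": {"x": 2, "y": 4, "w": 32, "h": 16}, "rotated": False, ...}
# The "rotated"/"trimmed" flags are emitted as constants because this
# converter does not read them from the source XML.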
def parse_xml(filename):
tree = ET.parse(filename)
root = tree.getroot()
data = {}
for child in root:
data[child.attrib['name']] = sprite_data(child.attrib)
return root, data
def image_meta(filename):
im = Image.open(filename)
return {
"version": "1.0",
"image": os.path.split(filename)[1],
"size": {
"w": im.size[0],
"h": im.size[1]
}
}
def convert(path):
base_path = os.path.split(path)[0]
root, frames = parse_xml(path)
meta = image_meta(os.path.join(base_path, root.attrib['imagePath']))
data = {"frames": frames, "meta": meta}
# open() replaces the Python 2-only file() builtin and closes the file on exit.
with open(os.path.splitext(path)[0] + '.json', 'w') as json_file:
    json.dump(data, json_file, indent=2)
def main():
parser = argparse.ArgumentParser(
description='Convert XML theme from feathers to JSON for gown.js.')
parser.add_argument('path', type=str,
help='path to xml file')
args = parser.parse_args()
convert(args.path)
if __name__ == '__main__':
main()
|
ChainedLupine/ludum-dare-24-jam-entry
|
web/gown.js/themes/xml_to_json.py
|
Python
|
gpl-2.0
| 1,708
|
"""
Copyright 2017 Oliver Smith
This file is part of pmbootstrap.
pmbootstrap is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pmbootstrap is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pmbootstrap. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pytest
# Import from parent directory
pmb_src = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(pmb_src)
import pmb.chroot.apk_static
import pmb.parse.apkindex
import pmb.helpers.logging
import pmb.parse.bootimg
@pytest.fixture
def args(request):
import pmb.parse
sys.argv = ["pmbootstrap.py", "chroot"]
args = pmb.parse.arguments()
args.log = args.work + "/log_testsuite.txt"
pmb.helpers.logging.init(args)
request.addfinalizer(args.logfd.close)
return args
def test_bootimg_invalid_path(args):
with pytest.raises(RuntimeError) as e:
pmb.parse.bootimg(args, "/invalid-path")
assert "Could not find file" in str(e.value)
def test_bootimg_kernel(args):
path = pmb_src + "/test/testdata/bootimg/kernel-boot.img"
with pytest.raises(RuntimeError) as e:
pmb.parse.bootimg(args, path)
assert "heimdall-isorec" in str(e.value)
def test_bootimg_invalid_file(args):
with pytest.raises(RuntimeError) as e:
pmb.parse.bootimg(args, __file__)
assert "File is not an Android bootimg" in str(e.value)
def test_bootimg_normal(args):
path = pmb_src + "/test/testdata/bootimg/normal-boot.img"
output = {"base": "0x80000000",
"kernel_offset": "0x00008000",
"ramdisk_offset": "0x04000000",
"second_offset": "0x00f00000",
"tags_offset": "0x0e000000",
"pagesize": "2048",
"cmdline": "bootopt=64S3,32S1,32S1",
"qcdt": "false"}
assert pmb.parse.bootimg(args, path) == output
def test_bootimg_qcdt(args):
path = pmb_src + "/test/testdata/bootimg/qcdt-boot.img"
output = {"base": "0x80000000",
"kernel_offset": "0x00008000",
"ramdisk_offset": "0x04000000",
"second_offset": "0x00f00000",
"tags_offset": "0x0e000000",
"pagesize": "2048",
"cmdline": "bootopt=64S3,32S1,32S1",
"qcdt": "true"}
assert pmb.parse.bootimg(args, path) == output
|
lawl/pmbootstrap
|
test/test_bootimg.py
|
Python
|
gpl-3.0
| 2,767
|
import unittest
class Tester(unittest.TestCase):
def test_list_comprehension(self):
a = [(x, y)
for x in [1, 2, 3]
for y in [3, 1, 4]
if x != y]
b = []
for x in [1, 2, 3]:
for y in [3, 1, 4]:
if x != y:
b.append((x, y))
self.assertEqual(a, b)
def test_dict_comprehension(self):
_dict = {x: x**2 for x in (2, 4, 6)}
self.assertEqual(
_dict,
{2: 4, 4: 16, 6: 36}
)
_dict = {k: v for k, v in [(2, 1), (4, 2), (6, 3)]}
self.assertEqual(
_dict,
dict([(2, 1), (4, 2), (6, 3)])
)
_dict = {k: v for k, v in _dict.items()}
self.assertEqual(
_dict,
dict([(2, 1), (4, 2), (6, 3)])
)
        _dict = {x: x for x in 'abcabcaaadrr' if x not in 'abc'}
self.assertEqual(
_dict,
{'d': 'd', 'r': 'r'}
)
if __name__ == "__main__":
unittest.main()
|
mr-uuid/snippets
|
python/comprehension.py
|
Python
|
mit
| 1,057
|
import os
import subprocess
import sys
from queue import Queue
sys.path.insert(0, ".")
import unittest
from bears.tests.LocalBearTestHelper import LocalBearTestHelper
from bears.natural_language.AlexBear import AlexBear
from coalib.settings.Section import Section
class AlexBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section("test section")
self.uut = AlexBear(self.section, Queue())
self.test_file1 = os.path.join(os.path.dirname(__file__),
"test_files",
"alex_test1.md")
self.test_file2 = os.path.join(os.path.dirname(__file__),
"test_files",
"alex_test2.md")
def test_run(self):
# Test a file with no issues
self.assertLinesValid(self.uut, [], self.test_file1)
# Test a file with issues
self.assertLinesInvalid(self.uut, [], self.test_file2)
def skip_test():
try:
subprocess.Popen(['alex', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return False
except OSError:
return "Alex is not installed."
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Tanmay28/coala
|
bears/tests/natural_language/AlexBearTest.py
|
Python
|
agpl-3.0
| 1,313
|
# -*- coding: utf-8 -*-
import os
import base64
from random import choice
def random_file_from_dir(relative_path):
random_file = choice(os.listdir(os.path.join(os.getcwd(), relative_path)))
return abs_path_to_file(os.path.join(relative_path, random_file))
def abs_path_to_file(relative_path):
# print os.getcwd()
return os.path.abspath(os.path.join(os.getcwd(), relative_path))
def encode_base64(abs_path):
print "abs_path", abs_path
with open(abs_path, 'rb') as f:
return base64.b64encode(f.read())
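# Hedged usage sketch (directory and file names are hypothetical):
#
#     photo = random_file_from_dir('test_data/images')   # absolute path
#     payload = encode_base64(photo)                      # base64-encoded file contents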
|
alazanman/py_epg_tests
|
utils/file_util.py
|
Python
|
apache-2.0
| 535
|
from ert.enkf.enums.realization_state_enum import RealizationStateEnum
from ert.test import ErtTestContext, ExtendedTestCase
from ert.util import BoolVector
class LoadResultsManuallyTest(ExtendedTestCase):
def setUp(self):
self.config_file = self.createTestPath("Statoil/config/with_data/config")
def test_load_results_manually(self):
with ErtTestContext("manual_load_test", self.config_file) as test_context:
ert = test_context.getErt()
load_into_case = "A1"
load_from_case = "default"
load_into = ert.getEnkfFsManager().getFileSystem(load_into_case)
load_from = ert.getEnkfFsManager().getFileSystem(load_from_case)
ert.getEnkfFsManager().switchFileSystem(load_from)
            realisations = BoolVector(default_value=True, initial_size=25)
realisations[7] = False
iteration = 0
ert.loadFromForwardModel(realisations, iteration, load_into)
load_into_case_state_map = load_into.getStateMap()
load_into_states = [state for state in load_into_case_state_map]
expected = [RealizationStateEnum.STATE_HAS_DATA] * 25
expected[7] = RealizationStateEnum.STATE_UNDEFINED
self.assertListEqual(load_into_states, expected)
|
arielalmendral/ert
|
python/tests/ert/enkf/test_enkf_load_results_manually.py
|
Python
|
gpl-3.0
| 1,338
|
## Allow this directory to function as a module within which we can import
## individual modules.
pass
|
curiouserrandy/Python-Utils
|
__init__.py
|
Python
|
gpl-2.0
| 103
|
#
# This file is part of Mapnik (C++/Python mapping toolkit)
# Copyright (C) 2009 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""Mapnik Python module.
Boost Python bindings to the Mapnik C++ shared library.
Several things happen when you do:
>>> import mapnik
1) Mapnik C++ objects are imported via the '__init__.py' from the '_mapnik.so' shared object
(_mapnik.pyd on win) which references libmapnik.so (linux), libmapnik.dylib (mac), or
mapnik.dll (win32).
2) The paths to the input plugins and font directories are imported from the 'paths.py'
file which was constructed and installed during SCons installation.
3) All available input plugins and TrueType fonts are automatically registered.
4) Boost Python metaclass injectors are used in the '__init__.py' to extend several
objects adding extra convenience when accessed via Python.
"""
import itertools
import os
import sys
import warnings
try:
import json
except ImportError:
import simplejson as json
def bootstrap_env():
"""
If an optional settings file exists, inherit its
environment settings before loading the mapnik library.
This feature is intended for customized packages of mapnik.
The settings file should be a python file with an 'env' variable
that declares a dictionary of key:value pairs to push into the
global process environment, if not already set, like:
env = {'ICU_DATA':'/usr/local/share/icu/'}
"""
if os.path.exists(os.path.join(os.path.dirname(__file__),'mapnik_settings.py')):
from mapnik_settings import env
process_keys = os.environ.keys()
for key, value in env.items():
if key not in process_keys:
os.environ[key] = value
bootstrap_env()
from _mapnik import *
import printing
printing.renderer = render
# The base Boost.Python class
BoostPythonMetaclass = Coord.__class__
class _MapnikMetaclass(BoostPythonMetaclass):
def __init__(self, name, bases, dict):
for b in bases:
if type(b) not in (self, type):
for k,v in list(dict.items()):
if hasattr(b, k):
setattr(b, '_c_'+k, getattr(b, k))
setattr(b,k,v)
return type.__init__(self, name, bases, dict)
# metaclass injector compatible with both python 2 and 3
# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
_injector = _MapnikMetaclass('_injector', (object, ), {})
def Filter(*args,**kwargs):
warnings.warn("'Filter' is deprecated and will be removed in Mapnik 3.x, use 'Expression' instead",
DeprecationWarning, 2)
return Expression(*args, **kwargs)
class Envelope(Box2d):
def __init__(self, *args, **kwargs):
warnings.warn("'Envelope' is deprecated and will be removed in Mapnik 3.x, use 'Box2d' instead",
DeprecationWarning, 2)
Box2d.__init__(self, *args, **kwargs)
class _Coord(Coord,_injector):
"""
Represents a point with two coordinates (either lon/lat or x/y).
Following operators are defined for Coord:
Addition and subtraction of Coord objects:
>>> Coord(10, 10) + Coord(20, 20)
Coord(30.0, 30.0)
>>> Coord(10, 10) - Coord(20, 20)
Coord(-10.0, -10.0)
Addition, subtraction, multiplication and division between
a Coord and a float:
>>> Coord(10, 10) + 1
Coord(11.0, 11.0)
>>> Coord(10, 10) - 1
Coord(-9.0, -9.0)
>>> Coord(10, 10) * 2
Coord(20.0, 20.0)
>>> Coord(10, 10) / 2
Coord(5.0, 5.0)
Equality of coords (as pairwise equality of components):
>>> Coord(10, 10) is Coord(10, 10)
False
>>> Coord(10, 10) == Coord(10, 10)
True
"""
def __repr__(self):
return 'Coord(%s,%s)' % (self.x, self.y)
def forward(self, projection):
"""
Projects the point from the geographic coordinate
space into the cartesian space. The x component is
considered to be longitude, the y component the
latitude.
Returns the easting (x) and northing (y) as a
coordinate pair.
Example: Project the geographic coordinates of the
city center of Stuttgart into the local
map projection (GK Zone 3/DHDN, EPSG 31467)
>>> p = Projection('+init=epsg:31467')
>>> Coord(9.1, 48.7).forward(p)
Coord(3507360.12813,5395719.2749)
"""
return forward_(self, projection)
def inverse(self, projection):
"""
Projects the point from the cartesian space
into the geographic space. The x component is
considered to be the easting, the y component
to be the northing.
Returns the longitude (x) and latitude (y) as a
coordinate pair.
Example: Project the cartesian coordinates of the
city center of Stuttgart in the local
map projection (GK Zone 3/DHDN, EPSG 31467)
into geographic coordinates:
>>> p = Projection('+init=epsg:31467')
>>> Coord(3507360.12813,5395719.2749).inverse(p)
Coord(9.1, 48.7)
"""
return inverse_(self, projection)
class _Box2d(Box2d,_injector):
"""
Represents a spatial envelope (i.e. bounding box).
Following operators are defined for Box2d:
Addition:
    e1 + e2 is equivalent to e1.expand_to_include(e2) but yields
a new envelope instead of modifying e1
Subtraction:
Currently e1 - e2 returns e1.
Multiplication and division with floats:
Multiplication and division change the width and height of the envelope
    by the given factor without modifying its center.
That is, e1 * x is equivalent to:
e1.width(x * e1.width())
e1.height(x * e1.height()),
except that a new envelope is created instead of modifying e1.
e1 / x is equivalent to e1 * (1.0/x).
Equality: two envelopes are equal if their corner points are equal.
"""
def __repr__(self):
return 'Box2d(%s,%s,%s,%s)' % \
(self.minx,self.miny,self.maxx,self.maxy)
def forward(self, projection):
"""
Projects the envelope from the geographic space
into the cartesian space by projecting its corner
points.
See also:
Coord.forward(self, projection)
"""
return forward_(self, projection)
def inverse(self, projection):
"""
Projects the envelope from the cartesian space
into the geographic space by projecting its corner
points.
See also:
Coord.inverse(self, projection).
"""
return inverse_(self, projection)
class _Projection(Projection,_injector):
def __repr__(self):
return "Projection('%s')" % self.params()
def forward(self,obj):
"""
Projects the given object (Box2d or Coord)
from the geographic space into the cartesian space.
See also:
Box2d.forward(self, projection),
Coord.forward(self, projection).
"""
return forward_(obj,self)
def inverse(self,obj):
"""
Projects the given object (Box2d or Coord)
from the cartesian space into the geographic space.
See also:
Box2d.inverse(self, projection),
Coord.inverse(self, projection).
"""
return inverse_(obj,self)
class _Feature(Feature,_injector):
__geo_interface__ = property(lambda self: json.loads(self.to_geojson()))
class _Path(Path,_injector):
__geo_interface__ = property(lambda self: json.loads(self.to_geojson()))
class _Datasource(Datasource,_injector):
def all_features(self,fields=None,variables={}):
query = Query(self.envelope())
        query.set_variables(variables)
attributes = fields or self.fields()
for fld in attributes:
query.add_property_name(fld)
return self.features(query).features
def featureset(self,fields=None,variables={}):
query = Query(self.envelope())
        query.set_variables(variables)
attributes = fields or self.fields()
for fld in attributes:
query.add_property_name(fld)
return self.features(query)
class _Color(Color,_injector):
def __repr__(self):
return "Color(R=%d,G=%d,B=%d,A=%d)" % (self.r,self.g,self.b,self.a)
class _SymbolizerBase(SymbolizerBase,_injector):
# back compatibility
@property
def filename(self):
return self['file']
@filename.setter
def filename(self, val):
self['file'] = val
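# Attach a symbol() accessor to every *Symbolizer class via the metaclass
# injector: concrete subclasses simply return themselves, while the generic
# Symbolizer variant dispatches on self.type() to unwrap the concrete
# symbolizer it wraps.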
def _add_symbol_method_to_symbolizers(vars=globals()):
def symbol_for_subcls(self):
return self
def symbol_for_cls(self):
return getattr(self,self.type())()
for name, obj in vars.items():
if name.endswith('Symbolizer') and not name.startswith('_'):
if name == 'Symbolizer':
symbol = symbol_for_cls
else:
symbol = symbol_for_subcls
type('dummy', (obj,_injector), {'symbol': symbol})
_add_symbol_method_to_symbolizers()
def Datasource(**keywords):
"""Wrapper around CreateDatasource.
Create a Mapnik Datasource using a dictionary of parameters.
Keywords must include:
type='plugin_name' # e.g. type='gdal'
See the convenience factory methods of each input plugin for
details on additional required keyword arguments.
"""
return CreateDatasource(keywords)
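# Hedged example (plugin name and path are illustrative):
#
#     ds = Datasource(type='shape', file='/home/mapnik/data/world_borders')
#
# is equivalent to the Shapefile() convenience factory below.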
# convenience factory methods
def Shapefile(**keywords):
"""Create a Shapefile Datasource.
Required keyword arguments:
file -- path to shapefile without extension
Optional keyword arguments:
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
>>> from mapnik import Shapefile, Layer
>>> shp = Shapefile(base='/home/mapnik/data',file='world_borders')
>>> lyr = Layer('Shapefile Layer')
>>> lyr.datasource = shp
"""
keywords['type'] = 'shape'
return CreateDatasource(keywords)
def CSV(**keywords):
"""Create a CSV Datasource.
Required keyword arguments:
file -- path to csv
Optional keyword arguments:
      inline -- inline CSV string (if provided, the 'file' argument is ignored and not needed)
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
row_limit -- integer limit of rows to return (default: 0)
strict -- throw an error if an invalid row is encountered
escape -- The escape character to use for parsing data
quote -- The quote character to use for parsing data
separator -- The separator character to use for parsing data
headers -- A comma separated list of header names that can be set to add headers to data that lacks them
filesize_max -- The maximum filesize in MB that will be accepted
>>> from mapnik import CSV
>>> csv = CSV(file='test.csv')
>>> from mapnik import CSV
>>> csv = CSV(inline='''wkt,Name\n"POINT (120.15 48.47)","Winthrop, WA"''')
For more information see https://github.com/mapnik/mapnik/wiki/CSV-Plugin
"""
keywords['type'] = 'csv'
return CreateDatasource(keywords)
def GeoJSON(**keywords):
"""Create a GeoJSON Datasource.
Required keyword arguments:
file -- path to json
Optional keyword arguments:
encoding -- file encoding (default 'utf-8')
base -- path prefix (default None)
>>> from mapnik import GeoJSON
>>> geojson = GeoJSON(file='test.json')
"""
keywords['type'] = 'geojson'
return CreateDatasource(keywords)
def PostGIS(**keywords):
"""Create a PostGIS Datasource.
Required keyword arguments:
dbname -- database name to connect to
table -- table name or subselect query
*Note: if using subselects for the 'table' value consider also
passing the 'geometry_field' and 'srid' and 'extent_from_subquery'
options and/or specifying the 'geometry_table' option.
Optional db connection keyword arguments:
user -- database user to connect as (default: see postgres docs)
password -- password for database user (default: see postgres docs)
      host -- postgres hostname (default: see postgres docs)
port -- postgres port (default: see postgres docs)
initial_size -- integer size of connection pool (default: 1)
max_size -- integer max of connection pool (default: 10)
persist_connection -- keep connection open (default: True)
Optional table-level keyword arguments:
extent -- manually specified data extent (comma delimited string, default: None)
estimate_extent -- boolean, direct PostGIS to use the faster, less accurate `estimate_extent` over `extent` (default: False)
extent_from_subquery -- boolean, direct Mapnik to query Postgis for the extent of the raw 'table' value (default: uses 'geometry_table')
geometry_table -- specify geometry table to use to look up metadata (default: automatically parsed from 'table' value)
geometry_field -- specify geometry field to use (default: first entry in geometry_columns)
srid -- specify srid to use (default: auto-detected from geometry_field)
row_limit -- integer limit of rows to return (default: 0)
cursor_size -- integer size of binary cursor to use (default: 0, no binary cursor is used)
>>> from mapnik import PostGIS, Layer
>>> params = dict(dbname=env['MAPNIK_NAME'],table='osm',user='postgres',password='gis')
>>> params['estimate_extent'] = False
>>> params['extent'] = '-20037508,-19929239,20037508,19929239'
>>> postgis = PostGIS(**params)
>>> lyr = Layer('PostGIS Layer')
>>> lyr.datasource = postgis
"""
keywords['type'] = 'postgis'
return CreateDatasource(keywords)
def PgRaster(**keywords):
"""Create a PgRaster Datasource.
Required keyword arguments:
dbname -- database name to connect to
table -- table name or subselect query
*Note: if using subselects for the 'table' value consider also
passing the 'raster_field' and 'srid' and 'extent_from_subquery'
options and/or specifying the 'raster_table' option.
Optional db connection keyword arguments:
user -- database user to connect as (default: see postgres docs)
password -- password for database user (default: see postgres docs)
      host -- postgres hostname (default: see postgres docs)
port -- postgres port (default: see postgres docs)
initial_size -- integer size of connection pool (default: 1)
max_size -- integer max of connection pool (default: 10)
persist_connection -- keep connection open (default: True)
Optional table-level keyword arguments:
extent -- manually specified data extent (comma delimited string, default: None)
estimate_extent -- boolean, direct PostGIS to use the faster, less accurate `estimate_extent` over `extent` (default: False)
extent_from_subquery -- boolean, direct Mapnik to query Postgis for the extent of the raw 'table' value (default: uses 'geometry_table')
raster_table -- specify geometry table to use to look up metadata (default: automatically parsed from 'table' value)
raster_field -- specify geometry field to use (default: first entry in raster_columns)
srid -- specify srid to use (default: auto-detected from geometry_field)
row_limit -- integer limit of rows to return (default: 0)
cursor_size -- integer size of binary cursor to use (default: 0, no binary cursor is used)
use_overviews -- boolean, use overviews when available (default: false)
prescale_rasters -- boolean, scale rasters on the db side (default: false)
clip_rasters -- boolean, clip rasters on the db side (default: false)
band -- integer, if non-zero interprets the given band (1-based offset) as a data raster (default: 0)
>>> from mapnik import PgRaster, Layer
>>> params = dict(dbname='mapnik',table='osm',user='postgres',password='gis')
>>> params['estimate_extent'] = False
>>> params['extent'] = '-20037508,-19929239,20037508,19929239'
>>> pgraster = PgRaster(**params)
>>> lyr = Layer('PgRaster Layer')
>>> lyr.datasource = pgraster
"""
keywords['type'] = 'pgraster'
return CreateDatasource(keywords)
def Raster(**keywords):
"""Create a Raster (Tiff) Datasource.
Required keyword arguments:
file -- path to stripped or tiled tiff
lox -- lowest (min) x/longitude of tiff extent
loy -- lowest (min) y/latitude of tiff extent
hix -- highest (max) x/longitude of tiff extent
hiy -- highest (max) y/latitude of tiff extent
Hint: lox,loy,hix,hiy make a Mapnik Box2d
Optional keyword arguments:
base -- path prefix (default None)
multi -- whether the image is in tiles on disk (default False)
Multi-tiled keyword arguments:
x_width -- virtual image number of tiles in X direction (required)
y_width -- virtual image number of tiles in Y direction (required)
tile_size -- if an image is in tiles, how large are the tiles (default 256)
tile_stride -- if an image is in tiles, what's the increment between rows/cols (default 1)
>>> from mapnik import Raster, Layer
>>> raster = Raster(base='/home/mapnik/data',file='elevation.tif',lox=-122.8,loy=48.5,hix=-122.7,hiy=48.6)
>>> lyr = Layer('Tiff Layer')
>>> lyr.datasource = raster
"""
keywords['type'] = 'raster'
return CreateDatasource(keywords)
def Gdal(**keywords):
"""Create a GDAL Raster Datasource.
Required keyword arguments:
file -- path to GDAL supported dataset
Optional keyword arguments:
base -- path prefix (default None)
shared -- boolean, open GdalDataset in shared mode (default: False)
bbox -- tuple (minx, miny, maxx, maxy). If specified, overrides the bbox detected by GDAL.
>>> from mapnik import Gdal, Layer
>>> dataset = Gdal(base='/home/mapnik/data',file='elevation.tif')
>>> lyr = Layer('GDAL Layer from TIFF file')
>>> lyr.datasource = dataset
"""
keywords['type'] = 'gdal'
if 'bbox' in keywords:
if isinstance(keywords['bbox'], (tuple, list)):
keywords['bbox'] = ','.join([str(item) for item in keywords['bbox']])
return CreateDatasource(keywords)
def Occi(**keywords):
"""Create a Oracle Spatial (10g) Vector Datasource.
Required keyword arguments:
user -- database user to connect as
password -- password for database user
      host -- oracle host to connect to (does not refer to SID in tnsnames.ora)
table -- table name or subselect query
Optional keyword arguments:
initial_size -- integer size of connection pool (default 1)
max_size -- integer max of connection pool (default 10)
extent -- manually specified data extent (comma delimited string, default None)
estimate_extent -- boolean, direct Oracle to use the faster, less accurate estimate_extent() over extent() (default False)
encoding -- file encoding (default 'utf-8')
geometry_field -- specify geometry field (default 'GEOLOC')
use_spatial_index -- boolean, force the use of the spatial index (default True)
>>> from mapnik import Occi, Layer
>>> params = dict(host='myoracle',user='scott',password='tiger',table='test')
>>> params['estimate_extent'] = False
>>> params['extent'] = '-20037508,-19929239,20037508,19929239'
>>> oracle = Occi(**params)
>>> lyr = Layer('Oracle Spatial Layer')
>>> lyr.datasource = oracle
"""
keywords['type'] = 'occi'
return CreateDatasource(keywords)
def Ogr(**keywords):
"""Create a OGR Vector Datasource.
Required keyword arguments:
file -- path to OGR supported dataset
layer -- name of layer to use within datasource (optional if layer_by_index or layer_by_sql is used)
Optional keyword arguments:
layer_by_index -- choose layer by index number instead of by layer name or sql.
layer_by_sql -- choose layer by sql query number instead of by layer name or index.
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
>>> from mapnik import Ogr, Layer
>>> datasource = Ogr(base='/home/mapnik/data',file='rivers.geojson',layer='OGRGeoJSON')
>>> lyr = Layer('OGR Layer from GeoJSON file')
>>> lyr.datasource = datasource
"""
keywords['type'] = 'ogr'
return CreateDatasource(keywords)
def SQLite(**keywords):
"""Create a SQLite Datasource.
Required keyword arguments:
file -- path to SQLite database file
table -- table name or subselect query
Optional keyword arguments:
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
extent -- manually specified data extent (comma delimited string, default None)
      metadata -- name of auxiliary table containing record for table with xmin, ymin, xmax, ymax, and f_table_name
geometry_field -- name of geometry field (default 'the_geom')
key_field -- name of primary key field (default 'OGC_FID')
row_offset -- specify a custom integer row offset (default 0)
row_limit -- specify a custom integer row limit (default 0)
wkb_format -- specify a wkb type of 'spatialite' (default None)
use_spatial_index -- boolean, instruct sqlite plugin to use Rtree spatial index (default True)
>>> from mapnik import SQLite, Layer
>>> sqlite = SQLite(base='/home/mapnik/data',file='osm.db',table='osm',extent='-20037508,-19929239,20037508,19929239')
>>> lyr = Layer('SQLite Layer')
>>> lyr.datasource = sqlite
"""
keywords['type'] = 'sqlite'
return CreateDatasource(keywords)
def Rasterlite(**keywords):
"""Create a Rasterlite Datasource.
Required keyword arguments:
file -- path to Rasterlite database file
table -- table name or subselect query
Optional keyword arguments:
base -- path prefix (default None)
extent -- manually specified data extent (comma delimited string, default None)
>>> from mapnik import Rasterlite, Layer
>>> rasterlite = Rasterlite(base='/home/mapnik/data',file='osm.db',table='osm',extent='-20037508,-19929239,20037508,19929239')
>>> lyr = Layer('Rasterlite Layer')
>>> lyr.datasource = rasterlite
"""
keywords['type'] = 'rasterlite'
return CreateDatasource(keywords)
def Osm(**keywords):
"""Create a Osm Datasource.
Required keyword arguments:
file -- path to OSM file
Optional keyword arguments:
encoding -- file encoding (default 'utf-8')
url -- url to fetch data (default None)
bbox -- data bounding box for fetching data (default None)
>>> from mapnik import Osm, Layer
>>> datasource = Osm(file='test.osm')
>>> lyr = Layer('Osm Layer')
>>> lyr.datasource = datasource
"""
# note: parser only supports libxml2 so not exposing option
# parser -- xml parser to use (default libxml2)
keywords['type'] = 'osm'
return CreateDatasource(keywords)
def Python(**keywords):
"""Create a Python Datasource.
>>> from mapnik import Python, PythonDatasource
>>> datasource = Python('PythonDataSource')
>>> lyr = Layer('Python datasource')
>>> lyr.datasource = datasource
"""
keywords['type'] = 'python'
return CreateDatasource(keywords)
def MemoryDatasource(**keywords):
"""Create a Memory Datasource.
Optional keyword arguments:
(TODO)
"""
params = Parameters()
params.append(Parameter('type','memory'))
return MemoryDatasourceBase(params)
class PythonDatasource(object):
"""A base class for a Python data source.
Optional arguments:
envelope -- a mapnik.Box2d (minx, miny, maxx, maxy) envelope of the data source, default (-180,-90,180,90)
geometry_type -- one of the DataGeometryType enumeration values, default Point
data_type -- one of the DataType enumerations, default Vector
"""
def __init__(self, envelope=None, geometry_type=None, data_type=None):
self.envelope = envelope or Box2d(-180, -90, 180, 90)
self.geometry_type = geometry_type or DataGeometryType.Point
self.data_type = data_type or DataType.Vector
def features(self, query):
"""Return an iterable which yields instances of Feature for features within the passed query.
Required arguments:
query -- a Query instance specifying the region for which features should be returned
"""
return None
def features_at_point(self, point):
"""Rarely uses. Return an iterable which yields instances of Feature for the specified point."""
return None
@classmethod
def wkb_features(cls, keys, features):
"""A convenience function to wrap an iterator yielding pairs of WKB format geometry and dictionaries of
key-value pairs into mapnik features. Return this from PythonDatasource.features() passing it a sequence of keys
to appear in the output and an iterator yielding features.
For example. One might have a features() method in a derived class like the following:
def features(self, query):
# ... create WKB features feat1 and feat2
return mapnik.PythonDatasource.wkb_features(
keys = ( 'name', 'author' ),
features = [
(feat1, { 'name': 'feat1', 'author': 'alice' }),
(feat2, { 'name': 'feat2', 'author': 'bob' }),
]
)
"""
ctx = Context()
        for key in keys:
            ctx.push(key)
def make_it(feat, idx):
f = Feature(ctx, idx)
geom, attrs = feat
f.add_geometries_from_wkb(geom)
for k, v in attrs.iteritems():
f[k] = v
return f
return itertools.imap(make_it, features, itertools.count(1))
@classmethod
def wkt_features(cls, keys, features):
"""A convenience function to wrap an iterator yielding pairs of WKT format geometry and dictionaries of
key-value pairs into mapnik features. Return this from PythonDatasource.features() passing it a sequence of keys
to appear in the output and an iterator yielding features.
For example. One might have a features() method in a derived class like the following:
def features(self, query):
# ... create WKT features feat1 and feat2
return mapnik.PythonDatasource.wkt_features(
keys = ( 'name', 'author' ),
features = [
(feat1, { 'name': 'feat1', 'author': 'alice' }),
(feat2, { 'name': 'feat2', 'author': 'bob' }),
]
)
"""
ctx = Context()
        for key in keys:
            ctx.push(key)
def make_it(feat, idx):
f = Feature(ctx, idx)
geom, attrs = feat
f.add_geometries_from_wkt(geom)
for k, v in attrs.iteritems():
f[k] = v
return f
return itertools.imap(make_it, features, itertools.count(1))
class _TextSymbolizer(TextSymbolizer,_injector):
@property
def name(self):
if isinstance(self.properties.format_tree, FormattingText):
return self.properties.format_tree.text
else:
# There is no single expression which could be returned as name
raise RuntimeError("TextSymbolizer uses complex formatting features, but old compatibility interface is used to access it. Use self.properties.format_tree instead.")
@name.setter
def name(self, name):
self.properties.format_tree = FormattingText(name)
@property
def text_size(self):
return self.format.text_size
@text_size.setter
def text_size(self, text_size):
self.format.text_size = text_size
@property
def face_name(self):
return self.format.face_name
@face_name.setter
def face_name(self, face_name):
self.format.face_name = face_name
@property
def fontset(self):
return self.format.fontset
@fontset.setter
def fontset(self, fontset):
self.format.fontset = fontset
@property
def character_spacing(self):
return self.format.character_spacing
@character_spacing.setter
def character_spacing(self, character_spacing):
self.format.character_spacing = character_spacing
@property
def line_spacing(self):
return self.format.line_spacing
@line_spacing.setter
def line_spacing(self, line_spacing):
self.format.line_spacing = line_spacing
@property
def text_opacity(self):
return self.format.text_opacity
@text_opacity.setter
def text_opacity(self, text_opacity):
self.format.text_opacity = text_opacity
@property
def wrap_before(self):
return self.format.wrap_before
@wrap_before.setter
def wrap_before(self, wrap_before):
self.format.wrap_before = wrap_before
@property
def text_transform(self):
return self.format.text_transform
@text_transform.setter
def text_transform(self, text_transform):
self.format.text_transform = text_transform
@property
def fill(self):
return self.format.fill
@fill.setter
def fill(self, fill):
self.format.fill = fill
@property
def halo_fill(self):
return self.format.halo_fill
@halo_fill.setter
def halo_fill(self, halo_fill):
self.format.halo_fill = halo_fill
@property
def halo_radius(self):
return self.format.halo_radius
@halo_radius.setter
def halo_radius(self, halo_radius):
self.format.halo_radius = halo_radius
@property
def label_placement(self):
return self.properties.label_placement
@label_placement.setter
def label_placement(self, label_placement):
self.properties.label_placement = label_placement
@property
def horizontal_alignment(self):
return self.properties.horizontal_alignment
@horizontal_alignment.setter
def horizontal_alignment(self, horizontal_alignment):
self.properties.horizontal_alignment = horizontal_alignment
@property
def justify_alignment(self):
return self.properties.justify_alignment
@justify_alignment.setter
def justify_alignment(self, justify_alignment):
self.properties.justify_alignment = justify_alignment
@property
def vertical_alignment(self):
return self.properties.vertical_alignment
@vertical_alignment.setter
def vertical_alignment(self, vertical_alignment):
self.properties.vertical_alignment = vertical_alignment
@property
def orientation(self):
return self.properties.orientation
@orientation.setter
def orientation(self, orientation):
self.properties.orientation = orientation
@property
def displacement(self):
return self.properties.displacement
@displacement.setter
def displacement(self, displacement):
self.properties.displacement = displacement
@property
def label_spacing(self):
return self.properties.label_spacing
@label_spacing.setter
def label_spacing(self, label_spacing):
self.properties.label_spacing = label_spacing
@property
def label_position_tolerance(self):
return self.properties.label_position_tolerance
@label_position_tolerance.setter
def label_position_tolerance(self, label_position_tolerance):
self.properties.label_position_tolerance = label_position_tolerance
@property
def avoid_edges(self):
return self.properties.avoid_edges
@avoid_edges.setter
def avoid_edges(self, avoid_edges):
self.properties.avoid_edges = avoid_edges
@property
def minimum_distance(self):
return self.properties.minimum_distance
@minimum_distance.setter
def minimum_distance(self, minimum_distance):
self.properties.minimum_distance = minimum_distance
@property
def minimum_padding(self):
return self.properties.minimum_padding
@minimum_padding.setter
def minimum_padding(self, minimum_padding):
self.properties.minimum_padding = minimum_padding
@property
def minimum_path_length(self):
return self.properties.minimum_path_length
@minimum_path_length.setter
def minimum_path_length(self, minimum_path_length):
self.properties.minimum_path_length = minimum_path_length
@property
def maximum_angle_char_delta(self):
return self.properties.maximum_angle_char_delta
@maximum_angle_char_delta.setter
def maximum_angle_char_delta(self, maximum_angle_char_delta):
self.properties.maximum_angle_char_delta = maximum_angle_char_delta
@property
def allow_overlap(self):
return self.properties.allow_overlap
@allow_overlap.setter
def allow_overlap(self, allow_overlap):
self.properties.allow_overlap = allow_overlap
@property
def text_ratio(self):
return self.properties.text_ratio
@text_ratio.setter
def text_ratio(self, text_ratio):
self.properties.text_ratio = text_ratio
@property
def wrap_width(self):
return self.properties.wrap_width
@wrap_width.setter
def wrap_width(self, wrap_width):
self.properties.wrap_width = wrap_width
def mapnik_version_from_string(version_string):
"""Return the Mapnik version from a string."""
n = version_string.split('.')
    return (int(n[0]) * 100000) + (int(n[1]) * 100) + int(n[2])
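# e.g. mapnik_version_from_string('2.2.0') == 200200  (2*100000 + 2*100 + 0)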
def register_plugins(path=None):
"""Register plugins located by specified path"""
if not path:
        if 'MAPNIK_INPUT_PLUGINS_DIRECTORY' in os.environ:
path = os.environ.get('MAPNIK_INPUT_PLUGINS_DIRECTORY')
else:
from paths import inputpluginspath
path = inputpluginspath
DatasourceCache.register_datasources(path)
def register_fonts(path=None, valid_extensions=['.ttf','.otf','.ttc','.pfa','.pfb','.dfont','.woff']):
"""Recursively register fonts using path argument as base directory"""
if not path:
        if 'MAPNIK_FONT_DIRECTORY' in os.environ:
path = os.environ.get('MAPNIK_FONT_DIRECTORY')
else:
from paths import fontscollectionpath
path = fontscollectionpath
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
if os.path.splitext(filename.lower())[1] in valid_extensions:
FontEngine.instance().register_font(os.path.join(dirpath, filename))
# auto-register known plugins and fonts
register_plugins()
register_fonts()
|
qianwenming/mapnik
|
bindings/python/mapnik/__init__.py
|
Python
|
lgpl-2.1
| 35,583
|
import mongoengine as db
class User(db.Document):
user_id = db.StringField(required=True, unique=True)
created = db.DateTimeField(required=True)
last_login = db.DateTimeField()
nino = db.StringField()
    linked_ids = db.ListField(db.ReferenceField('User'), default=list)  # callable default avoids a shared mutable list
def link(self, other):
self.update(push__linked_ids=other)
other.update(push__linked_ids=self)
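# Hedged usage sketch (values are illustrative; assumes
# 'from datetime import datetime'):
#
#     alice = User(user_id='alice', created=datetime.utcnow()).save()
#     bob = User(user_id='bob', created=datetime.utcnow()).save()
#     alice.link(bob)   # both documents now reference each other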
|
crossgovernmentservices/userstore-prototype
|
application/models.py
|
Python
|
mit
| 404
|
import xml.etree.ElementTree as ElementTree
import sys, os.path, urlparse, urllib, datetime, re
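# Example invocation (file name is hypothetical); the file is rewritten in
# place with duplicate clips merged and dateCreated metadata added:
#
#     python crosseyed.py "My Project replaced.fcpxml"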
# Check that a file argument was supplied
if len(sys.argv) < 2:
print 'No input file specified'
sys.exit(2)
filename = sys.argv[1]
# Check that the file exists
if not os.path.isfile(filename):
print 'Invalid file name'
sys.exit(2)
# Check that this is a FCP XML file
if not filename.endswith('.fcpxml'):
print 'Invalid file extension'
sys.exit(2)
# Parse the file
try:
tree = ElementTree.parse(filename)
except ElementTree.ParseError:
print 'Unable to parse file'
sys.exit(2)
project = tree.find('project')
# Check that a project exists
if project is None:
print 'Incorrect XML format'
sys.exit(2)
# Remove "replaced" from project name
if project.attrib.get('name'):
project.attrib['name'] = re.sub(r'\sreplaced$', '', project.attrib['name'])
clips = {}
# Loop through the clips
for clip in project.findall('clip'):
name = clip.attrib['name']
    if name not in clips:
# Get the video file
ref = clip.find('.//video').attrib['ref']
src = project.find('.//asset[@id="' + ref + '"]').attrib['src']
path = urlparse.urlparse(src).path
path = urllib.unquote(path)
stat = os.stat(path)
date = datetime.datetime.fromtimestamp(stat.st_birthtime)
metadata = clip.find('./metadata')
if metadata is None:
metadata = ElementTree.SubElement(clip, 'metadata')
if metadata.find('./md[@key="com.apple.proapps.custom.dateCreated"]') is None:
# @todo include the timezone offset
md = ElementTree.SubElement(metadata, 'md', {
'key': 'com.apple.proapps.custom.dateCreated',
'value': date.strftime('%Y-%m-%d %H:%M:%S'),
'type': 'date',
'editable': '0',
'displayName': 'Date Created',
'source': 'CrossEyed'
})
# Add this clip to the dictionary
clips[name] = clip
else:
# Move the audio clips to the existing clip, and remove this clip
existingClip = clips[name]
videoClip = existingClip.find('.//video/..')
audioClips = clip.findall('.//audio/..')
for audioClip in audioClips:
videoClip.append(audioClip)
project.remove(clip)
tree.write(filename)
|
bartram/crosseyed
|
crosseyed.py
|
Python
|
gpl-2.0
| 2,229
|
from django.contrib.syndication.feeds import Feed
from django.utils import feedgenerator
from models import Event
class KcalEventsFeed(Feed):
title = "One Huge Lesson in Humility, by Myself (Events)"
link = "http://www.ohlih.com"
subtitle = "Making the navel harder to gaze upon."
author_name = "Paul Ford"
item_author_name = "Paul Ford"
item_author_email = "ford@ftrain.com"
item_author_link = "http://www.ftrain.com"
item_copyright = 'Copyright (c) Paul Ford'
feed_guid = 'http://www.ftrain.com/ftrain/feeds/ohlih-events/'
feed_type = feedgenerator.Atom1Feed
def items(self):
        return Event.objects.select_related().order_by('-time')[:25]
def item_link(self, obj):
return 'http://www.ohlih.com/ohlih/' + str(obj.time.strftime("%Y/%m/%d")) + '#' + obj.time.strftime("%H:%M")
def item_pubdate(self, item):
return item.time
class KcalDaysFeed(Feed):
title = "One Huge Lesson in Humility, by Myself (Days)"
link = "http://www.ohlih.com"
subtitle = "Making the navel harder to gaze upon."
author_name = "Paul Ford"
item_author_name = "Paul Ford"
item_author_email = "ford@ftrain.com"
item_author_link = "http://www.ftrain.com"
item_copyright = 'Copyright (c) Paul Ford'
feed_guid = 'http://www.ftrain.com/ftrain/feeds/ohlih-events/'
feed_type = feedgenerator.Atom1Feed
def items(self):
        return Event.objects.select_related().order_by('-time')[:25]
def item_link(self, obj):
return 'http://www.ohlih.com/ohlih/' + str(obj.time.strftime("%Y/%m/%d")) + '#' + obj.time.strftime("%H:%M")
def item_pubdate(self, item):
return item.time
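# Hedged hook-up sketch for the legacy syndication framework (URL layout is
# illustrative):
#
#     feeds = {'events': KcalEventsFeed, 'days': KcalDaysFeed}
#     urlpatterns = patterns('',
#         (r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed',
#          {'feed_dict': feeds}),
#     )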
|
ftrain/django-ftrain
|
kcal/feeds.py
|
Python
|
bsd-3-clause
| 1,694
|
from django.views.generic import DetailView
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.mixins import EmailCreateView
from eventex.subscriptions.models import Subscription
new = EmailCreateView.as_view(model=Subscription, form_class=SubscriptionForm,
email_subject='Confirmação de Inscrição')
# TODO: change the pk lookup from an id to a hash
detail = DetailView.as_view(model=Subscription)
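# Hedged URL sketch (pattern and name are illustrative; DetailView expects a
# 'pk' keyword argument):
#
#     url(r'^inscricao/(?P<pk>\d+)/$', detail, name='subscription_detail')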
|
rtancman/eventex
|
eventex/subscriptions/views.py
|
Python
|
gpl-2.0
| 461